repo_name
stringlengths
5
85
path
stringlengths
3
252
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
922
999k
license
stringclasses
15 values
exabon/godot
thirdparty/opus/silk/ana_filt_bank_1.c
479
3770
/*********************************************************************** Copyright (c) 2006-2011, Skype Limited. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Internet Society, IETF or IETF Trust, nor the names of specific contributors, may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "SigProc_FIX.h" /* Coefficients for 2-band filter bank based on first-order allpass filters */ static opus_int16 A_fb1_20 = 5394 << 1; static opus_int16 A_fb1_21 = -24290; /* (opus_int16)(20623 << 1) */ /* Split signal into two decimated bands using first-order allpass filters */ void silk_ana_filt_bank_1( const opus_int16 *in, /* I Input signal [N] */ opus_int32 *S, /* I/O State vector [2] */ opus_int16 *outL, /* O Low band [N/2] */ opus_int16 *outH, /* O High band [N/2] */ const opus_int32 N /* I Number of input samples */ ) { opus_int k, N2 = silk_RSHIFT( N, 1 ); opus_int32 in32, X, Y, out_1, out_2; /* Internal variables and state are in Q10 format */ for( k = 0; k < N2; k++ ) { /* Convert to Q10 */ in32 = silk_LSHIFT( (opus_int32)in[ 2 * k ], 10 ); /* All-pass section for even input sample */ Y = silk_SUB32( in32, S[ 0 ] ); X = silk_SMLAWB( Y, Y, A_fb1_21 ); out_1 = silk_ADD32( S[ 0 ], X ); S[ 0 ] = silk_ADD32( in32, X ); /* Convert to Q10 */ in32 = silk_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 ); /* All-pass section for odd input sample, and add to output of previous section */ Y = silk_SUB32( in32, S[ 1 ] ); X = silk_SMULWB( Y, A_fb1_20 ); out_2 = silk_ADD32( S[ 1 ], X ); S[ 1 ] = silk_ADD32( in32, X ); /* Add/subtract, convert back to int16 and store to output */ outL[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_ADD32( out_2, out_1 ), 11 ) ); outH[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SUB32( out_2, out_1 ), 11 ) ); } }
mit
RyuKojiro/StormLib
src/libtomcrypt/src/pk/asn1/der_decode_printable_string.c
237
2258
/* LibTomCrypt, modular cryptographic library -- Tom St Denis * * LibTomCrypt is a library that provides various cryptographic * algorithms in a highly modular and flexible manner. * * The library is free for all purposes without any express * guarantee it works. * * Tom St Denis, tomstdenis@gmail.com, http://libtom.org */ #include "../../headers/tomcrypt.h" /** @file der_decode_printable_string.c ASN.1 DER, encode a printable STRING, Tom St Denis */ #ifdef LTC_DER /** Store a printable STRING @param in The DER encoded printable STRING @param inlen The size of the DER printable STRING @param out [out] The array of octets stored (one per char) @param outlen [in/out] The number of octets stored @return CRYPT_OK if successful */ int der_decode_printable_string(const unsigned char *in, unsigned long inlen, unsigned char *out, unsigned long *outlen) { unsigned long x, y, len; int t; LTC_ARGCHK(in != NULL); LTC_ARGCHK(out != NULL); LTC_ARGCHK(outlen != NULL); /* must have header at least */ if (inlen < 2) { return CRYPT_INVALID_PACKET; } /* check for 0x13 */ if ((in[0] & 0x1F) != 0x13) { return CRYPT_INVALID_PACKET; } x = 1; /* decode the length */ if (in[x] & 0x80) { /* valid # of bytes in length are 1,2,3 */ y = in[x] & 0x7F; if ((y == 0) || (y > 3) || ((x + y) > inlen)) { return CRYPT_INVALID_PACKET; } /* read the length in */ len = 0; ++x; while (y--) { len = (len << 8) | in[x++]; } } else { len = in[x++] & 0x7F; } /* is it too long? */ if (len > *outlen) { *outlen = len; return CRYPT_BUFFER_OVERFLOW; } if (len + x > inlen) { return CRYPT_INVALID_PACKET; } /* read the data */ for (y = 0; y < len; y++) { t = der_printable_value_decode(in[x++]); if (t == -1) { return CRYPT_INVALID_ARG; } out[y] = t; } *outlen = y; return CRYPT_OK; } #endif /* $Source: /cvs/libtom/libtomcrypt/src/pk/asn1/der/printable_string/der_decode_printable_string.c,v $ */ /* $Revision: 1.4 $ */ /* $Date: 2006/12/28 01:27:24 $ */
mit
safaricoin/trenchcoin
src/crypter.cpp
1264
3943
// Copyright (c) 2009-2012 The Bitcoin Developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <openssl/aes.h> #include <openssl/evp.h> #include <vector> #include <string> #ifdef WIN32 #include <windows.h> #endif #include "crypter.h" bool CCrypter::SetKeyFromPassphrase(const SecureString& strKeyData, const std::vector<unsigned char>& chSalt, const unsigned int nRounds, const unsigned int nDerivationMethod) { if (nRounds < 1 || chSalt.size() != WALLET_CRYPTO_SALT_SIZE) return false; int i = 0; if (nDerivationMethod == 0) i = EVP_BytesToKey(EVP_aes_256_cbc(), EVP_sha512(), &chSalt[0], (unsigned char *)&strKeyData[0], strKeyData.size(), nRounds, chKey, chIV); if (i != (int)WALLET_CRYPTO_KEY_SIZE) { OPENSSL_cleanse(chKey, sizeof(chKey)); OPENSSL_cleanse(chIV, sizeof(chIV)); return false; } fKeySet = true; return true; } bool CCrypter::SetKey(const CKeyingMaterial& chNewKey, const std::vector<unsigned char>& chNewIV) { if (chNewKey.size() != WALLET_CRYPTO_KEY_SIZE || chNewIV.size() != WALLET_CRYPTO_KEY_SIZE) return false; memcpy(&chKey[0], &chNewKey[0], sizeof chKey); memcpy(&chIV[0], &chNewIV[0], sizeof chIV); fKeySet = true; return true; } bool CCrypter::Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned char> &vchCiphertext) { if (!fKeySet) return false; // max ciphertext len for a n bytes of plaintext is // n + AES_BLOCK_SIZE - 1 bytes int nLen = vchPlaintext.size(); int nCLen = nLen + AES_BLOCK_SIZE, nFLen = 0; vchCiphertext = std::vector<unsigned char> (nCLen); EVP_CIPHER_CTX ctx; bool fOk = true; EVP_CIPHER_CTX_init(&ctx); if (fOk) fOk = EVP_EncryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV); if (fOk) fOk = EVP_EncryptUpdate(&ctx, &vchCiphertext[0], &nCLen, &vchPlaintext[0], nLen); if (fOk) fOk = EVP_EncryptFinal_ex(&ctx, (&vchCiphertext[0])+nCLen, &nFLen); EVP_CIPHER_CTX_cleanup(&ctx); if (!fOk) return false; 
vchCiphertext.resize(nCLen + nFLen); return true; } bool CCrypter::Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingMaterial& vchPlaintext) { if (!fKeySet) return false; // plaintext will always be equal to or lesser than length of ciphertext int nLen = vchCiphertext.size(); int nPLen = nLen, nFLen = 0; vchPlaintext = CKeyingMaterial(nPLen); EVP_CIPHER_CTX ctx; bool fOk = true; EVP_CIPHER_CTX_init(&ctx); if (fOk) fOk = EVP_DecryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV); if (fOk) fOk = EVP_DecryptUpdate(&ctx, &vchPlaintext[0], &nPLen, &vchCiphertext[0], nLen); if (fOk) fOk = EVP_DecryptFinal_ex(&ctx, (&vchPlaintext[0])+nPLen, &nFLen); EVP_CIPHER_CTX_cleanup(&ctx); if (!fOk) return false; vchPlaintext.resize(nPLen + nFLen); return true; } bool EncryptSecret(const CKeyingMaterial& vMasterKey, const CKeyingMaterial &vchPlaintext, const uint256& nIV, std::vector<unsigned char> &vchCiphertext) { CCrypter cKeyCrypter; std::vector<unsigned char> chIV(WALLET_CRYPTO_KEY_SIZE); memcpy(&chIV[0], &nIV, WALLET_CRYPTO_KEY_SIZE); if(!cKeyCrypter.SetKey(vMasterKey, chIV)) return false; return cKeyCrypter.Encrypt(*((const CKeyingMaterial*)&vchPlaintext), vchCiphertext); } bool DecryptSecret(const CKeyingMaterial& vMasterKey, const std::vector<unsigned char>& vchCiphertext, const uint256& nIV, CKeyingMaterial& vchPlaintext) { CCrypter cKeyCrypter; std::vector<unsigned char> chIV(WALLET_CRYPTO_KEY_SIZE); memcpy(&chIV[0], &nIV, WALLET_CRYPTO_KEY_SIZE); if(!cKeyCrypter.SetKey(vMasterKey, chIV)) return false; return cKeyCrypter.Decrypt(vchCiphertext, *((CKeyingMaterial*)&vchPlaintext)); }
mit
cyberegoorg/cetech
src/celib/os/private/os_window_sdl2.c
1
6721
#include <celib/id.h> #include <celib/macros.h> #include "celib/log.h" #include "celib/memory/allocator.h" #include "celib/api.h" #include "include/SDL2/SDL.h" #include "include/SDL2/SDL_syswm.h" #include <celib/os/window.h> #define LOG_WHERE "os_window_sdl" //============================================================================== // Private //============================================================================== static uint32_t _sdl_pos(const uint32_t pos) { switch (pos) { case WINDOWPOS_CENTERED: return SDL_WINDOWPOS_CENTERED; case WINDOWPOS_UNDEFINED: return SDL_WINDOWPOS_UNDEFINED; default: return pos; } } static struct { enum ce_window_flags from; SDL_WindowFlags to; } _flag_to_sdl[] = { {.from = WINDOW_NOFLAG, .to = 0}, {.from = WINDOW_FULLSCREEN, .to = SDL_WINDOW_FULLSCREEN}, {.from = WINDOW_SHOWN, .to = SDL_WINDOW_SHOWN}, {.from = WINDOW_HIDDEN, .to = SDL_WINDOW_HIDDEN}, {.from = WINDOW_BORDERLESS, .to = SDL_WINDOW_BORDERLESS}, {.from = WINDOW_RESIZABLE, .to = SDL_WINDOW_RESIZABLE}, {.from = WINDOW_MINIMIZED, .to = SDL_WINDOW_MINIMIZED}, {.from = WINDOW_MAXIMIZED, .to = SDL_WINDOW_MAXIMIZED}, {.from = WINDOW_INPUT_GRABBED, .to = SDL_WINDOW_INPUT_GRABBED}, {.from = WINDOW_INPUT_FOCUS, .to = SDL_WINDOW_INPUT_FOCUS}, {.from = WINDOW_MOUSE_FOCUS, .to = SDL_WINDOW_MOUSE_FOCUS}, {.from = WINDOW_FULLSCREEN_DESKTOP, .to = SDL_WINDOW_FULLSCREEN_DESKTOP}, {.from = WINDOW_ALLOW_HIGHDPI, .to = SDL_WINDOW_ALLOW_HIGHDPI}, {.from = WINDOW_MOUSE_CAPTURE, .to = SDL_WINDOW_MOUSE_CAPTURE}, {.from = WINDOW_ALWAYS_ON_TOP, .to = SDL_WINDOW_ALWAYS_ON_TOP}, {.from = WINDOW_SKIP_TASKBAR, .to = SDL_WINDOW_SKIP_TASKBAR}, {.from = WINDOW_UTILITY, .to = SDL_WINDOW_UTILITY}, {.from = WINDOW_TOOLTIP, .to = SDL_WINDOW_TOOLTIP}, {.from = WINDOW_POPUP_MENU, .to = SDL_WINDOW_POPUP_MENU}, }; static uint32_t _sdl_flags(uint32_t flags) { uint32_t sdl_flags = 0; for (uint32_t i = 1; i < CE_ARRAY_LEN(_flag_to_sdl); ++i) { if (flags & _flag_to_sdl[i].from) { sdl_flags |= 
_flag_to_sdl[i].to; } } return sdl_flags; } //============================================================================== // Interface //============================================================================== void window_set_title(ce_window_o0 *w, const char *title) { SDL_SetWindowTitle((SDL_Window *) w, title); } const char *window_get_title(ce_window_o0 *w) { return SDL_GetWindowTitle((SDL_Window *) w); } void window_resize(ce_window_o0 *w, uint32_t width, uint32_t height) { SDL_SetWindowSize((SDL_Window *) w, width, height); } void window_get_size(ce_window_o0 *window, uint32_t *width, uint32_t *height) { int w, h; w = h = 0; SDL_GetWindowSize((SDL_Window *) window, &w, &h); *width = (uint32_t) w; *height = (uint32_t) h; } void *window_native_window_ptr(ce_window_o0 *w) { SDL_SysWMinfo wmi = {}; SDL_VERSION(&wmi.version); if (!SDL_GetWindowWMInfo((SDL_Window *) w, &wmi)) { return 0; } #if defined(CE_WINDOWS) return (void *) wmi.info.win.window; #elif CE_PLATFORM_LINUX return (void *) wmi.info.x11.window; #elif CE_PLATFORM_OSX return (void *) wmi.info.cocoa.window; #endif } void *window_native_display_ptr(ce_window_o0 *w) { SDL_SysWMinfo wmi; SDL_VERSION(&wmi.version); if (!SDL_GetWindowWMInfo((SDL_Window *) w, &wmi)) { return 0; } #if defined(CE_WINDOWS) return (void *) wmi.info.win.hdc; #elif CE_PLATFORM_LINUX return (void *) wmi.info.x11.display; #elif CE_PLATFORM_OSX return (0); #endif } void warp_mouse(ce_window_o0 *w, int x, int y) { SDL_WarpMouseInWindow((SDL_Window *) w, x, y); } struct ce_window_t0 *window_new(const char *title, enum ce_window_pos x, enum ce_window_pos y, const int32_t width, const int32_t height, uint32_t flags, ce_alloc_t0 *alloc) { ce_window_t0 *window = CE_ALLOC(alloc, ce_window_t0, sizeof(ce_window_t0)); if (CE_PLATFORM_LINUX) { SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3); SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2); SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE); } 
SDL_Window *w = SDL_CreateWindow( title, _sdl_pos(x), _sdl_pos(y), width, height, _sdl_flags(flags) ); if (w == NULL) { ce_log_a0->error(LOG_WHERE, "Could not create window: %s", SDL_GetError()); } *window = (ce_window_t0) { .inst = w, .set_title = window_set_title, .get_title = window_get_title, .resize = window_resize, .size = window_get_size, .native_window_ptr = window_native_window_ptr, .native_display_ptr = window_native_display_ptr, .warp_mouse = warp_mouse, }; return window; } struct ce_window_t0 *window_new_from(void *hndl, ce_alloc_t0 *alloc) { ce_window_t0 *window = CE_ALLOC(alloc, ce_window_t0, sizeof(ce_window_t0)); SDL_Window *w = SDL_CreateWindowFrom(hndl); if (w == NULL) { ce_log_a0->error(LOG_WHERE, "Could not create window: %s", SDL_GetError()); } *window = (ce_window_t0) { .inst = w, .set_title = window_set_title, .get_title = window_get_title, .resize = window_resize, .size = window_get_size, .native_window_ptr = window_native_window_ptr, .native_display_ptr = window_native_display_ptr, .warp_mouse = warp_mouse, }; return window; } void window_destroy(struct ce_window_t0 *w, ce_alloc_t0 *alloc) { SDL_DestroyWindow((SDL_Window *) w->inst); CE_FREE(alloc, w); } struct ce_os_window_a0 window_api = { .create = window_new, .create_from = window_new_from, .destroy = window_destroy, }; struct ce_os_window_a0 *ce_os_window_a0 = &window_api; int sdl_window_init(struct ce_api_a0 *api) { return 1; } void sdl_window_shutdown() { }
cc0-1.0
CyanogenMod/lge-kernel-msm7x30
drivers/gpu/drm/drm_drv.c
256
17153
/** * \file drm_drv.c * Generic driver template * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> * * To use this template, you must at least define the following (samples * given for the MGA driver): * * \code * #define DRIVER_AUTHOR "VA Linux Systems, Inc." * * #define DRIVER_NAME "mga" * #define DRIVER_DESC "Matrox G200/G400" * #define DRIVER_DATE "20001127" * * #define drm_x mga_##x * \endcode */ /* * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <linux/debugfs.h> #include <linux/slab.h> #include "drmP.h" #include "drm_core.h" static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); /** Ioctl table */ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), 
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if __OS_HAS_AGP DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, 
DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED) }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) /** * Take down the DRM device. * * \param dev DRM device structure. * * Frees every resource in \p dev. * * \sa drm_device */ int drm_lastclose(struct drm_device * dev) { struct drm_vma_entry *vma, *vma_temp; int i; DRM_DEBUG("\n"); if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); /* Free drawable information memory */ drm_drawable_free_all(dev); del_timer(&dev->timer); /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. */ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { if (entry->bound) drm_unbind_agp(entry->memory); drm_free_agp(entry->memory, entry->pages); kfree(entry); } INIT_LIST_HEAD(&dev->agp->memory); if (dev->agp->acquired) drm_agp_release(dev); dev->agp->acquired = 0; dev->agp->enabled = 0; } if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && !drm_core_check_feature(dev, DRIVER_MODESET)) { drm_sg_cleanup(dev->sg); dev->sg = NULL; } /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { list_del(&vma->head); kfree(vma); } if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { kfree(dev->queuelist[i]); dev->queuelist[i] = NULL; } kfree(dev->queuelist); dev->queuelist = NULL; } dev->queue_count = 0; if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) drm_dma_takedown(dev); dev->dev_mapping = NULL; 
mutex_unlock(&dev->struct_mutex); DRM_DEBUG("lastclose completed\n"); return 0; } /** * Module initialization. Called via init_module at module load time, or via * linux/init/main.c (this is not currently supported). * * \return zero on success or a negative number on failure. * * Initializes an array of drm_device structures, and attempts to * initialize all available devices, using consecutive minors, registering the * stubs and initializing the device. * * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and * after the initialization for driver customization. */ int drm_init(struct drm_driver *driver) { DRM_DEBUG("\n"); INIT_LIST_HEAD(&driver->device_list); if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE) return drm_platform_init(driver); else return drm_pci_init(driver); } EXPORT_SYMBOL(drm_init); void drm_exit(struct drm_driver *driver) { struct drm_device *dev, *tmp; DRM_DEBUG("\n"); if (driver->driver_features & DRIVER_MODESET) { pci_unregister_driver(&driver->pci_driver); } else { list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) drm_put_dev(dev); } DRM_INFO("Module unloaded\n"); } EXPORT_SYMBOL(drm_exit); /** File operations structure */ static const struct file_operations drm_stub_fops = { .owner = THIS_MODULE, .open = drm_stub_open }; static int __init drm_core_init(void) { int ret = -ENOMEM; idr_init(&drm_minors_idr); if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) goto err_p1; drm_class = drm_sysfs_create(THIS_MODULE, "drm"); if (IS_ERR(drm_class)) { printk(KERN_ERR "DRM: Error creating drm class.\n"); ret = PTR_ERR(drm_class); goto err_p2; } drm_proc_root = proc_mkdir("dri", NULL); if (!drm_proc_root) { DRM_ERROR("Cannot create /proc/dri\n"); ret = -1; goto err_p3; } drm_debugfs_root = debugfs_create_dir("dri", NULL); if (!drm_debugfs_root) { DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); ret = -1; goto err_p3; } DRM_INFO("Initialized %s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, CORE_MINOR, 
CORE_PATCHLEVEL, CORE_DATE); return 0; err_p3: drm_sysfs_destroy(); err_p2: unregister_chrdev(DRM_MAJOR, "drm"); idr_destroy(&drm_minors_idr); err_p1: return ret; } static void __exit drm_core_exit(void) { remove_proc_entry("dri", NULL); debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); unregister_chrdev(DRM_MAJOR, "drm"); idr_destroy(&drm_minors_idr); } module_init(drm_core_init); module_exit(drm_core_exit); /** * Copy and IOCTL return string to user space */ static int drm_copy_field(char *buf, size_t *buf_len, const char *value) { int len; /* don't overflow userbuf */ len = strlen(value); if (len > *buf_len) len = *buf_len; /* let userspace know exact length of driver value (which could be * larger than the userspace-supplied buffer) */ *buf_len = strlen(value); /* finally, try filling in the userbuf */ if (len && buf) if (copy_to_user(buf, value, len)) return -EFAULT; return 0; } /** * Get version information * * \param inode device inode. * \param filp file pointer. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. * * Fills in the version information in \p arg. */ static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_version *version = data; int err; version->version_major = dev->driver->major; version->version_minor = dev->driver->minor; version->version_patchlevel = dev->driver->patchlevel; err = drm_copy_field(version->name, &version->name_len, dev->driver->name); if (!err) err = drm_copy_field(version->date, &version->date_len, dev->driver->date); if (!err) err = drm_copy_field(version->desc, &version->desc_len, dev->driver->desc); return err; } /** * Called whenever a process performs an ioctl on /dev/drm. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. 
* * Looks up the ioctl function in the ::ioctls table, checking for root * previleges if so required, and dispatches to the respective function. */ long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev; struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; char stack_kdata[128]; char *kdata = NULL; dev = file_priv->minor->dev; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", task_pid_nr(current), cmd, nr, (long)old_encode_dev(file_priv->minor->device), file_priv->authenticated); if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ioctl = &drm_ioctls[nr]; cmd = ioctl->cmd; } else goto err_i1; /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? 
*/ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { retcode = -EACCES; } else { if (cmd & (IOC_IN | IOC_OUT)) { if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) { kdata = stack_kdata; } else { kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); if (!kdata) { retcode = -ENOMEM; goto err_i1; } } } if (cmd & IOC_IN) { if (copy_from_user(kdata, (void __user *)arg, _IOC_SIZE(cmd)) != 0) { retcode = -EFAULT; goto err_i1; } } else memset(kdata, 0, _IOC_SIZE(cmd)); if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, kdata, file_priv); else { lock_kernel(); retcode = func(dev, kdata, file_priv); unlock_kernel(); } if (cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EFAULT; } } err_i1: if (kdata != stack_kdata) kfree(kdata); atomic_dec(&dev->ioctl_count); if (retcode) DRM_DEBUG("ret = %x\n", retcode); return retcode; } EXPORT_SYMBOL(drm_ioctl); struct drm_local_map *drm_getsarea(struct drm_device *dev) { struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && (entry->map->flags & _DRM_CONTAINS_LOCK)) { return entry->map; } } return NULL; } EXPORT_SYMBOL(drm_getsarea);
gpl-2.0
liqiang199105/linux
drivers/gpu/drm/radeon/radeon_object.c
256
22576
/* * Copyright 2009 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * */ /* * Authors: * Jerome Glisse <glisse@freedesktop.org> * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> * Dave Airlie */ #include <linux/list.h> #include <linux/slab.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon.h" #include "radeon_trace.h" int radeon_ttm_init(struct radeon_device *rdev); void radeon_ttm_fini(struct radeon_device *rdev); static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); /* * To exclude mutual BO access we rely on bo_reserve exclusion, as all * function are calling it. 
*/ static void radeon_update_memory_usage(struct radeon_bo *bo, unsigned mem_type, int sign) { struct radeon_device *rdev = bo->rdev; u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; switch (mem_type) { case TTM_PL_TT: if (sign > 0) atomic64_add(size, &rdev->gtt_usage); else atomic64_sub(size, &rdev->gtt_usage); break; case TTM_PL_VRAM: if (sign > 0) atomic64_add(size, &rdev->vram_usage); else atomic64_sub(size, &rdev->vram_usage); break; } } static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct radeon_bo *bo; bo = container_of(tbo, struct radeon_bo, tbo); radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); WARN_ON(!list_empty(&bo->va)); drm_gem_object_release(&bo->gem_base); kfree(bo); } bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) { if (bo->destroy == &radeon_ttm_bo_destroy) return true; return false; } void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { u32 c = 0, i; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) { /* Try placing BOs which don't need CPU access outside of the * CPU accessible part of VRAM */ if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) && rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) { rbo->placements[c].fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; } rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; } if (domain & RADEON_GEM_DOMAIN_GTT) { if (rbo->flags & RADEON_GEM_GTT_UC) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; } else if ((rbo->flags & RADEON_GEM_GTT_WC) || (rbo->rdev->flags & RADEON_IS_AGP)) { rbo->placements[c].fpfn = 0; 
rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; } else { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; } } if (domain & RADEON_GEM_DOMAIN_CPU) { if (rbo->flags & RADEON_GEM_GTT_UC) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; } else if ((rbo->flags & RADEON_GEM_GTT_WC) || rbo->rdev->flags & RADEON_IS_AGP) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; } else { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; } } if (!c) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; } rbo->placement.num_placement = c; rbo->placement.num_busy_placement = c; for (i = 0; i < c; ++i) { if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && !rbo->placements[i].fpfn) rbo->placements[i].lpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; else rbo->placements[i].lpfn = 0; } } int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, struct reservation_object *resv, struct radeon_bo **bo_ptr) { struct radeon_bo *bo; enum ttm_bo_type type; unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; size_t acc_size; int r; size = ALIGN(size, PAGE_SIZE); if (kernel) { type = ttm_bo_type_kernel; } else if (sg) { type = ttm_bo_type_sg; } else { type = ttm_bo_type_device; } *bo_ptr = NULL; acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); if (unlikely(r)) { kfree(bo); return r; } bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); 
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_CPU); bo->flags = flags; /* PCI GART is always snooped */ if (!(rdev->flags & RADEON_IS_PCIE)) bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); #ifdef CONFIG_X86_32 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 */ bo->flags &= ~RADEON_GEM_GTT_WC; #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) /* Don't try to enable write-combining when it can't work, or things * may be slow * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 */ #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " "better performance thanks to write-combining\n"); bo->flags &= ~RADEON_GEM_GTT_WC; #endif radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { return r; } *bo_ptr = bo; trace_radeon_bo_create(bo); return 0; } int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) { bool is_iomem; int r; if (bo->kptr) { if (ptr) { *ptr = bo->kptr; } return 0; } r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); if (r) { return r; } bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); if (ptr) { *ptr = bo->kptr; } radeon_bo_check_tiling(bo, 0, 0); return 0; } void radeon_bo_kunmap(struct radeon_bo *bo) { if (bo->kptr == NULL) return; bo->kptr = NULL; radeon_bo_check_tiling(bo, 0, 0); ttm_bo_kunmap(&bo->kmap); } struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) { if (bo == NULL) return NULL; ttm_bo_reference(&bo->tbo); return bo; } void radeon_bo_unref(struct radeon_bo **bo) { struct ttm_buffer_object *tbo; struct radeon_device 
*rdev; if ((*bo) == NULL) return; rdev = (*bo)->rdev; tbo = &((*bo)->tbo); ttm_bo_unref(&tbo); if (tbo == NULL) *bo = NULL; } int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, u64 *gpu_addr) { int r, i; if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) return -EPERM; if (bo->pin_count) { bo->pin_count++; if (gpu_addr) *gpu_addr = radeon_bo_gpu_offset(bo); if (max_offset != 0) { u64 domain_start; if (domain == RADEON_GEM_DOMAIN_VRAM) domain_start = bo->rdev->mc.vram_start; else domain_start = bo->rdev->mc.gtt_start; WARN_ON_ONCE(max_offset < (radeon_bo_gpu_offset(bo) - domain_start)); } return 0; } radeon_ttm_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { /* force to pin into visible video ram */ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) bo->placements[i].lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; else bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { bo->pin_count = 1; if (gpu_addr != NULL) *gpu_addr = radeon_bo_gpu_offset(bo); if (domain == RADEON_GEM_DOMAIN_VRAM) bo->rdev->vram_pin_size += radeon_bo_size(bo); else bo->rdev->gart_pin_size += radeon_bo_size(bo); } else { dev_err(bo->rdev->dev, "%p pin failed\n", bo); } return r; } int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) { return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); } int radeon_bo_unpin(struct radeon_bo *bo) { int r, i; if (!bo->pin_count) { dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); return 0; } bo->pin_count--; if (bo->pin_count) return 0; for (i = 0; i < bo->placement.num_placement; i++) { bo->placements[i].lpfn = 0; bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r 
== 0)) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) bo->rdev->vram_pin_size -= radeon_bo_size(bo); else bo->rdev->gart_pin_size -= radeon_bo_size(bo); } else { dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); } return r; } int radeon_bo_evict_vram(struct radeon_device *rdev) { /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ if (0 && (rdev->flags & RADEON_IS_IGP)) { if (rdev->mc.igp_sideport_enabled == false) /* Useless to evict on IGP chips */ return 0; } return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); } void radeon_bo_force_delete(struct radeon_device *rdev) { struct radeon_bo *bo, *n; if (list_empty(&rdev->gem.objects)) { return; } dev_err(rdev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { mutex_lock(&rdev->ddev->struct_mutex); dev_err(rdev->dev, "%p %p %lu %lu force free\n", &bo->gem_base, bo, (unsigned long)bo->gem_base.size, *((unsigned long *)&bo->gem_base.refcount)); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); /* this should unref the ttm bo */ drm_gem_object_unreference(&bo->gem_base); mutex_unlock(&rdev->ddev->struct_mutex); } } int radeon_bo_init(struct radeon_device *rdev) { /* Add an MTRR for the VRAM */ if (!rdev->fastfb_working) { rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, rdev->mc.aper_size); } DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", rdev->mc.mc_vram_size >> 20, (unsigned long long)rdev->mc.aper_size >> 20); DRM_INFO("RAM width %dbits %cDR\n", rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); return radeon_ttm_init(rdev); } void radeon_bo_fini(struct radeon_device *rdev) { radeon_ttm_fini(rdev); arch_phys_wc_del(rdev->mc.vram_mtrr); } /* Returns how many bytes TTM can move per IB. 
*/ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) { u64 real_vram_size = rdev->mc.real_vram_size; u64 vram_usage = atomic64_read(&rdev->vram_usage); /* This function is based on the current VRAM usage. * * - If all of VRAM is free, allow relocating the number of bytes that * is equal to 1/4 of the size of VRAM for this IB. * - If more than one half of VRAM is occupied, only allow relocating * 1 MB of data for this IB. * * - From 0 to one half of used VRAM, the threshold decreases * linearly. * __________________ * 1/4 of -|\ | * VRAM | \ | * | \ | * | \ | * | \ | * | \ | * | \ | * | \________|1 MB * |----------------| * VRAM 0 % 100 % * used used * * Note: It's a threshold, not a limit. The threshold must be crossed * for buffer relocations to stop, so any buffer of an arbitrary size * can be moved as long as the threshold isn't crossed before * the relocation takes place. We don't want to disable buffer * relocations completely. * * The idea is that buffers should be placed in VRAM at creation time * and TTM should only do a minimum number of relocations during * command submission. In practice, you need to submit at least * a dozen IBs to move all buffers to VRAM if they are in GTT. * * Also, things can get pretty crazy under memory pressure and actual * VRAM usage can change a lot, so playing safe even at 50% does * consistently increase performance. */ u64 half_vram = real_vram_size >> 1; u64 half_free_vram = vram_usage >= half_vram ? 
0 : half_vram - vram_usage; u64 bytes_moved_threshold = half_free_vram >> 1; return max(bytes_moved_threshold, 1024*1024ull); } int radeon_bo_list_validate(struct radeon_device *rdev, struct ww_acquire_ctx *ticket, struct list_head *head, int ring) { struct radeon_bo_list *lobj; struct list_head duplicates; int r; u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); INIT_LIST_HEAD(&duplicates); r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); if (unlikely(r != 0)) { return r; } list_for_each_entry(lobj, head, tv.head) { struct radeon_bo *bo = lobj->robj; if (!bo->pin_count) { u32 domain = lobj->prefered_domains; u32 allowed = lobj->allowed_domains; u32 current_domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type); /* Check if this buffer will be moved and don't move it * if we have moved too many buffers for this IB already. * * Note that this allows moving at least one buffer of * any size, because it doesn't take the current "bo" * into account. We don't want to disallow buffer moves * completely. 
*/ if ((allowed & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ domain = current_domain; } retry: radeon_ttm_placement_from_domain(bo, domain); if (ring == R600_RING_TYPE_UVD_INDEX) radeon_uvd_force_into_uvd_segment(bo, allowed); initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); bytes_moved += atomic64_read(&rdev->num_bytes_moved) - initial_bytes_moved; if (unlikely(r)) { if (r != -ERESTARTSYS && domain != lobj->allowed_domains) { domain = lobj->allowed_domains; goto retry; } ttm_eu_backoff_reservation(ticket, head); return r; } } lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; } list_for_each_entry(lobj, &duplicates, tv.head) { lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); lobj->tiling_flags = lobj->robj->tiling_flags; } return 0; } int radeon_bo_get_surface_reg(struct radeon_bo *bo) { struct radeon_device *rdev = bo->rdev; struct radeon_surface_reg *reg; struct radeon_bo *old_object; int steal; int i; lockdep_assert_held(&bo->tbo.resv->lock.base); if (!bo->tiling_flags) return 0; if (bo->surface_reg >= 0) { reg = &rdev->surface_regs[bo->surface_reg]; i = bo->surface_reg; goto out; } steal = -1; for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { reg = &rdev->surface_regs[i]; if (!reg->bo) break; old_object = reg->bo; if (old_object->pin_count == 0) steal = i; } /* if we are all out */ if (i == RADEON_GEM_MAX_SURFACES) { if (steal == -1) return -ENOMEM; /* find someone with a surface reg and nuke their BO */ reg = &rdev->surface_regs[steal]; old_object = reg->bo; /* blow away the mapping */ DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); ttm_bo_unmap_virtual(&old_object->tbo); old_object->surface_reg = -1; i = steal; } bo->surface_reg = i; reg->bo = bo; out: radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, bo->tbo.mem.start << 
PAGE_SHIFT, bo->tbo.num_pages << PAGE_SHIFT); return 0; } static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) { struct radeon_device *rdev = bo->rdev; struct radeon_surface_reg *reg; if (bo->surface_reg == -1) return; reg = &rdev->surface_regs[bo->surface_reg]; radeon_clear_surface_reg(rdev, bo->surface_reg); reg->bo = NULL; bo->surface_reg = -1; } int radeon_bo_set_tiling_flags(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch) { struct radeon_device *rdev = bo->rdev; int r; if (rdev->family >= CHIP_CEDAR) { unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK; switch (bankw) { case 0: case 1: case 2: case 4: case 8: break; default: return -EINVAL; } switch (bankh) { case 0: case 1: case 2: case 4: case 8: break; default: return -EINVAL; } switch (mtaspect) { case 0: case 1: case 2: case 4: case 8: break; default: return -EINVAL; } if (tilesplit > 6) { return -EINVAL; } if (stilesplit > 6) { return -EINVAL; } } r = radeon_bo_reserve(bo, false); if (unlikely(r != 0)) return r; bo->tiling_flags = tiling_flags; bo->pitch = pitch; radeon_bo_unreserve(bo); return 0; } void radeon_bo_get_tiling_flags(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch) { lockdep_assert_held(&bo->tbo.resv->lock.base); if (tiling_flags) *tiling_flags = bo->tiling_flags; if (pitch) *pitch = bo->pitch; } int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop) { if (!force_drop) lockdep_assert_held(&bo->tbo.resv->lock.base); if 
(!(bo->tiling_flags & RADEON_TILING_SURFACE)) return 0; if (force_drop) { radeon_bo_clear_surface_reg(bo); return 0; } if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { if (!has_moved) return 0; if (bo->surface_reg >= 0) radeon_bo_clear_surface_reg(bo); return 0; } if ((bo->surface_reg >= 0) && !has_moved) return 0; return radeon_bo_get_surface_reg(bo); } void radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct radeon_bo *rbo; if (!radeon_ttm_bo_is_radeon_bo(bo)) return; rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 1); radeon_vm_bo_invalidate(rbo->rdev, rbo); /* update statistics */ if (!new_mem) return; radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); radeon_update_memory_usage(rbo, new_mem->mem_type, 1); } int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct radeon_device *rdev; struct radeon_bo *rbo; unsigned long offset, size, lpfn; int i, r; if (!radeon_ttm_bo_is_radeon_bo(bo)) return 0; rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; if (bo->mem.mem_type != TTM_PL_VRAM) return 0; size = bo->mem.num_pages << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT; if ((offset + size) <= rdev->mc.visible_vram_size) return 0; /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; for (i = 0; i < rbo->placement.num_placement; i++) { /* Force into visible VRAM */ if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn)) rbo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &rbo->placement, false, false); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); return ttm_bo_validate(bo, &rbo->placement, false, false); } else if (unlikely(r != 0)) { return r; } offset = bo->mem.start << PAGE_SHIFT; /* this should never happen */ if ((offset + size) > rdev->mc.visible_vram_size) return -EINVAL; return 0; } int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) { int r; r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) return r; if (mem_type) *mem_type = bo->tbo.mem.mem_type; r = ttm_bo_wait(&bo->tbo, true, true, no_wait); ttm_bo_unreserve(&bo->tbo); return r; } /** * radeon_bo_fence - add fence to buffer object * * @bo: buffer object in question * @fence: fence to add * @shared: true if fence should be added shared * */ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, bool shared) { struct reservation_object *resv = bo->tbo.resv; if (shared) reservation_object_add_shared_fence(resv, &fence->base); else reservation_object_add_excl_fence(resv, &fence->base); }
gpl-2.0
segment-routing/sr-ipv6
drivers/net/can/m_can/m_can.c
256
32169
/* * CAN bus driver for Bosch M_CAN controller * * Copyright (C) 2014 Freescale Semiconductor, Inc. * Dong Aisheng <b29396@freescale.com> * * Bosch M_CAN user manual can be obtained from: * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ * mcan_users_manual_v302.pdf * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/can/dev.h> /* napi related */ #define M_CAN_NAPI_WEIGHT 64 /* message ram configuration data length */ #define MRAM_CFG_LEN 8 /* registers definition */ enum m_can_reg { M_CAN_CREL = 0x0, M_CAN_ENDN = 0x4, M_CAN_CUST = 0x8, M_CAN_FBTP = 0xc, M_CAN_TEST = 0x10, M_CAN_RWD = 0x14, M_CAN_CCCR = 0x18, M_CAN_BTP = 0x1c, M_CAN_TSCC = 0x20, M_CAN_TSCV = 0x24, M_CAN_TOCC = 0x28, M_CAN_TOCV = 0x2c, M_CAN_ECR = 0x40, M_CAN_PSR = 0x44, M_CAN_IR = 0x50, M_CAN_IE = 0x54, M_CAN_ILS = 0x58, M_CAN_ILE = 0x5c, M_CAN_GFC = 0x80, M_CAN_SIDFC = 0x84, M_CAN_XIDFC = 0x88, M_CAN_XIDAM = 0x90, M_CAN_HPMS = 0x94, M_CAN_NDAT1 = 0x98, M_CAN_NDAT2 = 0x9c, M_CAN_RXF0C = 0xa0, M_CAN_RXF0S = 0xa4, M_CAN_RXF0A = 0xa8, M_CAN_RXBC = 0xac, M_CAN_RXF1C = 0xb0, M_CAN_RXF1S = 0xb4, M_CAN_RXF1A = 0xb8, M_CAN_RXESC = 0xbc, M_CAN_TXBC = 0xc0, M_CAN_TXFQS = 0xc4, M_CAN_TXESC = 0xc8, M_CAN_TXBRP = 0xcc, M_CAN_TXBAR = 0xd0, M_CAN_TXBCR = 0xd4, M_CAN_TXBTO = 0xd8, M_CAN_TXBCF = 0xdc, M_CAN_TXBTIE = 0xe0, M_CAN_TXBCIE = 0xe4, M_CAN_TXEFC = 0xf0, M_CAN_TXEFS = 0xf4, M_CAN_TXEFA = 0xf8, }; /* m_can lec values */ enum m_can_lec_type { LEC_NO_ERROR = 0, LEC_STUFF_ERROR, LEC_FORM_ERROR, LEC_ACK_ERROR, LEC_BIT1_ERROR, LEC_BIT0_ERROR, LEC_CRC_ERROR, LEC_UNUSED, }; enum 
m_can_mram_cfg { MRAM_SIDF = 0, MRAM_XIDF, MRAM_RXF0, MRAM_RXF1, MRAM_RXB, MRAM_TXE, MRAM_TXB, MRAM_CFG_NUM, }; /* Fast Bit Timing & Prescaler Register (FBTP) */ #define FBTR_FBRP_MASK 0x1f #define FBTR_FBRP_SHIFT 16 #define FBTR_FTSEG1_SHIFT 8 #define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) #define FBTR_FTSEG2_SHIFT 4 #define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) #define FBTR_FSJW_SHIFT 0 #define FBTR_FSJW_MASK 0x3 /* Test Register (TEST) */ #define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ #define CCCR_TEST BIT(7) #define CCCR_CMR_MASK 0x3 #define CCCR_CMR_SHIFT 10 #define CCCR_CMR_CANFD 0x1 #define CCCR_CMR_CANFD_BRS 0x2 #define CCCR_CMR_CAN 0x3 #define CCCR_CME_MASK 0x3 #define CCCR_CME_SHIFT 8 #define CCCR_CME_CAN 0 #define CCCR_CME_CANFD 0x1 #define CCCR_CME_CANFD_BRS 0x2 #define CCCR_TEST BIT(7) #define CCCR_MON BIT(5) #define CCCR_CCE BIT(1) #define CCCR_INIT BIT(0) #define CCCR_CANFD 0x10 /* Bit Timing & Prescaler Register (BTP) */ #define BTR_BRP_MASK 0x3ff #define BTR_BRP_SHIFT 16 #define BTR_TSEG1_SHIFT 8 #define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) #define BTR_TSEG2_SHIFT 4 #define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) #define BTR_SJW_SHIFT 0 #define BTR_SJW_MASK 0xf /* Error Counter Register(ECR) */ #define ECR_RP BIT(15) #define ECR_REC_SHIFT 8 #define ECR_REC_MASK (0x7f << ECR_REC_SHIFT) #define ECR_TEC_SHIFT 0 #define ECR_TEC_MASK 0xff /* Protocol Status Register(PSR) */ #define PSR_BO BIT(7) #define PSR_EW BIT(6) #define PSR_EP BIT(5) #define PSR_LEC_MASK 0x7 /* Interrupt Register(IR) */ #define IR_ALL_INT 0xffffffff #define IR_STE BIT(31) #define IR_FOE BIT(30) #define IR_ACKE BIT(29) #define IR_BE BIT(28) #define IR_CRCE BIT(27) #define IR_WDI BIT(26) #define IR_BO BIT(25) #define IR_EW BIT(24) #define IR_EP BIT(23) #define IR_ELO BIT(22) #define IR_BEU BIT(21) #define IR_BEC BIT(20) #define IR_DRX BIT(19) #define IR_TOO BIT(18) #define IR_MRAF BIT(17) #define IR_TSW BIT(16) #define IR_TEFL BIT(15) #define IR_TEFF BIT(14) 
#define IR_TEFW BIT(13) #define IR_TEFN BIT(12) #define IR_TFE BIT(11) #define IR_TCF BIT(10) #define IR_TC BIT(9) #define IR_HPM BIT(8) #define IR_RF1L BIT(7) #define IR_RF1F BIT(6) #define IR_RF1W BIT(5) #define IR_RF1N BIT(4) #define IR_RF0L BIT(3) #define IR_RF0F BIT(2) #define IR_RF0W BIT(1) #define IR_RF0N BIT(0) #define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) #define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) #define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \ IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ IR_RF1L | IR_RF0L) #define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS) /* Interrupt Line Select (ILS) */ #define ILS_ALL_INT0 0x0 #define ILS_ALL_INT1 0xFFFFFFFF /* Interrupt Line Enable (ILE) */ #define ILE_EINT0 BIT(0) #define ILE_EINT1 BIT(1) /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ #define RXFC_FWM_OFF 24 #define RXFC_FWM_MASK 0x7f #define RXFC_FWM_1 (1 << RXFC_FWM_OFF) #define RXFC_FS_OFF 16 #define RXFC_FS_MASK 0x7f /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ #define RXFS_RFL BIT(25) #define RXFS_FF BIT(24) #define RXFS_FPI_OFF 16 #define RXFS_FPI_MASK 0x3f0000 #define RXFS_FGI_OFF 8 #define RXFS_FGI_MASK 0x3f00 #define RXFS_FFL_MASK 0x7f /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ #define M_CAN_RXESC_8BYTES 0x0 #define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ #define TXBC_NDTB_OFF 16 #define TXBC_NDTB_MASK 0x3f /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 #define TXESC_TBDS_64BYTES 0x7 /* Tx Event FIFO Con.guration (TXEFC) */ #define TXEFC_EFS_OFF 16 #define TXEFC_EFS_MASK 0x3f /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 #define RXF0_ELEMENT_SIZE 72 #define RXF1_ELEMENT_SIZE 72 #define RXB_ELEMENT_SIZE 16 #define TXE_ELEMENT_SIZE 8 #define TXB_ELEMENT_SIZE 72 /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 #define M_CAN_FIFO_DLC 0x4 #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) /* Rx Buffer Element */ /* 
R0 */ #define RX_BUF_ESI BIT(31) #define RX_BUF_XTD BIT(30) #define RX_BUF_RTR BIT(29) /* R1 */ #define RX_BUF_ANMF BIT(31) #define RX_BUF_EDL BIT(21) #define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ /* R0 */ #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) /* address offset and element number for each FIFO/Buffer in the Message RAM */ struct mram_cfg { u16 off; u8 num; }; /* m_can private data structure */ struct m_can_priv { struct can_priv can; /* must be the first member */ struct napi_struct napi; struct net_device *dev; struct device *device; struct clk *hclk; struct clk *cclk; void __iomem *base; u32 irqstatus; /* message ram configuration */ void __iomem *mram_base; struct mram_cfg mcfg[MRAM_CFG_NUM]; }; static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) { return readl(priv->base + reg); } static inline void m_can_write(const struct m_can_priv *priv, enum m_can_reg reg, u32 val) { writel(val, priv->base + reg); } static inline u32 m_can_fifo_read(const struct m_can_priv *priv, u32 fgi, unsigned int offset) { return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + offset); } static inline void m_can_fifo_write(const struct m_can_priv *priv, u32 fpi, unsigned int offset, u32 val) { writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + offset); } static inline void m_can_config_endisable(const struct m_can_priv *priv, bool enable) { u32 cccr = m_can_read(priv, M_CAN_CCCR); u32 timeout = 10; u32 val = 0; if (enable) { /* enable m_can configuration */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); } /* there's a delay for module initialization */ if (enable) val = CCCR_INIT | CCCR_CCE; while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { if (timeout 
== 0) { netdev_warn(priv->dev, "Failed to init module\n"); return; } timeout--; udelay(1); } } static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) { m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); } static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) { m_can_write(priv, M_CAN_ILE, 0x0); } static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { struct net_device_stats *stats = &dev->stats; struct m_can_priv *priv = netdev_priv(dev); struct canfd_frame *cf; struct sk_buff *skb; u32 id, fgi, dlc; int i; /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); if (dlc & RX_BUF_EDL) skb = alloc_canfd_skb(dev, &cf); else skb = alloc_can_skb(dev, (struct can_frame **)&cf); if (!skb) { stats->rx_dropped++; return; } if (dlc & RX_BUF_EDL) cf->len = can_dlc2len((dlc >> 16) & 0x0F); else cf->len = get_can_dlc((dlc >> 16) & 0x0F); id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (id >> 18) & CAN_SFF_MASK; if (id & RX_BUF_ESI) { cf->flags |= CANFD_ESI; netdev_dbg(dev, "ESI Error\n"); } if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { if (dlc & RX_BUF_BRS) cf->flags |= CANFD_BRS; for (i = 0; i < cf->len; i += 4) *(u32 *)(cf->data + i) = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ m_can_write(priv, M_CAN_RXF0A, fgi); stats->rx_packets++; stats->rx_bytes += cf->len; netif_receive_skb(skb); } static int m_can_do_rx_poll(struct net_device *dev, int quota) { struct m_can_priv *priv = netdev_priv(dev); u32 pkts = 0; u32 rxfs; rxfs = m_can_read(priv, M_CAN_RXF0S); if (!(rxfs & RXFS_FFL_MASK)) { netdev_dbg(dev, "no messages in fifo0\n"); return 0; } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { if (rxfs & RXFS_RFL) netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); 
m_can_read_fifo(dev, rxfs); quota--; pkts++; rxfs = m_can_read(priv, M_CAN_RXF0S); } if (pkts) can_led_event(dev, CAN_LED_EVENT_RX); return pkts; } static int m_can_handle_lost_msg(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *frame; netdev_err(dev, "msg lost in rxf0\n"); stats->rx_errors++; stats->rx_over_errors++; skb = alloc_can_err_skb(dev, &frame); if (unlikely(!skb)) return 0; frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; netif_receive_skb(skb); return 1; } static int m_can_handle_lec_err(struct net_device *dev, enum m_can_lec_type lec_type) { struct m_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; priv->can.can_stats.bus_error++; stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); cf->data[2] |= CAN_ERR_PROT_FORM; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; } stats->rx_packets++; stats->rx_bytes += cf->can_dlc; netif_receive_skb(skb); return 1; } static int __m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); unsigned int ecr; ecr = m_can_read(priv, 
M_CAN_ECR); bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; bec->txerr = ecr & ECR_TEC_MASK; return 0; } static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); int err; err = clk_prepare_enable(priv->hclk); if (err) return err; err = clk_prepare_enable(priv->cclk); if (err) { clk_disable_unprepare(priv->hclk); return err; } __m_can_get_berr_counter(dev, bec); clk_disable_unprepare(priv->cclk); clk_disable_unprepare(priv->hclk); return 0; } static int m_can_handle_state_change(struct net_device *dev, enum can_state new_state) { struct m_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; unsigned int ecr; switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* error warning state */ priv->can.can_stats.error_warning++; priv->can.state = CAN_STATE_ERROR_WARNING; break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ priv->can.can_stats.error_passive++; priv->can.state = CAN_STATE_ERROR_PASSIVE; break; case CAN_STATE_BUS_OFF: /* bus-off state */ priv->can.state = CAN_STATE_BUS_OFF; m_can_disable_all_interrupts(priv); priv->can.can_stats.bus_off++; can_bus_off(dev); break; default: break; } /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; __m_can_get_berr_counter(dev, &bec); switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* error warning state */ cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ cf->can_id |= CAN_ERR_CRTL; ecr = m_can_read(priv, M_CAN_ECR); if (ecr & ECR_RP) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; break; case CAN_STATE_BUS_OFF: /* bus-off state */ cf->can_id |= CAN_ERR_BUSOFF; break; default: break; } stats->rx_packets++; stats->rx_bytes += cf->can_dlc; netif_receive_skb(skb); return 1; } static int m_can_handle_state_errors(struct net_device *dev, u32 psr) { struct m_can_priv *priv = netdev_priv(dev); int work_done = 0; if ((psr & PSR_EW) && (priv->can.state != CAN_STATE_ERROR_WARNING)) { netdev_dbg(dev, "entered error warning state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_WARNING); } if ((psr & PSR_EP) && (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if ((psr & PSR_BO) && (priv->can.state != CAN_STATE_BUS_OFF)) { netdev_dbg(dev, "entered error bus off state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); } return work_done; } static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); if (irqstatus & IR_ELO) netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); if (irqstatus & IR_BEC) netdev_err(dev, "Bit Error Corrected\n"); if (irqstatus & IR_TOO) netdev_err(dev, "Timeout reached\n"); if (irqstatus & IR_MRAF) netdev_err(dev, "Message RAM access failure occurred\n"); } static inline bool is_lec_err(u32 psr) { psr &= LEC_UNUSED; return psr && (psr != LEC_UNUSED); } static int m_can_handle_bus_errors(struct net_device *dev, u32 
irqstatus, u32 psr) { struct m_can_priv *priv = netdev_priv(dev); int work_done = 0; if (irqstatus & IR_RF0L) work_done += m_can_handle_lost_msg(dev); /* handle lec errors on the bus */ if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && is_lec_err(psr)) work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); /* other unproccessed error interrupts */ m_can_handle_other_err(dev, irqstatus); return work_done; } static int m_can_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; struct m_can_priv *priv = netdev_priv(dev); int work_done = 0; u32 irqstatus, psr; irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); if (!irqstatus) goto end; psr = m_can_read(priv, M_CAN_PSR); if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); if (irqstatus & IR_ERR_BUS) work_done += m_can_handle_bus_errors(dev, irqstatus, psr); if (irqstatus & IR_RF0N) work_done += m_can_do_rx_poll(dev, (quota - work_done)); if (work_done < quota) { napi_complete(napi); m_can_enable_all_interrupts(priv); } end: return work_done; } static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct m_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; u32 ir; ir = m_can_read(priv, M_CAN_IR); if (!ir) return IRQ_NONE; /* ACK all irqs */ if (ir & IR_ALL_INT) m_can_write(priv, M_CAN_IR, ir); /* schedule NAPI in case of * - rx IRQ * - state change IRQ * - bus error IRQ and bus error reporting */ if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { priv->irqstatus = ir; m_can_disable_all_interrupts(priv); napi_schedule(&priv->napi); } /* transmission complete interrupt */ if (ir & IR_TC) { stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); netif_wake_queue(dev); } return IRQ_HANDLED; } static const struct can_bittiming_const m_can_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + 
phase_seg1 */ .tseg1_max = 64, .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ .tseg2_max = 16, .sjw_max = 16, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; static const struct can_bittiming_const m_can_data_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 16, .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 32, .brp_inc = 1, }; static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; brp = bt->brp - 1; sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); m_can_write(priv, M_CAN_BTP, reg_btp); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { brp = dbt->brp - 1; sjw = dbt->sjw - 1; tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; tseg2 = dbt->phase_seg2 - 1; reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | (tseg1 << FBTR_FTSEG1_SHIFT) | (tseg2 << FBTR_FTSEG2_SHIFT); m_can_write(priv, M_CAN_FBTP, reg_btp); } return 0; } /* Configure M_CAN chip: * - set rx buffer/fifo element size * - configure rx fifo * - accept non-matching frame into fifo 0 * - configure tx buffer * - configure mode * - setup bittiming */ static void m_can_chip_config(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); u32 cccr, test; m_can_config_endisable(priv, true); /* RX Buffer/FIFO Element Size 64 bytes data field */ m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); /* only support one Tx Buffer currently */ m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | priv->mcfg[MRAM_TXB].off); /* support 64 bytes 
payload */ m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | priv->mcfg[MRAM_TXE].off); /* rx fifo configuration, blocking mode, fifo size 1 */ m_can_write(priv, M_CAN_RXF0C, (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); m_can_write(priv, M_CAN_RXF1C, (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); cccr = m_can_read(priv, M_CAN_CCCR); cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cccr |= CCCR_MON; if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { cccr |= CCCR_TEST; test |= TEST_LBCK; } if (priv->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); /* enable interrupts */ m_can_write(priv, M_CAN_IR, IR_ALL_INT); if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); else m_can_write(priv, M_CAN_IE, IR_ALL_INT); /* route all interrupts to INT0 */ m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); /* set bittiming params */ m_can_set_bittiming(dev); m_can_config_endisable(priv, false); } static void m_can_start(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); /* basic m_can configuration */ m_can_chip_config(dev); priv->can.state = CAN_STATE_ERROR_ACTIVE; m_can_enable_all_interrupts(priv); } static int m_can_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: m_can_start(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static void free_m_can_dev(struct net_device *dev) { free_candev(dev); } static struct net_device *alloc_m_can_dev(void) { struct net_device *dev; struct m_can_priv *priv; dev = alloc_candev(sizeof(*priv), 1); if (!dev) 
return NULL; priv = netdev_priv(dev); netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); priv->dev = dev; priv->can.bittiming_const = &m_can_bittiming_const; priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD; return dev; } static int m_can_open(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); int err; err = clk_prepare_enable(priv->hclk); if (err) return err; err = clk_prepare_enable(priv->cclk); if (err) goto exit_disable_hclk; /* open the can device */ err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); goto exit_disable_cclk; } /* register interrupt handler */ err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, dev); if (err < 0) { netdev_err(dev, "failed to request interrupt\n"); goto exit_irq_fail; } /* start the m_can controller */ m_can_start(dev); can_led_event(dev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(dev); return 0; exit_irq_fail: close_candev(dev); exit_disable_cclk: clk_disable_unprepare(priv->cclk); exit_disable_hclk: clk_disable_unprepare(priv->hclk); return err; } static void m_can_stop(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); /* disable all interrupts */ m_can_disable_all_interrupts(priv); clk_disable_unprepare(priv->hclk); clk_disable_unprepare(priv->cclk); /* set the state as STOPPED */ priv->can.state = CAN_STATE_STOPPED; } static int m_can_close(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&priv->napi); m_can_stop(dev); free_irq(dev->irq, 
dev); close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); return 0; } static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); struct canfd_frame *cf = (struct canfd_frame *)skb->data; u32 id, cccr; int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); if (cf->can_id & CAN_EFF_FLAG) { id = cf->can_id & CAN_EFF_MASK; id |= TX_BUF_XTD; } else { id = ((cf->can_id & CAN_SFF_MASK) << 18); } if (cf->can_id & CAN_RTR_FLAG) id |= TX_BUF_RTR; /* message ram configuration */ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); for (i = 0; i < cf->len; i += 4) m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); can_put_echo_skb(skb, dev, 0); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { cccr = m_can_read(priv, M_CAN_CCCR); cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; else cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; } else { cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; } m_can_write(priv, M_CAN_CCCR, cccr); } /* enable first TX buffer to start transfer */ m_can_write(priv, M_CAN_TXBTIE, 0x1); m_can_write(priv, M_CAN_TXBAR, 0x1); return NETDEV_TX_OK; } static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, .ndo_change_mtu = can_change_mtu, }; static int register_m_can_dev(struct net_device *dev) { dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &m_can_netdev_ops; return register_candev(dev); } static int m_can_of_parse_mram(struct platform_device *pdev, struct m_can_priv *priv) { struct device_node *np = pdev->dev.of_node; struct resource *res; void __iomem *addr; u32 out_val[MRAM_CFG_LEN]; int i, start, end, ret; /* message ram could be shared */ res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); if (!res) return -ENODEV; addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!addr) return -ENOMEM; /* get message ram configuration */ ret = of_property_read_u32_array(np, "bosch,mram-cfg", out_val, sizeof(out_val) / 4); if (ret) { dev_err(&pdev->dev, "can not get message ram configuration\n"); return -ENODEV; } priv->mram_base = addr; priv->mcfg[MRAM_SIDF].off = out_val[0]; priv->mcfg[MRAM_SIDF].num = out_val[1]; priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; priv->mcfg[MRAM_XIDF].num = out_val[2]; priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK; priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK; priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; priv->mcfg[MRAM_RXB].num = out_val[5]; priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; priv->mcfg[MRAM_TXE].num = out_val[6]; priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK; dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", priv->mram_base, priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); /* initialize the entire Message RAM in use to avoid possible * ECC/parity checksum 
errors when reading an uninitialized buffer */ start = priv->mcfg[MRAM_SIDF].off; end = priv->mcfg[MRAM_TXB].off + priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; for (i = start; i < end; i += 4) writel(0x0, priv->mram_base + i); return 0; } static int m_can_plat_probe(struct platform_device *pdev) { struct net_device *dev; struct m_can_priv *priv; struct resource *res; void __iomem *addr; struct clk *hclk, *cclk; int irq, ret; hclk = devm_clk_get(&pdev->dev, "hclk"); cclk = devm_clk_get(&pdev->dev, "cclk"); if (IS_ERR(hclk) || IS_ERR(cclk)) { dev_err(&pdev->dev, "no clock find\n"); return -ENODEV; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); addr = devm_ioremap_resource(&pdev->dev, res); irq = platform_get_irq_byname(pdev, "int0"); if (IS_ERR(addr) || irq < 0) return -EINVAL; /* allocate the m_can device */ dev = alloc_m_can_dev(); if (!dev) return -ENOMEM; priv = netdev_priv(dev); dev->irq = irq; priv->base = addr; priv->device = &pdev->dev; priv->hclk = hclk; priv->cclk = cclk; priv->can.clock.freq = clk_get_rate(cclk); ret = m_can_of_parse_mram(pdev, priv); if (ret) goto failed_free_dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); ret = register_m_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); goto failed_free_dev; } devm_can_led_init(dev); dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq); return 0; failed_free_dev: free_m_can_dev(dev); return ret; } static __maybe_unused int m_can_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct m_can_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); } /* TODO: enter low power */ priv->can.state = CAN_STATE_SLEEPING; return 0; } static __maybe_unused int m_can_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct m_can_priv *priv = netdev_priv(ndev); /* 
TODO: exit low power */ priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { netif_device_attach(ndev); netif_start_queue(ndev); } return 0; } static void unregister_m_can_dev(struct net_device *dev) { unregister_candev(dev); } static int m_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_m_can_dev(dev); platform_set_drvdata(pdev, NULL); free_m_can_dev(dev); return 0; } static const struct dev_pm_ops m_can_pmops = { SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) }; static const struct of_device_id m_can_of_table[] = { { .compatible = "bosch,m_can", .data = NULL }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, m_can_of_table); static struct platform_driver m_can_plat_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = m_can_of_table, .pm = &m_can_pmops, }, .probe = m_can_plat_probe, .remove = m_can_plat_remove, }; module_platform_driver(m_can_plat_driver); MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
gpl-2.0
Shabbypenguin/Cayman-Island-Kernel
drivers/ata/libata-core.c
512
177307
/* * libata-core.c - helper library for ATA * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available from http://www.t13.org/ and * http://www.sata-io.org/ * * Standards documents from: * http://www.t13.org (ATA standards, PCI DMA IDE spec) * http://www.t10.org (SCSI MMC - for ATAPI MMC) * http://www.sata-io.org (SATA) * http://www.compactflash.org (CF) * http://www.qic.org (QIC157 - Tape and DSC) * http://www.ce-ata.org (CE-ATA: not supported) * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/suspend.h> #include <linux/workqueue.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <linux/async.h> #include <linux/log2.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> 
#include <linux/libata.h> #include <asm/byteorder.h> #include <linux/cdrom.h> #include <linux/ratelimit.h> #include "libata.h" /* debounce timing parameters in msecs { interval, duration, timeout } */ const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; const struct ata_port_operations ata_base_port_ops = { .prereset = ata_std_prereset, .postreset = ata_std_postreset, .error_handler = ata_std_error_handler, }; const struct ata_port_operations sata_port_ops = { .inherits = &ata_base_port_ops, .qc_defer = ata_std_qc_defer, .hardreset = sata_std_hardreset, }; static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature); static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); unsigned int ata_print_id = 1; struct workqueue_struct *ata_aux_wq; struct ata_force_param { const char *name; unsigned int cbl; int spd_limit; unsigned long xfer_mask; unsigned int horkage_on; unsigned int horkage_off; unsigned int lflags; }; struct ata_force_ent { int port; int device; struct ata_force_param param; }; static struct ata_force_ent *ata_force_tbl; static int ata_force_tbl_size; static char ata_force_param_buf[PAGE_SIZE] __initdata; /* param_buf is thrown away after initialization, disallow read */ module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); static int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])"); 
static int atapi_dmadir = 0; module_param(atapi_dmadir, int, 0444); MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)"); int atapi_passthru16 = 1; module_param(atapi_passthru16, int, 0444); MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])"); int libata_fua = 0; module_param_named(fua, libata_fua, int, 0444); MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); static int ata_ignore_hpa; module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; module_param_named(dma, libata_dma_mask, int, 0444); MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); static int ata_probe_timeout; module_param(ata_probe_timeout, int, 0444); MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); int libata_noacpi = 0; module_param_named(noacpi, libata_noacpi, int, 0444); MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)"); int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); static int atapi_an; module_param(atapi_an, int, 0444); MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static bool ata_sstatus_online(u32 sstatus) { return (sstatus & 0xf) == 0x3; } /** * ata_link_next - link iteration helper * @link: the previous link, NULL to start * @ap: ATA port containing links to iterate * @mode: iteration mode, one of ATA_LITER_* * * LOCKING: * Host lock or EH context. * * RETURNS: * Pointer to the next link. 
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE && mode != ATA_LITER_PMP_FIRST &&
	       mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			/* PMP attached: start at the first fan-out link */
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			/* slave link, if any, is visited right after the host link */
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the fan-out link array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* fan-out links exhausted; PMP_FIRST visits the host link last */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
*/ struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, enum ata_dev_iter_mode mode) { BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); /* NULL dev indicates start of iteration */ if (!dev) switch (mode) { case ATA_DITER_ENABLED: case ATA_DITER_ALL: dev = link->device; goto check; case ATA_DITER_ENABLED_REVERSE: case ATA_DITER_ALL_REVERSE: dev = link->device + ata_link_max_devices(link) - 1; goto check; } next: /* move to the next one */ switch (mode) { case ATA_DITER_ENABLED: case ATA_DITER_ALL: if (++dev < link->device + ata_link_max_devices(link)) goto check; return NULL; case ATA_DITER_ENABLED_REVERSE: case ATA_DITER_ALL_REVERSE: if (--dev >= link->device) goto check; return NULL; } check: if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && !ata_dev_enabled(dev)) goto next; return dev; } /** * ata_dev_phys_link - find physical link for a device * @dev: ATA device to look up physical link for * * Look up physical link which @dev is attached to. Note that * this is different from @dev->link only when @dev is on slave * link. For all other cases, it's the same as @dev->link. * * LOCKING: * Don't care. * * RETURNS: * Pointer to the found physical link. */ struct ata_link *ata_dev_phys_link(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; if (!ap->slave_link) return dev->link; if (!dev->devno) return &ap->link; return ap->slave_link; } /** * ata_force_cbl - force cable type according to libata.force * @ap: ATA port of interest * * Force cable type according to libata.force and whine about it. * The last entry which has matching port number is used, so it * can be specified as part of device force parameters. For * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the * same effect. * * LOCKING: * EH context. 
*/ void ata_force_cbl(struct ata_port *ap) { int i; for (i = ata_force_tbl_size - 1; i >= 0; i--) { const struct ata_force_ent *fe = &ata_force_tbl[i]; if (fe->port != -1 && fe->port != ap->print_id) continue; if (fe->param.cbl == ATA_CBL_NONE) continue; ap->cbl = fe->param.cbl; ata_port_printk(ap, KERN_NOTICE, "FORCE: cable set to %s\n", fe->param.name); return; } } /** * ata_force_link_limits - force link limits according to libata.force * @link: ATA link of interest * * Force link flags and SATA spd limit according to libata.force * and whine about it. When only the port part is specified * (e.g. 1:), the limit applies to all links connected to both * the host link and all fan-out ports connected via PMP. If the * device part is specified as 0 (e.g. 1.00:), it specifies the * first fan-out link not the host link. Device number 15 always * points to the host link whether PMP is attached or not. If the * controller has slave link, device number 16 points to it. * * LOCKING: * EH context. */ static void ata_force_link_limits(struct ata_link *link) { bool did_spd = false; int linkno = link->pmp; int i; if (ata_is_host_link(link)) linkno += 15; for (i = ata_force_tbl_size - 1; i >= 0; i--) { const struct ata_force_ent *fe = &ata_force_tbl[i]; if (fe->port != -1 && fe->port != link->ap->print_id) continue; if (fe->device != -1 && fe->device != linkno) continue; /* only honor the first spd limit */ if (!did_spd && fe->param.spd_limit) { link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; ata_link_printk(link, KERN_NOTICE, "FORCE: PHY spd limit set to %s\n", fe->param.name); did_spd = true; } /* let lflags stack */ if (fe->param.lflags) { link->flags |= fe->param.lflags; ata_link_printk(link, KERN_NOTICE, "FORCE: link flag 0x%x forced -> 0x%x\n", fe->param.lflags, link->flags); } } } /** * ata_force_xfermask - force xfermask according to libata.force * @dev: ATA device of interest * * Force xfer_mask according to libata.force and whine about it. 
* For consistency with link selection, device number 15 selects * the first device connected to the host link. * * LOCKING: * EH context. */ static void ata_force_xfermask(struct ata_device *dev) { int devno = dev->link->pmp + dev->devno; int alt_devno = devno; int i; /* allow n.15/16 for devices attached to host port */ if (ata_is_host_link(dev->link)) alt_devno += 15; for (i = ata_force_tbl_size - 1; i >= 0; i--) { const struct ata_force_ent *fe = &ata_force_tbl[i]; unsigned long pio_mask, mwdma_mask, udma_mask; if (fe->port != -1 && fe->port != dev->link->ap->print_id) continue; if (fe->device != -1 && fe->device != devno && fe->device != alt_devno) continue; if (!fe->param.xfer_mask) continue; ata_unpack_xfermask(fe->param.xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); if (udma_mask) dev->udma_mask = udma_mask; else if (mwdma_mask) { dev->udma_mask = 0; dev->mwdma_mask = mwdma_mask; } else { dev->udma_mask = 0; dev->mwdma_mask = 0; dev->pio_mask = pio_mask; } ata_dev_printk(dev, KERN_NOTICE, "FORCE: xfer_mask set to %s\n", fe->param.name); return; } } /** * ata_force_horkage - force horkage according to libata.force * @dev: ATA device of interest * * Force horkage according to libata.force and whine about it. * For consistency with link selection, device number 15 selects * the first device connected to the host link. * * LOCKING: * EH context. 
*/ static void ata_force_horkage(struct ata_device *dev) { int devno = dev->link->pmp + dev->devno; int alt_devno = devno; int i; /* allow n.15/16 for devices attached to host port */ if (ata_is_host_link(dev->link)) alt_devno += 15; for (i = 0; i < ata_force_tbl_size; i++) { const struct ata_force_ent *fe = &ata_force_tbl[i]; if (fe->port != -1 && fe->port != dev->link->ap->print_id) continue; if (fe->device != -1 && fe->device != devno && fe->device != alt_devno) continue; if (!(~dev->horkage & fe->param.horkage_on) && !(dev->horkage & fe->param.horkage_off)) continue; dev->horkage |= fe->param.horkage_on; dev->horkage &= ~fe->param.horkage_off; ata_dev_printk(dev, KERN_NOTICE, "FORCE: horkage modified (%s)\n", fe->param.name); } } /** * atapi_cmd_type - Determine ATAPI command type from SCSI opcode * @opcode: SCSI opcode * * Determine ATAPI command type from @opcode. * * LOCKING: * None. * * RETURNS: * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} */ int atapi_cmd_type(u8 opcode) { switch (opcode) { case GPCMD_READ_10: case GPCMD_READ_12: return ATAPI_READ; case GPCMD_WRITE_10: case GPCMD_WRITE_12: case GPCMD_WRITE_AND_VERIFY_10: return ATAPI_WRITE; case GPCMD_READ_CD: case GPCMD_READ_CD_MSF: return ATAPI_READ_CD; case ATA_16: case ATA_12: if (atapi_passthru16) return ATAPI_PASS_THRU; /* fall thru */ default: return ATAPI_MISC; } } /** * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure * @tf: Taskfile to convert * @pmp: Port multiplier port * @is_cmd: This FIS is for command * @fis: Buffer into which data will output * * Converts a standard ATA taskfile to a Serial ATA * FIS structure (Register - Host to Device). * * LOCKING: * Inherited from caller. 
*/ void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) { fis[0] = 0x27; /* Register - Host to Device FIS */ fis[1] = pmp & 0xf; /* Port multiplier number*/ if (is_cmd) fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ fis[2] = tf->command; fis[3] = tf->feature; fis[4] = tf->lbal; fis[5] = tf->lbam; fis[6] = tf->lbah; fis[7] = tf->device; fis[8] = tf->hob_lbal; fis[9] = tf->hob_lbam; fis[10] = tf->hob_lbah; fis[11] = tf->hob_feature; fis[12] = tf->nsect; fis[13] = tf->hob_nsect; fis[14] = 0; fis[15] = tf->ctl; fis[16] = 0; fis[17] = 0; fis[18] = 0; fis[19] = 0; } /** * ata_tf_from_fis - Convert SATA FIS to ATA taskfile * @fis: Buffer from which data will be input * @tf: Taskfile to output * * Converts a serial ATA FIS structure to a standard ATA taskfile. * * LOCKING: * Inherited from caller. */ void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) { tf->command = fis[2]; /* status */ tf->feature = fis[3]; /* error */ tf->lbal = fis[4]; tf->lbam = fis[5]; tf->lbah = fis[6]; tf->device = fis[7]; tf->hob_lbal = fis[8]; tf->hob_lbam = fis[9]; tf->hob_lbah = fis[10]; tf->nsect = fis[12]; tf->hob_nsect = fis[13]; } static const u8 ata_rw_cmds[] = { /* pio multi */ ATA_CMD_READ_MULTI, ATA_CMD_WRITE_MULTI, ATA_CMD_READ_MULTI_EXT, ATA_CMD_WRITE_MULTI_EXT, 0, 0, 0, ATA_CMD_WRITE_MULTI_FUA_EXT, /* pio */ ATA_CMD_PIO_READ, ATA_CMD_PIO_WRITE, ATA_CMD_PIO_READ_EXT, ATA_CMD_PIO_WRITE_EXT, 0, 0, 0, 0, /* dma */ ATA_CMD_READ, ATA_CMD_WRITE, ATA_CMD_READ_EXT, ATA_CMD_WRITE_EXT, 0, 0, 0, ATA_CMD_WRITE_FUA_EXT }; /** * ata_rwcmd_protocol - set taskfile r/w commands and protocol * @tf: command to examine and configure * @dev: device tf belongs to * * Examine the device configuration and tf->flags to calculate * the proper read/write commands and protocol to use. * * LOCKING: * caller. */ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) { u8 cmd; int index, fua, lba48, write; fua = (tf->flags & ATA_TFLAG_FUA) ? 
4 : 0; lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; if (dev->flags & ATA_DFLAG_PIO) { tf->protocol = ATA_PROT_PIO; index = dev->multi_count ? 0 : 8; } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { /* Unable to use DMA due to host limitation */ tf->protocol = ATA_PROT_PIO; index = dev->multi_count ? 0 : 8; } else { tf->protocol = ATA_PROT_DMA; index = 16; } cmd = ata_rw_cmds[index + fua + lba48 + write]; if (cmd) { tf->command = cmd; return 0; } return -1; } /** * ata_tf_read_block - Read block address from ATA taskfile * @tf: ATA taskfile of interest * @dev: ATA device @tf belongs to * * LOCKING: * None. * * Read block address from @tf. This function can handle all * three address formats - LBA, LBA48 and CHS. tf->protocol and * flags select the address format to use. * * RETURNS: * Block address read from @tf. */ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) { u64 block = 0; if (tf->flags & ATA_TFLAG_LBA) { if (tf->flags & ATA_TFLAG_LBA48) { block |= (u64)tf->hob_lbah << 40; block |= (u64)tf->hob_lbam << 32; block |= (u64)tf->hob_lbal << 24; } else block |= (tf->device & 0xf) << 24; block |= tf->lbah << 16; block |= tf->lbam << 8; block |= tf->lbal; } else { u32 cyl, head, sect; cyl = tf->lbam | (tf->lbah << 8); head = tf->device & 0xf; sect = tf->lbal; if (!sect) { ata_dev_printk(dev, KERN_WARNING, "device reported " "invalid CHS sector 0\n"); sect = 1; /* oh well */ } block = (cyl * dev->heads + head) * dev->sectors + sect - 1; } return block; } /** * ata_build_rw_tf - Build ATA taskfile for given read/write request * @tf: Target ATA taskfile * @dev: ATA device @tf belongs to * @block: Block address * @n_block: Number of blocks * @tf_flags: RW/FUA etc... * @tag: tag * * LOCKING: * None. * * Build ATA taskfile @tf for read/write request described by * @block, @n_block, @tf_flags and @tag on @dev. 
* * RETURNS: * * 0 on success, -ERANGE if the request is too large for @dev, * -EINVAL if the request is invalid. */ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, u64 block, u32 n_block, unsigned int tf_flags, unsigned int tag) { tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->flags |= tf_flags; if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { /* yay, NCQ */ if (!lba_48_ok(block, n_block)) return -ERANGE; tf->protocol = ATA_PROT_NCQ; tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; if (tf->flags & ATA_TFLAG_WRITE) tf->command = ATA_CMD_FPDMA_WRITE; else tf->command = ATA_CMD_FPDMA_READ; tf->nsect = tag << 3; tf->hob_feature = (n_block >> 8) & 0xff; tf->feature = n_block & 0xff; tf->hob_lbah = (block >> 40) & 0xff; tf->hob_lbam = (block >> 32) & 0xff; tf->hob_lbal = (block >> 24) & 0xff; tf->lbah = (block >> 16) & 0xff; tf->lbam = (block >> 8) & 0xff; tf->lbal = block & 0xff; tf->device = 1 << 6; if (tf->flags & ATA_TFLAG_FUA) tf->device |= 1 << 7; } else if (dev->flags & ATA_DFLAG_LBA) { tf->flags |= ATA_TFLAG_LBA; if (lba_28_ok(block, n_block)) { /* use LBA28 */ tf->device |= (block >> 24) & 0xf; } else if (lba_48_ok(block, n_block)) { if (!(dev->flags & ATA_DFLAG_LBA48)) return -ERANGE; /* use LBA48 */ tf->flags |= ATA_TFLAG_LBA48; tf->hob_nsect = (n_block >> 8) & 0xff; tf->hob_lbah = (block >> 40) & 0xff; tf->hob_lbam = (block >> 32) & 0xff; tf->hob_lbal = (block >> 24) & 0xff; } else /* request too large even for LBA48 */ return -ERANGE; if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) return -EINVAL; tf->nsect = n_block & 0xff; tf->lbah = (block >> 16) & 0xff; tf->lbam = (block >> 8) & 0xff; tf->lbal = block & 0xff; tf->device |= ATA_LBA; } else { /* CHS */ u32 sect, head, cyl, track; /* The request -may- be too large for CHS addressing. 
*/ if (!lba_28_ok(block, n_block)) return -ERANGE; if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) return -EINVAL; /* Convert LBA to CHS */ track = (u32)block / dev->sectors; cyl = track / dev->heads; head = track % dev->heads; sect = (u32)block % dev->sectors + 1; DPRINTK("block %u track %u cyl %u head %u sect %u\n", (u32)block, track, cyl, head, sect); /* Check whether the converted CHS can fit. Cylinder: 0-65535 Head: 0-15 Sector: 1-255*/ if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) return -ERANGE; tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ tf->lbal = sect; tf->lbam = cyl; tf->lbah = cyl >> 8; tf->device |= head; } return 0; } /** * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask * @pio_mask: pio_mask * @mwdma_mask: mwdma_mask * @udma_mask: udma_mask * * Pack @pio_mask, @mwdma_mask and @udma_mask into a single * unsigned int xfer_mask. * * LOCKING: * None. * * RETURNS: * Packed xfer_mask. */ unsigned long ata_pack_xfermask(unsigned long pio_mask, unsigned long mwdma_mask, unsigned long udma_mask) { return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); } /** * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks * @xfer_mask: xfer_mask to unpack * @pio_mask: resulting pio_mask * @mwdma_mask: resulting mwdma_mask * @udma_mask: resulting udma_mask * * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. * Any NULL distination masks will be ignored. 
*/ void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, unsigned long *mwdma_mask, unsigned long *udma_mask) { if (pio_mask) *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; if (mwdma_mask) *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; if (udma_mask) *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; } static const struct ata_xfer_ent { int shift, bits; u8 base; } ata_xfer_tbl[] = { { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, { -1, }, }; /** * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask * @xfer_mask: xfer_mask of interest * * Return matching XFER_* value for @xfer_mask. Only the highest * bit of @xfer_mask is considered. * * LOCKING: * None. * * RETURNS: * Matching XFER_* value, 0xff if no match found. */ u8 ata_xfer_mask2mode(unsigned long xfer_mask) { int highbit = fls(xfer_mask) - 1; const struct ata_xfer_ent *ent; for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) if (highbit >= ent->shift && highbit < ent->shift + ent->bits) return ent->base + highbit - ent->shift; return 0xff; } /** * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* * @xfer_mode: XFER_* of interest * * Return matching xfer_mask for @xfer_mode. * * LOCKING: * None. * * RETURNS: * Matching xfer_mask, 0 if no match found. */ unsigned long ata_xfer_mode2mask(u8 xfer_mode) { const struct ata_xfer_ent *ent; for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) & ~((1 << ent->shift) - 1); return 0; } /** * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* * @xfer_mode: XFER_* of interest * * Return matching xfer_shift for @xfer_mode. * * LOCKING: * None. * * RETURNS: * Matching xfer_shift, -1 if no match found. 
*/ int ata_xfer_mode2shift(unsigned long xfer_mode) { const struct ata_xfer_ent *ent; for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) return ent->shift; return -1; } /** * ata_mode_string - convert xfer_mask to string * @xfer_mask: mask of bits supported; only highest bit counts. * * Determine string which represents the highest speed * (highest bit in @modemask). * * LOCKING: * None. * * RETURNS: * Constant C string representing highest speed listed in * @mode_mask, or the constant C string "<n/a>". */ const char *ata_mode_string(unsigned long xfer_mask) { static const char * const xfer_mode_str[] = { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6", "MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4", "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7", }; int highbit; highbit = fls(xfer_mask) - 1; if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) return xfer_mode_str[highbit]; return "<n/a>"; } static const char *sata_spd_string(unsigned int spd) { static const char * const spd_str[] = { "1.5 Gbps", "3.0 Gbps", "6.0 Gbps", }; if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) return "<unknown>"; return spd_str[spd - 1]; } static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; u32 scontrol; unsigned int err_mask; int rc; /* * disallow DIPM for drivers which haven't set * ATA_FLAG_IPM. This is because when DIPM is enabled, * phy ready will be set in the interrupt status on * state changes, which will cause some drivers to * think there are errors - additionally drivers will * need to disable hot plug. */ if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { ap->pm_policy = NOT_AVAILABLE; return -EINVAL; } /* * For DIPM, we will only enable it for the * min_power setting. * * Why? 
Because Disks are too stupid to know that * If the host rejects a request to go to SLUMBER * they should retry at PARTIAL, and instead it * just would give up. So, for medium_power to * work at all, we need to only allow HIPM. */ rc = sata_scr_read(link, SCR_CONTROL, &scontrol); if (rc) return rc; switch (policy) { case MIN_POWER: /* no restrictions on IPM transitions */ scontrol &= ~(0x3 << 8); rc = sata_scr_write(link, SCR_CONTROL, scontrol); if (rc) return rc; /* enable DIPM */ if (dev->flags & ATA_DFLAG_DIPM) err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM); break; case MEDIUM_POWER: /* allow IPM to PARTIAL */ scontrol &= ~(0x1 << 8); scontrol |= (0x2 << 8); rc = sata_scr_write(link, SCR_CONTROL, scontrol); if (rc) return rc; /* * we don't have to disable DIPM since IPM flags * disallow transitions to SLUMBER, which effectively * disable DIPM if it does not support PARTIAL */ break; case NOT_AVAILABLE: case MAX_PERFORMANCE: /* disable all IPM transitions */ scontrol |= (0x3 << 8); rc = sata_scr_write(link, SCR_CONTROL, scontrol); if (rc) return rc; /* * we don't have to disable DIPM since IPM flags * disallow all transitions which effectively * disable DIPM anyway. */ break; } /* FIXME: handle SET FEATURES failure */ (void) err_mask; return 0; } /** * ata_dev_enable_pm - enable SATA interface power management * @dev: device to enable power management * @policy: the link power management policy * * Enable SATA Interface power management. This will enable * Device Interface Power Management (DIPM) for min_power * policy, and then call driver specific callbacks for * enabling Host Initiated Power management. * * Locking: Caller. * Returns: -EINVAL if IPM is not supported, 0 otherwise. 
*/ void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) { int rc = 0; struct ata_port *ap = dev->link->ap; /* set HIPM first, then DIPM */ if (ap->ops->enable_pm) rc = ap->ops->enable_pm(ap, policy); if (rc) goto enable_pm_out; rc = ata_dev_set_dipm(dev, policy); enable_pm_out: if (rc) ap->pm_policy = MAX_PERFORMANCE; else ap->pm_policy = policy; return /* rc */; /* hopefully we can use 'rc' eventually */ } #ifdef CONFIG_PM /** * ata_dev_disable_pm - disable SATA interface power management * @dev: device to disable power management * * Disable SATA Interface power management. This will disable * Device Interface Power Management (DIPM) without changing * policy, call driver specific callbacks for disabling Host * Initiated Power management. * * Locking: Caller. * Returns: void */ static void ata_dev_disable_pm(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; ata_dev_set_dipm(dev, MAX_PERFORMANCE); if (ap->ops->disable_pm) ap->ops->disable_pm(ap); } #endif /* CONFIG_PM */ void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) { ap->pm_policy = policy; ap->link.eh_info.action |= ATA_EH_LPM; ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; ata_port_schedule_eh(ap); } #ifdef CONFIG_PM static void ata_lpm_enable(struct ata_host *host) { struct ata_link *link; struct ata_port *ap; struct ata_device *dev; int i; for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) ata_dev_disable_pm(dev); } } } static void ata_lpm_disable(struct ata_host *host) { int i; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_lpm_schedule(ap, ap->pm_policy); } } #endif /* CONFIG_PM */ /** * ata_dev_classify - determine device type based on ATA-spec signature * @tf: ATA taskfile register set for device to be identified * * Determine from taskfile register contents whether a device is * ATA or ATAPI, as per "Signature and persistence" section * of ATA/PI spec 
 (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	/* plain ATA: LBA mid/high both zero */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* ATAPI signature: 0x14/0xeb */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* port multiplier signature: 0x69/0x96 (see comment above) */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* SEMB signature: 0x3c/0xc3 -- may really be an ATA disk (see above) */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
* * The strings in the IDENTIFY DEVICE page are broken up into * 16-bit chunks. Run through the string, and output each * 8-bit chunk linearly, regardless of platform. * * LOCKING: * caller. */ void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; BUG_ON(len & 1); while (len > 0) { c = id[ofs] >> 8; *s = c; s++; c = id[ofs] & 0xff; *s = c; s++; ofs++; len -= 2; } } /** * ata_id_c_string - Convert IDENTIFY DEVICE page into C string * @id: IDENTIFY DEVICE results we will examine * @s: string into which data is output * @ofs: offset into identify device page * @len: length of string to return. must be an odd number. * * This function is identical to ata_id_string except that it * trims trailing spaces and terminates the resulting string with * null. @len must be actual maximum length (even number) + 1. * * LOCKING: * caller. */ void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned char *p; ata_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); while (p > s && p[-1] == ' ') p--; *p = '\0'; } static u64 ata_id_n_sectors(const u16 *id) { if (ata_id_has_lba(id)) { if (ata_id_has_lba48(id)) return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); else return ata_id_u32(id, ATA_ID_LBA_CAPACITY); } else { if (ata_id_current_chs_valid(id)) return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * id[ATA_ID_CUR_SECTORS]; else return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; } } u64 ata_tf_to_lba48(const struct ata_taskfile *tf) { u64 sectors = 0; sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; sectors |= (tf->lbah & 0xff) << 16; sectors |= (tf->lbam & 0xff) << 8; sectors |= (tf->lbal & 0xff); return sectors; } u64 ata_tf_to_lba(const struct ata_taskfile *tf) { u64 sectors = 0; sectors |= (tf->device & 0x0f) << 24; sectors |= (tf->lbah & 0xff) << 16; sectors |= (tf->lbam & 0xff) << 
8; sectors |= (tf->lbal & 0xff); return sectors; } /** * ata_read_native_max_address - Read native max address * @dev: target device * @max_sectors: out parameter for the result native max address * * Perform an LBA48 or LBA28 native size query upon the device in * question. * * RETURNS: * 0 on success, -EACCES if command is aborted by the drive. * -EIO on other errors. */ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) { unsigned int err_mask; struct ata_taskfile tf; int lba48 = ata_id_has_lba48(dev->id); ata_tf_init(dev, &tf); /* always clear all address registers */ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; if (lba48) { tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; tf.flags |= ATA_TFLAG_LBA48; } else tf.command = ATA_CMD_READ_NATIVE_MAX; tf.protocol |= ATA_PROT_NODATA; tf.device |= ATA_LBA; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); if (err_mask) { ata_dev_printk(dev, KERN_WARNING, "failed to read native " "max address (err_mask=0x%x)\n", err_mask); if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) return -EACCES; return -EIO; } if (lba48) *max_sectors = ata_tf_to_lba48(&tf) + 1; else *max_sectors = ata_tf_to_lba(&tf) + 1; if (dev->horkage & ATA_HORKAGE_HPA_SIZE) (*max_sectors)--; return 0; } /** * ata_set_max_sectors - Set max sectors * @dev: target device * @new_sectors: new max sectors value to set for the device * * Set max sectors of @dev to @new_sectors. * * RETURNS: * 0 on success, -EACCES if command is aborted or denied (due to * previous non-volatile SET_MAX) by the drive. -EIO on other * errors. 
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the last addressable sector, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 of the address live in the device reg */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/* Completion callback for ata_exec_internal_sg(): wakes the waiter
 * that was stashed in qc->private_data.
 */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save any in-flight command state so it can be restored below */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is only a catch-all; drop it once a
		 * more specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.
 *	Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	/* map the flat buffer onto a single-entry sg list */
	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);

		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fallthrough - treat it as a plain ATA device */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
				       "device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
			       "class=%d may_fallback=%d tried_spinup=%d\n",
			       class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

/* Apply per-device link speed quirks (currently only the 1.5Gbps cap). */
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_printk(dev, KERN_INFO,
			       "applying link speed limit horkage to %s\n",
			       sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}

/* Detect a drive that is likely behind a SATA<->PATA bridge: the port
 * cable is SATA but the device's IDENTIFY data doesn't claim SATA.
 * Callers use this to apply conservative bridge limits.
 */
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
		return 0;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/* Configure NCQ for @dev and format a human-readable description of the
 * result into @desc (at most @desc_sz bytes).  Returns 0 on success,
 * -EIO if enabling FPDMA AA failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			      char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
				"(error_mask=0x%x)\n", err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);
	return 0;
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			"WARNING: ATAPI is %s, device ignored.\n",
			atapi_enabled ? "not supported with this driver"
				      : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf,	modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}

/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device came up enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough - same slow-down logic as -EIO */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}

/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit is below everything supported;
			 * fall back to the lowest supported speed */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}

/* Compute the target SPD for @link from its limit and rewrite the SPD
 * field of *@scontrol in place.  Returns non-zero iff the SControl
 * value actually changed (i.e. reconfiguration is needed).
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
*/ static int sata_set_spd_needed(struct ata_link *link) { u32 scontrol; if (sata_scr_read(link, SCR_CONTROL, &scontrol)) return 1; return __sata_set_spd_needed(link, &scontrol); } /** * sata_set_spd - set SATA spd according to spd limit * @link: Link to set SATA spd for * * Set SATA spd of @link according to sata_spd_limit. * * LOCKING: * Inherited from caller. * * RETURNS: * 0 if spd doesn't need to be changed, 1 if spd has been * changed. Negative errno if SCR registers are inaccessible. */ int sata_set_spd(struct ata_link *link) { u32 scontrol; int rc; if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; if (!__sata_set_spd_needed(link, &scontrol)) return 0; if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) return rc; return 1; } /* * This mode timing computation functionality is ported over from * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik */ /* * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). * These were taken from ATA/ATAPI-6 standard, rev 0a, except * for UDMA6, which is currently supported only by Maxtor drives. * * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
*/ static const struct ata_timing ata_timing[] = { /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, { 0xFF } }; #define ENOUGH(v, unit) (((v)-1)/(unit)+1) #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) { q->setup = EZ(t->setup * 1000, T); q->act8b = EZ(t->act8b * 1000, T); q->rec8b = EZ(t->rec8b * 1000, T); q->cyc8b = EZ(t->cyc8b * 1000, T); q->active = EZ(t->active * 1000, T); q->recover = EZ(t->recover * 1000, T); q->dmack_hold = EZ(t->dmack_hold * 1000, T); q->cycle = EZ(t->cycle * 1000, T); q->udma = EZ(t->udma * 1000, UT); } void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, struct ata_timing *m, unsigned int what) { if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); if 
(what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); } const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) { const struct ata_timing *t = ata_timing; while (xfer_mode > t->mode) t++; if (xfer_mode == t->mode) return t; return NULL; } int ata_timing_compute(struct ata_device *adev, unsigned short speed, struct ata_timing *t, int T, int UT) { const u16 *id = adev->id; const struct ata_timing *s; struct ata_timing p; /* * Find the mode. */ if (!(s = ata_timing_find_mode(speed))) return -EINVAL; memcpy(t, s, sizeof(*s)); /* * If the drive is an EIDE drive, it can tell us it needs extended * PIO/MW_DMA cycle timing. */ if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; else if ((speed <= XFER_PIO_4) || (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) p.cycle = id[ATA_ID_EIDE_DMA_MIN]; ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); } /* * Convert the timing to bus clock counts. */ ata_timing_quantize(t, t, T, UT); /* * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, * S.M.A.R.T * and some other commands. We have to ensure that the * DMA cycle timing is slower/equal than the fastest PIO timing. 
*/ if (speed > XFER_PIO_6) { ata_timing_compute(adev, adev->pio_mode, &p, T, UT); ata_timing_merge(&p, t, t, ATA_TIMING_ALL); } /* * Lengthen active & recovery time so that cycle time is correct. */ if (t->act8b + t->rec8b < t->cyc8b) { t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; t->rec8b = t->cyc8b - t->act8b; } if (t->active + t->recover < t->cycle) { t->active += (t->cycle - (t->active + t->recover)) / 2; t->recover = t->cycle - t->active; } /* In a few cases quantisation may produce enough errors to leave t->cycle too low for the sum of active and recovery if so we must correct this */ if (t->active + t->recover > t->cycle) t->cycle = t->active + t->recover; return 0; } /** * ata_timing_cycle2mode - find xfer mode for the specified cycle duration * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. * @cycle: cycle duration in ns * * Return matching xfer mode for @cycle. The returned mode is of * the transfer type specified by @xfer_shift. If @cycle is too * slow for @xfer_shift, 0xff is returned. If @cycle is faster * than the fastest known mode, the fasted mode is returned. * * LOCKING: * None. * * RETURNS: * Matching xfer_mode, 0xff if no match found. */ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) { u8 base_mode = 0xff, last_mode = 0xff; const struct ata_xfer_ent *ent; const struct ata_timing *t; for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) if (ent->shift == xfer_shift) base_mode = ent->base; for (t = ata_timing_find_mode(base_mode); t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { unsigned short this_cycle; switch (xfer_shift) { case ATA_SHIFT_PIO: case ATA_SHIFT_MWDMA: this_cycle = t->cycle; break; case ATA_SHIFT_UDMA: this_cycle = t->udma; break; default: return 0xff; } if (cycle > this_cycle) break; last_mode = t->mode; } return last_mode; } /** * ata_down_xfermask_limit - adjust dev xfer masks downward * @dev: Device to adjust xfer masks * @sel: ATA_DNXFER_* selector * * Adjust xfer masks of @dev downward. 
Note that this function * does not apply the change. Invoking ata_set_mode() afterwards * will apply the limit. * * LOCKING: * Inherited from caller. * * RETURNS: * 0 on success, negative errno on failure */ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) { char buf[32]; unsigned long orig_mask, xfer_mask; unsigned long pio_mask, mwdma_mask, udma_mask; int quiet, highbit; quiet = !!(sel & ATA_DNXFER_QUIET); sel &= ~ATA_DNXFER_QUIET; xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, dev->udma_mask); ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); switch (sel) { case ATA_DNXFER_PIO: highbit = fls(pio_mask) - 1; pio_mask &= ~(1 << highbit); break; case ATA_DNXFER_DMA: if (udma_mask) { highbit = fls(udma_mask) - 1; udma_mask &= ~(1 << highbit); if (!udma_mask) return -ENOENT; } else if (mwdma_mask) { highbit = fls(mwdma_mask) - 1; mwdma_mask &= ~(1 << highbit); if (!mwdma_mask) return -ENOENT; } break; case ATA_DNXFER_40C: udma_mask &= ATA_UDMA_MASK_40C; break; case ATA_DNXFER_FORCE_PIO0: pio_mask &= 1; case ATA_DNXFER_FORCE_PIO: mwdma_mask = 0; udma_mask = 0; break; default: BUG(); } xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) return -ENOENT; if (!quiet) { if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) snprintf(buf, sizeof(buf), "%s:%s", ata_mode_string(xfer_mask), ata_mode_string(xfer_mask & ATA_MASK_PIO)); else snprintf(buf, sizeof(buf), "%s", ata_mode_string(xfer_mask)); ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n", buf); } ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, &dev->udma_mask); return 0; } static int ata_dev_set_mode(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; struct ata_eh_context *ehc = &dev->link->eh_context; const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; const char *dev_err_whine = ""; int ign_dev_err = 0; unsigned int err_mask = 0; int rc; 
dev->flags &= ~ATA_DFLAG_PIO; if (dev->xfer_shift == ATA_SHIFT_PIO) dev->flags |= ATA_DFLAG_PIO; if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) dev_err_whine = " (SET_XFERMODE skipped)"; else { if (nosetxfer) ata_dev_printk(dev, KERN_WARNING, "NOSETXFER but PATA detected - can't " "skip SETXFER, might malfunction\n"); err_mask = ata_dev_set_xfermode(dev); } if (err_mask & ~AC_ERR_DEV) goto fail; /* revalidate */ ehc->i.flags |= ATA_EHI_POST_SETMODE; rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); ehc->i.flags &= ~ATA_EHI_POST_SETMODE; if (rc) return rc; if (dev->xfer_shift == ATA_SHIFT_PIO) { /* Old CFA may refuse this command, which is just fine */ if (ata_id_is_cfa(dev->id)) ign_dev_err = 1; /* Catch several broken garbage emulations plus some pre ATA devices */ if (ata_id_major_version(dev->id) == 0 && dev->pio_mode <= XFER_PIO_2) ign_dev_err = 1; /* Some very old devices and some bad newer ones fail any kind of SET_XFERMODE request but support PIO0-2 timings and no IORDY */ if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) ign_dev_err = 1; } /* Early MWDMA devices do DMA but don't allow DMA mode setting. 
Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ if (dev->xfer_shift == ATA_SHIFT_MWDMA && dev->dma_mode == XFER_MW_DMA_0 && (dev->id[63] >> 8) & 1) ign_dev_err = 1; /* if the device is actually configured correctly, ignore dev err */ if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) ign_dev_err = 1; if (err_mask & AC_ERR_DEV) { if (!ign_dev_err) goto fail; else dev_err_whine = " (device error ignored)"; } DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", dev->xfer_shift, (int)dev->xfer_mode); ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), dev_err_whine); return 0; fail: ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " "(err_mask=0x%x)\n", err_mask); return -EIO; } /** * ata_do_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed * @r_failed_dev: out parameter for failed device * * Standard implementation of the function used to tune and set * ATA device disk transfer mode (PIO3, UDMA6, etc.). If * ata_dev_set_mode() fails, pointer to the failing device is * returned in @r_failed_dev. * * LOCKING: * PCI/etc. bus probe sem. 
* * RETURNS: * 0 on success, negative errno otherwise */ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_device *dev; int rc = 0, used_dma = 0, found = 0; /* step 1: calculate xfer_mask */ ata_for_each_dev(dev, link, ENABLED) { unsigned long pio_mask, dma_mask; unsigned int mode_mask; mode_mask = ATA_DMA_MASK_ATA; if (dev->class == ATA_DEV_ATAPI) mode_mask = ATA_DMA_MASK_ATAPI; else if (ata_id_is_cfa(dev->id)) mode_mask = ATA_DMA_MASK_CFA; ata_dev_xfermask(dev); ata_force_xfermask(dev); pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); if (libata_dma_mask & mode_mask) dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); else dma_mask = 0; dev->pio_mode = ata_xfer_mask2mode(pio_mask); dev->dma_mode = ata_xfer_mask2mode(dma_mask); found = 1; if (ata_dma_enabled(dev)) used_dma = 1; } if (!found) goto out; /* step 2: always set host PIO timings */ ata_for_each_dev(dev, link, ENABLED) { if (dev->pio_mode == 0xff) { ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); rc = -EINVAL; goto out; } dev->xfer_mode = dev->pio_mode; dev->xfer_shift = ATA_SHIFT_PIO; if (ap->ops->set_piomode) ap->ops->set_piomode(ap, dev); } /* step 3: set host DMA timings */ ata_for_each_dev(dev, link, ENABLED) { if (!ata_dma_enabled(dev)) continue; dev->xfer_mode = dev->dma_mode; dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); if (ap->ops->set_dmamode) ap->ops->set_dmamode(ap, dev); } /* step 4: update devices' xfer mode */ ata_for_each_dev(dev, link, ENABLED) { rc = ata_dev_set_mode(dev); if (rc) goto out; } /* Record simplex status. If we selected DMA then the other * host channels are not permitted to do so. 
*/ if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) ap->host->simplex_claimed = ap; out: if (rc) *r_failed_dev = dev; return rc; } /** * ata_wait_ready - wait for link to become ready * @link: link to be waited on * @deadline: deadline jiffies for the operation * @check_ready: callback to check link readiness * * Wait for @link to become ready. @check_ready should return * positive number if @link is ready, 0 if it isn't, -ENODEV if * link doesn't seem to be occupied, other errno for other error * conditions. * * Transient -ENODEV conditions are allowed for * ATA_TMOUT_FF_WAIT. * * LOCKING: * EH context. * * RETURNS: * 0 if @linke is ready before @deadline; otherwise, -errno. */ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { unsigned long start = jiffies; unsigned long nodev_deadline; int warned = 0; /* choose which 0xff timeout to use, read comment in libata.h */ if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); else nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); /* Slave readiness can't be tested separately from master. On * M/S emulation configuration, this function should be called * only on the master and it will handle both master and slave. */ WARN_ON(link == link->ap->slave_link); if (time_after(nodev_deadline, deadline)) nodev_deadline = deadline; while (1) { unsigned long now = jiffies; int ready, tmp; ready = tmp = check_ready(link); if (ready > 0) return 0; /* * -ENODEV could be transient. Ignore -ENODEV if link * is online. Also, some SATA devices take a long * time to clear 0xff after reset. Wait for * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't * offline. * * Note that some PATA controllers (pata_ali) explode * if status register is read more than once when * there's no device attached. 
*/ if (ready == -ENODEV) { if (ata_link_online(link)) ready = 0; else if ((link->ap->flags & ATA_FLAG_SATA) && !ata_link_offline(link) && time_before(now, nodev_deadline)) ready = 0; } if (ready) return ready; if (time_after(now, deadline)) return -EBUSY; if (!warned && time_after(now, start + 5 * HZ) && (deadline - now > 3 * HZ)) { ata_link_printk(link, KERN_WARNING, "link is slow to respond, please be patient " "(ready=%d)\n", tmp); warned = 1; } msleep(50); } } /** * ata_wait_after_reset - wait for link to become ready after reset * @link: link to be waited on * @deadline: deadline jiffies for the operation * @check_ready: callback to check link readiness * * Wait for @link to become ready after reset. * * LOCKING: * EH context. * * RETURNS: * 0 if @linke is ready before @deadline; otherwise, -errno. */ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { msleep(ATA_WAIT_AFTER_RESET); return ata_wait_ready(link, deadline, check_ready); } /** * sata_link_debounce - debounce SATA phy status * @link: ATA link to debounce SATA phy status for * @params: timing parameters { interval, duratinon, timeout } in msec * @deadline: deadline jiffies for the operation * * Make sure SStatus of @link reaches stable state, determined by * holding the same value where DET is not 1 for @duration polled * every @interval, before @timeout. Timeout constraints the * beginning of the stable state. Because DET gets stuck at 1 on * some controllers after hot unplugging, this functions waits * until timeout then returns 0 if DET is stable at 1. * * @timeout is further limited by @deadline. The sooner of the * two is used. * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * 0 on success, -errno on failure. 
*/ int sata_link_debounce(struct ata_link *link, const unsigned long *params, unsigned long deadline) { unsigned long interval = params[0]; unsigned long duration = params[1]; unsigned long last_jiffies, t; u32 last, cur; int rc; t = ata_deadline(jiffies, params[2]); if (time_before(t, deadline)) deadline = t; if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; last = cur; last_jiffies = jiffies; while (1) { msleep(interval); if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; /* DET stable? */ if (cur == last) { if (cur == 1 && time_before(jiffies, deadline)) continue; if (time_after(jiffies, ata_deadline(last_jiffies, duration))) return 0; continue; } /* unstable, start over */ last = cur; last_jiffies = jiffies; /* Check deadline. If debouncing failed, return * -EPIPE to tell upper layer to lower link speed. */ if (time_after(jiffies, deadline)) return -EPIPE; } } /** * sata_link_resume - resume SATA link * @link: ATA link to resume SATA * @params: timing parameters { interval, duratinon, timeout } in msec * @deadline: deadline jiffies for the operation * * Resume SATA phy @link and debounce it. * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * 0 on success, -errno on failure. */ int sata_link_resume(struct ata_link *link, const unsigned long *params, unsigned long deadline) { int tries = ATA_LINK_RESUME_TRIES; u32 scontrol, serror; int rc; if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; /* * Writes to SControl sometimes get ignored under certain * controllers (ata_piix SIDPR). Make sure DET actually is * cleared. */ do { scontrol = (scontrol & 0x0f0) | 0x300; if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) return rc; /* * Some PHYs react badly if SStatus is pounded * immediately after resuming. Delay 200ms before * debouncing. */ msleep(200); /* is SControl restored correctly? 
*/ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; } while ((scontrol & 0xf0f) != 0x300 && --tries); if ((scontrol & 0xf0f) != 0x300) { ata_link_printk(link, KERN_ERR, "failed to resume link (SControl %X)\n", scontrol); return 0; } if (tries < ATA_LINK_RESUME_TRIES) ata_link_printk(link, KERN_WARNING, "link resume succeeded after %d retries\n", ATA_LINK_RESUME_TRIES - tries); if ((rc = sata_link_debounce(link, params, deadline))) return rc; /* clear SError, some PHYs require this even for SRST to work */ if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) rc = sata_scr_write(link, SCR_ERROR, serror); return rc != -EINVAL ? rc : 0; } /** * ata_std_prereset - prepare for reset * @link: ATA link to be reset * @deadline: deadline jiffies for the operation * * @link is about to be reset. Initialize it. Failure from * prereset makes libata abort whole reset sequence and give up * that port, so prereset should be best-effort. It does its * best to prepare for reset sequence but if things go wrong, it * should just whine, not fail. * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * 0 on success, -errno otherwise. 
*/
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        const unsigned long *timing = sata_ehc_deb_timing(ehc);
        int rc;

        /* if we're about to do hardreset, nothing more to do */
        if (ehc->i.action & ATA_EH_HARDRESET)
                return 0;

        /* if SATA, resume link */
        if (ap->flags & ATA_FLAG_SATA) {
                rc = sata_link_resume(link, timing, deadline);
                /* whine about phy resume failure but proceed */
                if (rc && rc != -EOPNOTSUPP)
                        ata_link_printk(link, KERN_WARNING, "failed to resume "
                                        "link for reset (errno=%d)\n", rc);
        }

        /* no point in trying softreset on offline link */
        if (ata_phys_link_offline(link))
                ehc->i.action &= ~ATA_EH_SOFTRESET;

        return 0;
}

/**
 *      sata_link_hardreset - reset link via SATA phy reset
 *      @link: link to reset
 *      @timing: timing parameters { interval, duration, timeout } in msec
 *      @deadline: deadline jiffies for the operation
 *      @online: optional out parameter indicating link onlineness
 *      @check_ready: optional callback to check link readiness
 *
 *      SATA phy-reset @link using DET bits of SControl register.
 *      After hardreset, link readiness is waited upon using
 *      ata_wait_ready() if @check_ready is specified.  LLDs are
 *      allowed to not specify @check_ready and wait itself after this
 *      function returns.  Device classification is LLD's
 *      responsibility.
 *
 *      *@online is set to one iff reset succeeded and @link is online
 *      after reset.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep)
 *
 *      RETURNS:
 *      0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
                        unsigned long deadline,
                        bool *online, int (*check_ready)(struct ata_link *))
{
        u32 scontrol;
        int rc;

        DPRINTK("ENTER\n");

        if (online)
                *online = false;

        if (sata_set_spd_needed(link)) {
                /* SATA spec says nothing about how to reconfigure
                 * spd.  To be on the safe side, turn off phy during
                 * reconfiguration.  This works for at least ICH7 AHCI
                 * and Sil3124.
                 */
                if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                        goto out;

                scontrol = (scontrol & 0x0f0) | 0x304;

                if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
                        goto out;

                sata_set_spd(link);
        }

        /* issue phy wake/reset */
        if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                goto out;

        scontrol = (scontrol & 0x0f0) | 0x301;

        if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
                goto out;

        /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
         * 10.4.2 says at least 1 ms.
         */
        msleep(1);

        /* bring link back */
        rc = sata_link_resume(link, timing, deadline);
        if (rc)
                goto out;
        /* if link is offline nothing more to do */
        if (ata_phys_link_offline(link))
                goto out;

        /* Link is online.  From this point, -ENODEV too is an error. */
        if (online)
                *online = true;

        if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
                /* If PMP is supported, we have to do follow-up SRST.
                 * Some PMPs don't send D2H Reg FIS after hardreset if
                 * the first port is empty.  Wait only for
                 * ATA_TMOUT_PMP_SRST_WAIT.
                 */
                if (check_ready) {
                        unsigned long pmp_deadline;

                        pmp_deadline = ata_deadline(jiffies,
                                                    ATA_TMOUT_PMP_SRST_WAIT);
                        if (time_after(pmp_deadline, deadline))
                                pmp_deadline = deadline;
                        ata_wait_ready(link, pmp_deadline, check_ready);
                }
                rc = -EAGAIN;
                goto out;
        }

        rc = 0;
        if (check_ready)
                rc = ata_wait_ready(link, deadline, check_ready);
 out:
        if (rc && rc != -EAGAIN) {
                /* online is set iff link is online && reset succeeded */
                if (online)
                        *online = false;
                ata_link_printk(link, KERN_ERR,
                                "COMRESET failed (errno=%d)\n", rc);
        }
        DPRINTK("EXIT, rc=%d\n", rc);
        return rc;
}

/**
 *      sata_std_hardreset - COMRESET w/o waiting or classification
 *      @link: link to reset
 *      @class: resulting class of attached device
 *      @deadline: deadline jiffies for the operation
 *
 *      Standard SATA COMRESET w/o waiting or classification.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep)
 *
 *      RETURNS:
 *      0 if link offline, -EAGAIN if link online, -errno on errors.
*/ int sata_std_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); bool online; int rc; /* do hardreset */ rc = sata_link_hardreset(link, timing, deadline, &online, NULL); return online ? -EAGAIN : rc; } /** * ata_std_postreset - standard postreset callback * @link: the target ata_link * @classes: classes of attached devices * * This function is invoked after a successful reset. Note that * the device might have been reset more than once using * different reset methods before postreset is invoked. * * LOCKING: * Kernel thread context (may sleep) */ void ata_std_postreset(struct ata_link *link, unsigned int *classes) { u32 serror; DPRINTK("ENTER\n"); /* reset complete, clear SError */ if (!sata_scr_read(link, SCR_ERROR, &serror)) sata_scr_write(link, SCR_ERROR, serror); /* print link status */ sata_print_link_status(link); DPRINTK("EXIT\n"); } /** * ata_dev_same_device - Determine whether new ID matches configured device * @dev: device to compare against * @new_class: class of the new device * @new_id: IDENTIFY page of the new device * * Compare @new_class and @new_id against @dev and determine * whether @dev is the device indicated by @new_class and * @new_id. * * LOCKING: * None. * * RETURNS: * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
*/ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, const u16 *new_id) { const u16 *old_id = dev->id; unsigned char model[2][ATA_ID_PROD_LEN + 1]; unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; if (dev->class != new_class) { ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", dev->class, new_class); return 0; } ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); if (strcmp(model[0], model[1])) { ata_dev_printk(dev, KERN_INFO, "model number mismatch " "'%s' != '%s'\n", model[0], model[1]); return 0; } if (strcmp(serial[0], serial[1])) { ata_dev_printk(dev, KERN_INFO, "serial number mismatch " "'%s' != '%s'\n", serial[0], serial[1]); return 0; } return 1; } /** * ata_dev_reread_id - Re-read IDENTIFY data * @dev: target ATA device * @readid_flags: read ID flags * * Re-read IDENTIFY page and make sure @dev is still attached to * the port. * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * 0 on success, negative errno otherwise */ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) { unsigned int class = dev->class; u16 *id = (void *)dev->link->ap->sector_buf; int rc; /* read ID data */ rc = ata_dev_read_id(dev, &class, readid_flags, id); if (rc) return rc; /* is the device still there? */ if (!ata_dev_same_device(dev, class, id)) return -ENODEV; memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); return 0; } /** * ata_dev_revalidate - Revalidate ATA device * @dev: device to revalidate * @new_class: new class code * @readid_flags: read ID flags * * Re-read IDENTIFY page, make sure @dev is still attached to the * port and reconfigure it according to the new IDENTIFY page. 
*
 *      LOCKING:
 *      Kernel thread context (may sleep)
 *
 *      RETURNS:
 *      0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
                       unsigned int readid_flags)
{
        u64 n_sectors = dev->n_sectors;
        u64 n_native_sectors = dev->n_native_sectors;
        int rc;

        if (!ata_dev_enabled(dev))
                return -ENODEV;

        /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
        if (ata_class_enabled(new_class) &&
            new_class != ATA_DEV_ATA &&
            new_class != ATA_DEV_ATAPI &&
            new_class != ATA_DEV_SEMB) {
                ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
                               dev->class, new_class);
                rc = -ENODEV;
                goto fail;
        }

        /* re-read ID */
        rc = ata_dev_reread_id(dev, readid_flags);
        if (rc)
                goto fail;

        /* configure device according to the new ID */
        rc = ata_dev_configure(dev);
        if (rc)
                goto fail;

        /* verify n_sectors hasn't changed */
        if (dev->class != ATA_DEV_ATA || !n_sectors ||
            dev->n_sectors == n_sectors)
                return 0;

        /* n_sectors has changed */
        ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
                       (unsigned long long)n_sectors,
                       (unsigned long long)dev->n_sectors);

        /*
         * Something could have caused HPA to be unlocked
         * involuntarily.  If n_native_sectors hasn't changed and the
         * new size matches it, keep the device.
         */
        if (dev->n_native_sectors == n_native_sectors &&
            dev->n_sectors > n_sectors &&
            dev->n_sectors == n_native_sectors) {
                ata_dev_printk(dev, KERN_WARNING,
                               "new n_sectors matches native, probably "
                               "late HPA unlock, n_sectors updated\n");
                /* use the larger n_sectors */
                return 0;
        }

        /*
         * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
         * unlocking HPA in those cases.
* * https://bugzilla.kernel.org/show_bug.cgi?id=15396 */ if (dev->n_native_sectors == n_native_sectors && dev->n_sectors < n_sectors && n_sectors == n_native_sectors && !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { ata_dev_printk(dev, KERN_WARNING, "old n_sectors matches native, probably " "late HPA lock, will try to unlock HPA\n"); /* try unlocking HPA */ dev->flags |= ATA_DFLAG_UNLOCK_HPA; rc = -EIO; } else rc = -ENODEV; /* restore original n_[native_]sectors and fail */ dev->n_native_sectors = n_native_sectors; dev->n_sectors = n_sectors; fail: ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); return rc; } struct ata_blacklist_entry { const char *model_num; const char *model_rev; unsigned long horkage; }; static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices with DMA related problems under Linux */ { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, { "CRD-84", NULL, ATA_HORKAGE_NODMA }, { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG 
CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ /* NCQ is slow */ { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, /* http://thread.gmane.org/gmane.linux.ide/14907 */ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, /* NCQ is broken */ { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, /* Seagate NCQ + FLUSH CACHE firmware bug */ { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640623AS", 
"SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, { "WDC WD2500JD-00HBB0", 
"WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, /* this one allows HPA unlocking but fails IOs on the area */ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, /* Devices which report 1 sector over size HPA */ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, /* Devices which get the IVB wrong */ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, /* Maybe we should just blacklist TSSTcorp... */ { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, /* Devices that do not need bridging limits applied */ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, /* * Devices which choke on SETXFER. Applies only if both the * device and controller are SATA. 
*/ { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, /* End Marker */ { } }; static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) { const char *p; int len; /* * check for trailing wildcard: *\0 */ p = strchr(patt, wildchar); if (p && ((*(p + 1)) == 0)) len = p - patt; else { len = strlen(name); if (!len) { if (!*patt) return 0; return -1; } } return strncmp(patt, name, len); } static unsigned long ata_dev_blacklisted(const struct ata_device *dev) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; const struct ata_blacklist_entry *ad = ata_device_blacklist; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); while (ad->model_num) { if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { if (ad->model_rev == NULL) return ad->horkage; if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) return ad->horkage; } ad++; } return 0; } static int ata_dma_blacklisted(const struct ata_device *dev) { /* We don't support polling DMA. * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) * if the LLDD handles only interrupts in the HSM_ST_LAST state. */ if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && (dev->flags & ATA_DFLAG_CDB_INTR)) return 1; return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; } /** * ata_is_40wire - check drive side detection * @dev: device * * Perform drive side detection decoding, allowing for device vendors * who can't follow the documentation. */ static int ata_is_40wire(struct ata_device *dev) { if (dev->horkage & ATA_HORKAGE_IVB) return ata_drive_40wire_relaxed(dev->id); return ata_drive_40wire(dev->id); } /** * cable_is_40wire - 40/80/SATA decider * @ap: port to consider * * This function encapsulates the policy for speed management * in one place. 
 *	At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */

static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 * 80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}

/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* only one port of a simplex host may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}

/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	/* @enable is the SETFEATURES sub-command (enable/disable) */
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap with the element count used at dma_map_sg() time,
	 * not the (possibly coalesced) mapped count */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

/**
 *	atapi_check_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int atapi_check_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
	    unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}

/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ may run alongside other NCQ commands but not
		 * with a non-NCQ command in flight */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}

/* no-op ->qc_prep for controllers that need no per-command setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	/* dma_map_sg() returns 0 on failure */
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);
	/* keep the pre-mapping count for dma_unmap_sg() in ata_sg_clean() */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		/* atomically claim a free tag in the allocation bitmap */
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag, then release it in the bitmap */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

/*
 * Finish off a qc: unmap DMA, clear active/exclusive state on the
 * link and port, then invoke the completion callback.  Must be called
 * with the host lock held.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

/* copy taskfile flags and let the LLD fill in the result taskfile */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}

/*
 * A successful non-PIO transfer on a DMA-capable device proves the
 * transfer mode works; clear the DUBIOUS_XFER flag set at mode change.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (ata_tag_internal(qc->tag))
		return;

	if (ata_is_nodata(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* always fill result TF for failed qc */
			fill_result_tf(qc);

			if (!ata_tag_internal(qc->tag))
				ata_qc_schedule_eh(qc);
			else
				__ata_qc_complete(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;

	/* bits set in ap->qc_active but cleared in @qc_active completed */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit set in @qc_active but not in ap->qc_active means the
	 * LLD reported completion of a command we never issued */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1 << tag);
	}

	return nr_done;
}

/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}

/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_read(link, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links go through the port multiplier */
	return sata_pmp_scr_read(link, reg, val);
}

/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_write(link, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}

/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			/* read back to flush the posted write; the value
			 * read into @val is intentionally discarded */
			if (rc == 0)
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}

/**
 *	ata_phys_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	True if the port online status is available and online.
 */
bool ata_phys_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    ata_sstatus_online(sstatus))
		return true;
	return false;
}

/**
 *	ata_phys_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	True if the port offline status is available and offline.
 */
bool ata_phys_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    !ata_sstatus_online(sstatus))
		return true;
	return false;
}

/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  This is identical to
 *	ata_phys_link_online() when there's no slave link.  When
 *	there's a slave link, this function should only be called on
 *	the master link and will return true if any of M/S links is
 *	online.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	True if the port online status is available and online.
 */
bool ata_link_online(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_online(link) ||
		(slave && ata_phys_link_online(slave));
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  This is identical to
 *	ata_phys_link_offline() when there's no slave link.  When
 *	there's a slave link, this function should only be called on
 *	the master link and will return true if both M/S links are
 *	offline.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	True if the port offline status is available and offline.
 */
bool ata_link_offline(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave);	/* shouldn't be called on slave link */

	return ata_phys_link_offline(link) &&
		(!slave || ata_phys_link_offline(slave));
}

#ifdef CONFIG_PM
/*
 * Request a PM operation (@mesg, @action, @ehi_flags) from EH on every
 * port of @host.  When @wait is set, block until each port's EH run
 * finishes and propagate the first non-zero result.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH writes its result through ap->pm_result */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ata_for_each_link(link, ap, HOST_FIRST) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	unsigned int ehi_flags = ATA_EHI_QUIET;
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/*
	 * On some hardware, device fails to respond after spun down
	 * for suspend.  As the device won't be used before being
	 * resumed, we don't need to touch the device.  Ask EH to skip
	 * the usual stuff and proceed directly to suspend.
	 *
	 * http://thread.gmane.org/gmane.linux.ide/46764
	 */
	if (mesg.event == PM_EVENT_SUSPEND)
		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;

	rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
*/ void ata_host_resume(struct ata_host *host) { ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); host->dev->power.power_state = PMSG_ON; /* reenable link pm */ ata_lpm_disable(host); } #endif /** * ata_dev_init - Initialize an ata_device structure * @dev: Device structure to initialize * * Initialize @dev in preparation for probing. * * LOCKING: * Inherited from caller. */ void ata_dev_init(struct ata_device *dev) { struct ata_link *link = ata_dev_phys_link(dev); struct ata_port *ap = link->ap; unsigned long flags; /* SATA spd limit is bound to the attached device, reset together */ link->sata_spd_limit = link->hw_sata_spd_limit; link->sata_spd = 0; /* High bits of dev->flags are used to record warm plug * requests which occur asynchronously. Synchronize using * host lock. */ spin_lock_irqsave(ap->lock, flags); dev->flags &= ~ATA_DFLAG_INIT_MASK; dev->horkage = 0; spin_unlock_irqrestore(ap->lock, flags); memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); dev->pio_mask = UINT_MAX; dev->mwdma_mask = UINT_MAX; dev->udma_mask = UINT_MAX; } /** * ata_link_init - Initialize an ata_link structure * @ap: ATA port link is attached to * @link: Link structure to initialize * @pmp: Port multiplier port number * * Initialize @link. 
* * LOCKING: * Kernel thread context (may sleep) */ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) { int i; /* clear everything except for devices */ memset(link, 0, offsetof(struct ata_link, device[0])); link->ap = ap; link->pmp = pmp; link->active_tag = ATA_TAG_POISON; link->hw_sata_spd_limit = UINT_MAX; /* can't use iterator, ap isn't initialized yet */ for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *dev = &link->device[i]; dev->link = link; dev->devno = dev - link->device; #ifdef CONFIG_ATA_ACPI dev->gtf_filter = ata_acpi_gtf_filter; #endif ata_dev_init(dev); } } /** * sata_link_init_spd - Initialize link->sata_spd_limit * @link: Link to configure sata_spd_limit for * * Initialize @link->[hw_]sata_spd_limit to the currently * configured value. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, -errno on failure. */ int sata_link_init_spd(struct ata_link *link) { u8 spd; int rc; rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); if (rc) return rc; spd = (link->saved_scontrol >> 4) & 0xf; if (spd) link->hw_sata_spd_limit &= (1 << spd) - 1; ata_force_link_limits(link); link->sata_spd_limit = link->hw_sata_spd_limit; return 0; } /** * ata_port_alloc - allocate and initialize basic ATA port resources * @host: ATA host this allocated port belongs to * * Allocate and initialize basic ATA port resources. * * RETURNS: * Allocate ATA port on success, NULL on failure. * * LOCKING: * Inherited from calling layer (may sleep). 
*/ struct ata_port *ata_port_alloc(struct ata_host *host) { struct ata_port *ap; DPRINTK("ENTER\n"); ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (!ap) return NULL; ap->pflags |= ATA_PFLAG_INITIALIZING; ap->lock = &host->lock; ap->print_id = -1; ap->host = host; ap->dev = host->dev; #if defined(ATA_VERBOSE_DEBUG) /* turn on all debugging levels */ ap->msg_enable = 0x00FF; #elif defined(ATA_DEBUG) ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; #else ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); init_completion(&ap->park_req_pending); init_timer_deferrable(&ap->fastdrain_timer); ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; ap->fastdrain_timer.data = (unsigned long)ap; ap->cbl = ATA_CBL_NONE; ata_link_init(ap, &ap->link, 0); #ifdef ATA_IRQ_TRAP ap->stats.unhandled_irq = 1; ap->stats.idle_irq = 1; #endif ata_sff_port_init(ap); return ap; } static void ata_host_release(struct device *gendev, void *res) { struct ata_host *host = dev_get_drvdata(gendev); int i; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (!ap) continue; if (ap->scsi_host) scsi_host_put(ap->scsi_host); kfree(ap->pmp_link); kfree(ap->slave_link); kfree(ap); host->ports[i] = NULL; } dev_set_drvdata(gendev, NULL); } /** * ata_host_alloc - allocate and init basic ATA host resources * @dev: generic device this host is associated with * @max_ports: maximum number of ATA ports associated with this host * * Allocate and initialize basic ATA host resources. LLD calls * this function to allocate a host, initializes it fully and * attaches it using ata_host_register(). * * @max_ports ports are allocated and host->n_ports is * initialized to @max_ports. The caller is allowed to decrease * host->n_ports before calling ata_host_register(). 
The unused * ports will be automatically freed on registration. * * RETURNS: * Allocate ATA host on success, NULL on failure. * * LOCKING: * Inherited from calling layer (may sleep). */ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) { struct ata_host *host; size_t sz; int i; DPRINTK("ENTER\n"); if (!devres_open_group(dev, NULL, GFP_KERNEL)) return NULL; /* alloc a container for our list of ATA ports (buses) */ sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); /* alloc a container for our list of ATA ports (buses) */ host = devres_alloc(ata_host_release, sz, GFP_KERNEL); if (!host) goto err_out; devres_add(dev, host); dev_set_drvdata(dev, host); spin_lock_init(&host->lock); host->dev = dev; host->n_ports = max_ports; /* allocate ports bound to this host */ for (i = 0; i < max_ports; i++) { struct ata_port *ap; ap = ata_port_alloc(host); if (!ap) goto err_out; ap->port_no = i; host->ports[i] = ap; } devres_remove_group(dev, NULL); return host; err_out: devres_release_group(dev, NULL); return NULL; } /** * ata_host_alloc_pinfo - alloc host and init with port_info array * @dev: generic device this host is associated with * @ppi: array of ATA port_info to initialize host with * @n_ports: number of ATA ports attached to this host * * Allocate ATA host and initialize with info from @ppi. If NULL * terminated, @ppi may contain fewer entries than @n_ports. The * last entry will be used for the remaining ports. * * RETURNS: * Allocate ATA host on success, NULL on failure. * * LOCKING: * Inherited from calling layer (may sleep). 
*/ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { const struct ata_port_info *pi; struct ata_host *host; int i, j; host = ata_host_alloc(dev, n_ports); if (!host) return NULL; for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) pi = ppi[j++]; ap->pio_mask = pi->pio_mask; ap->mwdma_mask = pi->mwdma_mask; ap->udma_mask = pi->udma_mask; ap->flags |= pi->flags; ap->link.flags |= pi->link_flags; ap->ops = pi->port_ops; if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) host->ops = pi->port_ops; } return host; } /** * ata_slave_link_init - initialize slave link * @ap: port to initialize slave link for * * Create and initialize slave link for @ap. This enables slave * link handling on the port. * * In libata, a port contains links and a link contains devices. * There is single host link but if a PMP is attached to it, * there can be multiple fan-out links. On SATA, there's usually * a single device connected to a link but PATA and SATA * controllers emulating TF based interface can have two - master * and slave. * * However, there are a few controllers which don't fit into this * abstraction too well - SATA controllers which emulate TF * interface with both master and slave devices but also have * separate SCR register sets for each device. These controllers * need separate links for physical link handling * (e.g. onlineness, link speed) but should be treated like a * traditional M/S controller for everything else (e.g. command * issue, softreset). * * slave_link is libata's way of handling this class of * controllers without impacting core layer too much. For * anything other than physical link handling, the default host * link is used for both master and slave. For physical link * handling, separate @ap->slave_link is used. All dirty details * are implemented inside libata core layer. 
From LLD's POV, the * only difference is that prereset, hardreset and postreset are * called once more for the slave link, so the reset sequence * looks like the following. * * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> * softreset(M) -> postreset(M) -> postreset(S) * * Note that softreset is called only for the master. Softreset * resets both M/S by definition, so SRST on master should handle * both (the standard method will work just fine). * * LOCKING: * Should be called before host is registered. * * RETURNS: * 0 on success, -errno on failure. */ int ata_slave_link_init(struct ata_port *ap) { struct ata_link *link; WARN_ON(ap->slave_link); WARN_ON(ap->flags & ATA_FLAG_PMP); link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; ata_link_init(ap, link, 1); ap->slave_link = link; return 0; } static void ata_host_stop(struct device *gendev, void *res) { struct ata_host *host = dev_get_drvdata(gendev); int i; WARN_ON(!(host->flags & ATA_HOST_STARTED)); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ap->ops->port_stop) ap->ops->port_stop(ap); } if (host->ops->host_stop) host->ops->host_stop(host); } /** * ata_finalize_port_ops - finalize ata_port_operations * @ops: ata_port_operations to finalize * * An ata_port_operations can inherit from another ops and that * ops can again inherit from another. This can go on as many * times as necessary as long as there is no loop in the * inheritance chain. * * Ops tables are finalized when the host is started. NULL or * unspecified entries are inherited from the closet ancestor * which has the method and the entry is populated with it. * After finalization, the ops table directly points to all the * methods and ->inherits is no longer necessary and cleared. * * Using ATA_OP_NULL, inheriting ops can force a method to NULL. * * LOCKING: * None. 
*/ static void ata_finalize_port_ops(struct ata_port_operations *ops) { static DEFINE_SPINLOCK(lock); const struct ata_port_operations *cur; void **begin = (void **)ops; void **end = (void **)&ops->inherits; void **pp; if (!ops || !ops->inherits) return; spin_lock(&lock); for (cur = ops->inherits; cur; cur = cur->inherits) { void **inherit = (void **)cur; for (pp = begin; pp < end; pp++, inherit++) if (!*pp) *pp = *inherit; } for (pp = begin; pp < end; pp++) if (IS_ERR(*pp)) *pp = NULL; ops->inherits = NULL; spin_unlock(&lock); } /** * ata_host_start - start and freeze ports of an ATA host * @host: ATA host to start ports for * * Start and then freeze ports of @host. Started status is * recorded in host->flags, so this function can be called * multiple times. Ports are guaranteed to get started only * once. If host->ops isn't initialized yet, its set to the * first non-dummy port ops. * * LOCKING: * Inherited from calling layer (may sleep). * * RETURNS: * 0 if all ports are started successfully, -errno otherwise. 
*/ int ata_host_start(struct ata_host *host) { int have_stop = 0; void *start_dr = NULL; int i, rc; if (host->flags & ATA_HOST_STARTED) return 0; ata_finalize_port_ops(host->ops); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_finalize_port_ops(ap->ops); if (!host->ops && !ata_port_is_dummy(ap)) host->ops = ap->ops; if (ap->ops->port_stop) have_stop = 1; } if (host->ops->host_stop) have_stop = 1; if (have_stop) { start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); if (!start_dr) return -ENOMEM; } for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ap->ops->port_start) { rc = ap->ops->port_start(ap); if (rc) { if (rc != -ENODEV) dev_printk(KERN_ERR, host->dev, "failed to start port %d " "(errno=%d)\n", i, rc); goto err_out; } } ata_eh_freeze_port(ap); } if (start_dr) devres_add(host->dev, start_dr); host->flags |= ATA_HOST_STARTED; return 0; err_out: while (--i >= 0) { struct ata_port *ap = host->ports[i]; if (ap->ops->port_stop) ap->ops->port_stop(ap); } devres_free(start_dr); return rc; } /** * ata_sas_host_init - Initialize a host struct * @host: host to initialize * @dev: device host is attached to * @flags: host flags * @ops: port_ops * * LOCKING: * PCI/etc. bus probe sem. * */ /* KILLME - the only user left is ipr */ void ata_host_init(struct ata_host *host, struct device *dev, unsigned long flags, struct ata_port_operations *ops) { spin_lock_init(&host->lock); host->dev = dev; host->flags = flags; host->ops = ops; } static void async_port_probe(void *data, async_cookie_t cookie) { int rc; struct ata_port *ap = data; /* * If we're not allowed to scan this host in parallel, * we need to wait until all previous scans have completed * before going further. * Jeff Garzik says this is only within a controller, so we * don't need to wait for port 0, only for later ports. 
*/ if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) async_synchronize_cookie(cookie); /* probe */ if (ap->ops->error_handler) { struct ata_eh_info *ehi = &ap->link.eh_info; unsigned long flags; /* kick EH for boot probing */ spin_lock_irqsave(ap->lock, flags); ehi->probe_mask |= ATA_ALL_DEVICES; ehi->action |= ATA_EH_RESET | ATA_EH_LPM; ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; ap->pflags &= ~ATA_PFLAG_INITIALIZING; ap->pflags |= ATA_PFLAG_LOADING; ata_port_schedule_eh(ap); spin_unlock_irqrestore(ap->lock, flags); /* wait for EH to finish */ ata_port_wait_eh(ap); } else { DPRINTK("ata%u: bus probe begin\n", ap->print_id); rc = ata_bus_probe(ap); DPRINTK("ata%u: bus probe end\n", ap->print_id); if (rc) { /* FIXME: do something useful here? * Current libata behavior will * tear down everything when * the module is removed * or the h/w is unplugged. */ } } /* in order to keep device order, we need to synchronize at this point */ async_synchronize_cookie(cookie); ata_scsi_scan_host(ap, 1); } /** * ata_host_register - register initialized ATA host * @host: ATA host to register * @sht: template for SCSI host * * Register initialized ATA host. @host is allocated using * ata_host_alloc() and fully initialized by LLD. This function * starts ports, registers @host with ATA and SCSI layers and * probe registered devices. * * LOCKING: * Inherited from calling layer (may sleep). * * RETURNS: * 0 on success, -errno otherwise. */ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) { int i, rc; /* host must have been started */ if (!(host->flags & ATA_HOST_STARTED)) { dev_printk(KERN_ERR, host->dev, "BUG: trying to register unstarted host\n"); WARN_ON(1); return -EINVAL; } /* Blow away unused ports. This happens when LLD can't * determine the exact number of ports to allocate at * allocation time. 
*/ for (i = host->n_ports; host->ports[i]; i++) kfree(host->ports[i]); /* give ports names and add SCSI hosts */ for (i = 0; i < host->n_ports; i++) host->ports[i]->print_id = ata_print_id++; rc = ata_scsi_add_hosts(host, sht); if (rc) return rc; /* associate with ACPI nodes */ ata_acpi_associate(host); /* set cable, sata_spd_limit and report */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; unsigned long xfer_mask; /* set SATA cable type if still unset */ if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) ap->cbl = ATA_CBL_SATA; /* init sata_spd_limit to the current value */ sata_link_init_spd(&ap->link); if (ap->slave_link) sata_link_init_spd(ap->slave_link); /* print per-port info to dmesg */ xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, ap->udma_mask); if (!ata_port_is_dummy(ap)) { ata_port_printk(ap, KERN_INFO, "%cATA max %s %s\n", (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', ata_mode_string(xfer_mask), ap->link.eh_info.desc); ata_ehi_clear_desc(&ap->link.eh_info); } else ata_port_printk(ap, KERN_INFO, "DUMMY\n"); } /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; async_schedule(async_port_probe, ap); } return 0; } /** * ata_host_activate - start host, request IRQ and register it * @host: target ATA host * @irq: IRQ to request * @irq_handler: irq_handler used when requesting IRQ * @irq_flags: irq_flags used when requesting IRQ * @sht: scsi_host_template to use when registering the host * * After allocating an ATA host and initializing it, most libata * LLDs perform three steps to activate the host - start host, * request IRQ and register it. This helper takes necessasry * arguments and performs the three steps in one go. * * An invalid IRQ skips the IRQ registration and expects the host to * have set polling mode on the port. In this case, @irq_handler * should be NULL. * * LOCKING: * Inherited from calling layer (may sleep). 
* * RETURNS: * 0 on success, -errno otherwise. */ int ata_host_activate(struct ata_host *host, int irq, irq_handler_t irq_handler, unsigned long irq_flags, struct scsi_host_template *sht) { int i, rc; rc = ata_host_start(host); if (rc) return rc; /* Special case for polling mode */ if (!irq) { WARN_ON(irq_handler); return ata_host_register(host, sht); } rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, dev_driver_string(host->dev), host); if (rc) return rc; for (i = 0; i < host->n_ports; i++) ata_port_desc(host->ports[i], "irq %d", irq); rc = ata_host_register(host, sht); /* if failed, just free the IRQ and leave ports alone */ if (rc) devm_free_irq(host->dev, irq, host); return rc; } /** * ata_port_detach - Detach ATA port in prepration of device removal * @ap: ATA port to be detached * * Detach all ATA devices and the associated SCSI devices of @ap; * then, remove the associated SCSI host. @ap is guaranteed to * be quiescent on return from this function. * * LOCKING: * Kernel thread context (may sleep). */ static void ata_port_detach(struct ata_port *ap) { unsigned long flags; if (!ap->ops->error_handler) goto skip_eh; /* tell EH we're leaving & flush EH */ spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_UNLOADING; ata_port_schedule_eh(ap); spin_unlock_irqrestore(ap->lock, flags); /* wait till EH commits suicide */ ata_port_wait_eh(ap); /* it better be dead now */ WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); cancel_rearming_delayed_work(&ap->hotplug_task); skip_eh: /* remove the associated SCSI host */ scsi_remove_host(ap->scsi_host); } /** * ata_host_detach - Detach all ports of an ATA host * @host: Host to detach * * Detach all ports of @host. * * LOCKING: * Kernel thread context (may sleep). 
*/ void ata_host_detach(struct ata_host *host) { int i; for (i = 0; i < host->n_ports; i++) ata_port_detach(host->ports[i]); /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); } #ifdef CONFIG_PCI /** * ata_pci_remove_one - PCI layer callback for device removal * @pdev: PCI device that was removed * * PCI layer indicates to libata via this hook that hot-unplug or * module unload event has occurred. Detach all ports. Resource * release is handled via devres. * * LOCKING: * Inherited from PCI layer (may sleep). */ void ata_pci_remove_one(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct ata_host *host = dev_get_drvdata(dev); ata_host_detach(host); } /* move to PCI subsystem */ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) { unsigned long tmp = 0; switch (bits->width) { case 1: { u8 tmp8 = 0; pci_read_config_byte(pdev, bits->reg, &tmp8); tmp = tmp8; break; } case 2: { u16 tmp16 = 0; pci_read_config_word(pdev, bits->reg, &tmp16); tmp = tmp16; break; } case 4: { u32 tmp32 = 0; pci_read_config_dword(pdev, bits->reg, &tmp32); tmp = tmp32; break; } default: return -EINVAL; } tmp &= bits->mask; return (tmp == bits->val) ? 
1 : 0; } #ifdef CONFIG_PM void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) { pci_save_state(pdev); pci_disable_device(pdev); if (mesg.event & PM_EVENT_SLEEP) pci_set_power_state(pdev, PCI_D3hot); } int ata_pci_device_do_resume(struct pci_dev *pdev) { int rc; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); rc = pcim_enable_device(pdev); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "failed to enable device after resume (%d)\n", rc); return rc; } pci_set_master(pdev); return 0; } int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc = 0; rc = ata_host_suspend(host, mesg); if (rc) return rc; ata_pci_device_do_suspend(pdev, mesg); return 0; } int ata_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc == 0) ata_host_resume(host); return rc; } #endif /* CONFIG_PM */ #endif /* CONFIG_PCI */ static int __init ata_parse_force_one(char **cur, struct ata_force_ent *force_ent, const char **reason) { /* FIXME: Currently, there's no way to tag init const data and * using __initdata causes build failure on some versions of * gcc. Once __initdataconst is implemented, add const to the * following structure. 
*/ static struct ata_force_param force_tbl[] __initdata = { { "40c", .cbl = ATA_CBL_PATA40 }, { "80c", .cbl = ATA_CBL_PATA80 }, { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, { "unk", .cbl = ATA_CBL_PATA_UNK }, { "ign", .cbl = ATA_CBL_PATA_IGN }, { "sata", .cbl = ATA_CBL_SATA }, { "1.5Gbps", .spd_limit = 1 }, { "3.0Gbps", .spd_limit = 2 }, { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, { "udma100", .xfer_mask 
= 1 << (ATA_SHIFT_UDMA + 5) }, { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, }; char *start = *cur, *p = *cur; char *id, *val, *endp; const struct ata_force_param *match_fp = NULL; int nr_matches = 0, i; /* find where this param ends and update *cur */ while (*p != '\0' && *p != ',') p++; if (*p == '\0') *cur = p; else *cur = p + 1; *p = '\0'; /* parse */ p = strchr(start, ':'); if (!p) { val = strstrip(start); goto parse_val; } *p = '\0'; id = strstrip(start); val = strstrip(p + 1); /* parse id */ p = strchr(id, '.'); if (p) { *p++ = '\0'; force_ent->device = simple_strtoul(p, &endp, 10); if (p == endp || *endp != '\0') { *reason = "invalid device"; return -EINVAL; } } force_ent->port = simple_strtoul(id, &endp, 10); if (p == endp || *endp != '\0') { *reason = "invalid port/link"; return -EINVAL; } parse_val: /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { const struct ata_force_param *fp = &force_tbl[i]; if (strncasecmp(val, fp->name, strlen(val))) continue; nr_matches++; match_fp = fp; if (strcasecmp(val, fp->name) == 0) { nr_matches = 1; break; } } if (!nr_matches) { *reason = "unknown value"; return -EINVAL; } if (nr_matches > 1) { *reason = "ambigious value"; return -EINVAL; } force_ent->param = *match_fp; return 0; } static void __init ata_parse_force_param(void) { int idx = 0, size = 1; int last_port = -1, last_device = -1; char *p, *cur, *next; /* calculate maximum number of params and allocate force_tbl */ for (p = ata_force_param_buf; *p; p++) if (*p == ',') size++; ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, 
GFP_KERNEL); if (!ata_force_tbl) { printk(KERN_WARNING "ata: failed to extend force table, " "libata.force ignored\n"); return; } /* parse and populate the table */ for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { const char *reason = ""; struct ata_force_ent te = { .port = -1, .device = -1 }; next = cur; if (ata_parse_force_one(&next, &te, &reason)) { printk(KERN_WARNING "ata: failed to parse force " "parameter \"%s\" (%s)\n", cur, reason); continue; } if (te.port == -1) { te.port = last_port; te.device = last_device; } ata_force_tbl[idx++] = te; last_port = te.port; last_device = te.device; } ata_force_tbl_size = idx; } static int __init ata_init(void) { int rc = -ENOMEM; ata_parse_force_param(); ata_aux_wq = create_singlethread_workqueue("ata_aux"); if (!ata_aux_wq) goto fail; rc = ata_sff_init(); if (rc) goto fail; printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; fail: kfree(ata_force_tbl); if (ata_aux_wq) destroy_workqueue(ata_aux_wq); return rc; } static void __exit ata_exit(void) { ata_sff_exit(); kfree(ata_force_tbl); destroy_workqueue(ata_aux_wq); } subsys_initcall(ata_init); module_exit(ata_exit); static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); int ata_ratelimit(void) { return __ratelimit(&ratelimit); } /** * ata_wait_register - wait until register value changes * @reg: IO-mapped register * @mask: Mask to apply to read register value * @val: Wait condition * @interval: polling interval in milliseconds * @timeout: timeout in milliseconds * * Waiting for some bits of register to change is a common * operation for ATA controllers. This function reads 32bit LE * IO-mapped register @reg and tests for the following condition. * * (*@reg & mask) != val * * If the condition is met, it returns; otherwise, the process is * repeated after @interval_msec until timeout. * * LOCKING: * Kernel thread context (may sleep) * * RETURNS: * The final register value. 
*/ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout) { unsigned long deadline; u32 tmp; tmp = ioread32(reg); /* Calculate timeout _after_ the first read to make sure * preceding writes reach the controller before starting to * eat away the timeout. */ deadline = ata_deadline(jiffies, timeout); while ((tmp & mask) == val && time_before(jiffies, deadline)) { msleep(interval); tmp = ioread32(reg); } return tmp; } /* * Dummy port_ops */ static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) { return AC_ERR_SYSTEM; } static void ata_dummy_error_handler(struct ata_port *ap) { /* truly dummy */ } struct ata_port_operations ata_dummy_port_ops = { .qc_prep = ata_noop_qc_prep, .qc_issue = ata_dummy_qc_issue, .error_handler = ata_dummy_error_handler, }; const struct ata_port_info ata_dummy_port_info = { .port_ops = &ata_dummy_port_ops, }; /* * libata is essentially a library of internal helper functions for * low-level ATA host controller drivers. As such, the API/ABI is * likely to change as new drivers are added and updated. * Do not depend on ABI/API stability. 
*/ EXPORT_SYMBOL_GPL(sata_deb_timing_normal); EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); EXPORT_SYMBOL_GPL(sata_deb_timing_long); EXPORT_SYMBOL_GPL(ata_base_port_ops); EXPORT_SYMBOL_GPL(sata_port_ops); EXPORT_SYMBOL_GPL(ata_dummy_port_ops); EXPORT_SYMBOL_GPL(ata_dummy_port_info); EXPORT_SYMBOL_GPL(ata_link_next); EXPORT_SYMBOL_GPL(ata_dev_next); EXPORT_SYMBOL_GPL(ata_std_bios_param); EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); EXPORT_SYMBOL_GPL(ata_host_init); EXPORT_SYMBOL_GPL(ata_host_alloc); EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); EXPORT_SYMBOL_GPL(ata_slave_link_init); EXPORT_SYMBOL_GPL(ata_host_start); EXPORT_SYMBOL_GPL(ata_host_register); EXPORT_SYMBOL_GPL(ata_host_activate); EXPORT_SYMBOL_GPL(ata_host_detach); EXPORT_SYMBOL_GPL(ata_sg_init); EXPORT_SYMBOL_GPL(ata_qc_complete); EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); EXPORT_SYMBOL_GPL(atapi_cmd_type); EXPORT_SYMBOL_GPL(ata_tf_to_fis); EXPORT_SYMBOL_GPL(ata_tf_from_fis); EXPORT_SYMBOL_GPL(ata_pack_xfermask); EXPORT_SYMBOL_GPL(ata_unpack_xfermask); EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); EXPORT_SYMBOL_GPL(ata_mode_string); EXPORT_SYMBOL_GPL(ata_id_xfermask); EXPORT_SYMBOL_GPL(ata_do_set_mode); EXPORT_SYMBOL_GPL(ata_std_qc_defer); EXPORT_SYMBOL_GPL(ata_noop_qc_prep); EXPORT_SYMBOL_GPL(ata_dev_disable); EXPORT_SYMBOL_GPL(sata_set_spd); EXPORT_SYMBOL_GPL(ata_wait_after_reset); EXPORT_SYMBOL_GPL(sata_link_debounce); EXPORT_SYMBOL_GPL(sata_link_resume); EXPORT_SYMBOL_GPL(ata_std_prereset); EXPORT_SYMBOL_GPL(sata_link_hardreset); EXPORT_SYMBOL_GPL(sata_std_hardreset); EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_dev_pair); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_wait_register); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_slave_config); EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 
EXPORT_SYMBOL_GPL(sata_scr_valid); EXPORT_SYMBOL_GPL(sata_scr_read); EXPORT_SYMBOL_GPL(sata_scr_write); EXPORT_SYMBOL_GPL(sata_scr_write_flush); EXPORT_SYMBOL_GPL(ata_link_online); EXPORT_SYMBOL_GPL(ata_link_offline); #ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_host_suspend); EXPORT_SYMBOL_GPL(ata_host_resume); #endif /* CONFIG_PM */ EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); EXPORT_SYMBOL_GPL(ata_do_dev_read_id); EXPORT_SYMBOL_GPL(ata_scsi_simulate); EXPORT_SYMBOL_GPL(ata_pio_need_iordy); EXPORT_SYMBOL_GPL(ata_timing_find_mode); EXPORT_SYMBOL_GPL(ata_timing_compute); EXPORT_SYMBOL_GPL(ata_timing_merge); EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); EXPORT_SYMBOL_GPL(ata_pci_remove_one); #ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); EXPORT_SYMBOL_GPL(ata_pci_device_suspend); EXPORT_SYMBOL_GPL(ata_pci_device_resume); #endif /* CONFIG_PM */ #endif /* CONFIG_PCI */ EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); EXPORT_SYMBOL_GPL(ata_ehi_push_desc); EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); EXPORT_SYMBOL_GPL(ata_port_desc); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(ata_port_pbar_desc); #endif /* CONFIG_PCI */ EXPORT_SYMBOL_GPL(ata_port_schedule_eh); EXPORT_SYMBOL_GPL(ata_link_abort); EXPORT_SYMBOL_GPL(ata_port_abort); EXPORT_SYMBOL_GPL(ata_port_freeze); EXPORT_SYMBOL_GPL(sata_async_notification); EXPORT_SYMBOL_GPL(ata_eh_freeze_port); EXPORT_SYMBOL_GPL(ata_eh_thaw_port); EXPORT_SYMBOL_GPL(ata_eh_qc_complete); EXPORT_SYMBOL_GPL(ata_eh_qc_retry); EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); EXPORT_SYMBOL_GPL(ata_do_eh); EXPORT_SYMBOL_GPL(ata_std_error_handler); EXPORT_SYMBOL_GPL(ata_cable_40wire); EXPORT_SYMBOL_GPL(ata_cable_80wire); EXPORT_SYMBOL_GPL(ata_cable_unknown); EXPORT_SYMBOL_GPL(ata_cable_ignore); EXPORT_SYMBOL_GPL(ata_cable_sata);
gpl-2.0
MassStash/htc_m9_kernel_sense_5.0.2
drivers/power/bq28400_battery.c
768
25239
/* Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * High Level description: * http://www.ti.com/lit/ds/symlink/bq28400.pdf * Thechnical Reference: * http://www.ti.com/lit/ug/sluu431/sluu431.pdf */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/debugfs.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/power_supply.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> #include <linux/printk.h> #define BQ28400_NAME "bq28400" #define BQ28400_REV "1.0" /* SBS Commands (page 63) */ #define SBS_MANUFACTURER_ACCESS 0x00 #define SBS_BATTERY_MODE 0x03 #define SBS_TEMPERATURE 0x08 #define SBS_VOLTAGE 0x09 #define SBS_CURRENT 0x0A #define SBS_AVG_CURRENT 0x0B #define SBS_MAX_ERROR 0x0C #define SBS_RSOC 0x0D /* Relative State Of Charge */ #define SBS_REMAIN_CAPACITY 0x0F #define SBS_FULL_CAPACITY 0x10 #define SBS_CHG_CURRENT 0x14 #define SBS_CHG_VOLTAGE 0x15 #define SBS_BATTERY_STATUS 0x16 #define SBS_CYCLE_COUNT 0x17 #define SBS_DESIGN_CAPACITY 0x18 #define SBS_DESIGN_VOLTAGE 0x19 #define SBS_SPEC_INFO 0x1A #define SBS_MANUFACTURE_DATE 0x1B #define SBS_SERIAL_NUMBER 0x1C #define SBS_MANUFACTURER_NAME 0x20 #define SBS_DEVICE_NAME 0x21 #define SBS_DEVICE_CHEMISTRY 0x22 #define SBS_MANUFACTURER_DATA 0x23 #define SBS_AUTHENTICATE 0x2F #define SBS_CELL_VOLTAGE1 0x3E #define SBS_CELL_VOLTAGE2 0x3F /* Extended SBS Commands 
(page 71) */ #define SBS_FET_CONTROL 0x46 #define SBS_SAFETY_ALERT 0x50 #define SBS_SAFETY_STATUS 0x51 #define SBS_PE_ALERT 0x52 #define SBS_PE_STATUS 0x53 #define SBS_OPERATION_STATUS 0x54 #define SBS_CHARGING_STATUS 0x55 #define SBS_FET_STATUS 0x56 #define SBS_PACK_VOLTAGE 0x5A #define SBS_TS0_TEMPERATURE 0x5E #define SBS_FULL_ACCESS_KEY 0x61 #define SBS_PF_KEY 0x62 #define SBS_AUTH_KEY3 0x63 #define SBS_AUTH_KEY2 0x64 #define SBS_AUTH_KEY1 0x65 #define SBS_AUTH_KEY0 0x66 #define SBS_MANUFACTURER_INFO 0x70 #define SBS_SENSE_RESISTOR 0x71 #define SBS_TEMP_RANGE 0x72 /* SBS Sub-Commands (16 bits) */ /* SBS_MANUFACTURER_ACCESS CMD */ #define SUBCMD_DEVICE_TYPE 0x01 #define SUBCMD_FIRMWARE_VERSION 0x02 #define SUBCMD_HARDWARE_VERSION 0x03 #define SUBCMD_DF_CHECKSUM 0x04 #define SUBCMD_EDV 0x05 #define SUBCMD_CHEMISTRY_ID 0x08 /* SBS_CHARGING_STATUS */ #define CHG_STATUS_BATTERY_DEPLETED BIT(0) #define CHG_STATUS_OVERCHARGE BIT(1) #define CHG_STATUS_OVERCHARGE_CURRENT BIT(2) #define CHG_STATUS_OVERCHARGE_VOLTAGE BIT(3) #define CHG_STATUS_CELL_BALANCING BIT(6) #define CHG_STATUS_HOT_TEMP_CHARGING BIT(8) #define CHG_STATUS_STD1_TEMP_CHARGING BIT(9) #define CHG_STATUS_STD2_TEMP_CHARGING BIT(10) #define CHG_STATUS_LOW_TEMP_CHARGING BIT(11) #define CHG_STATUS_PRECHARGING_EXIT BIT(13) #define CHG_STATUS_SUSPENDED BIT(14) #define CHG_STATUS_DISABLED BIT(15) /* SBS_FET_STATUS */ #define FET_STATUS_DISCHARGE BIT(1) #define FET_STATUS_CHARGE BIT(2) #define FET_STATUS_PRECHARGE BIT(3) /* SBS_BATTERY_STATUS */ #define BAT_STATUS_SBS_ERROR 0x0F #define BAT_STATUS_EMPTY BIT(4) #define BAT_STATUS_FULL BIT(5) #define BAT_STATUS_DISCHARGING BIT(6) #define BAT_STATUS_OVER_TEMPERATURE BIT(12) #define BAT_STATUS_OVER_CHARGED BIT(15) #define ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN (-2731) #define BQ_TERMINATION_CURRENT_MA 200 #define BQ_MAX_STR_LEN 32 struct bq28400_device { struct i2c_client *client; struct delayed_work periodic_user_space_update_work; struct dentry *dent; struct 
power_supply batt_psy; struct power_supply *dc_psy; bool is_charging_enabled; u32 temp_cold; /* in degree celsius */ u32 temp_hot; /* in degree celsius */ }; static struct bq28400_device *bq28400_dev; static enum power_supply_property pm_power_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; struct debug_reg { char *name; u8 reg; u16 subcmd; }; static int fake_battery = -EINVAL; module_param(fake_battery, int, 0644); #define BQ28400_DEBUG_REG(x) {#x, SBS_##x, 0} #define BQ28400_DEBUG_SUBREG(x, y) {#y, SBS_##x, SUBCMD_##y} /* Note: Some register can be read only in Unsealed mode */ static struct debug_reg bq28400_debug_regs[] = { BQ28400_DEBUG_REG(MANUFACTURER_ACCESS), BQ28400_DEBUG_REG(BATTERY_MODE), BQ28400_DEBUG_REG(TEMPERATURE), BQ28400_DEBUG_REG(VOLTAGE), BQ28400_DEBUG_REG(CURRENT), BQ28400_DEBUG_REG(AVG_CURRENT), BQ28400_DEBUG_REG(MAX_ERROR), BQ28400_DEBUG_REG(RSOC), BQ28400_DEBUG_REG(REMAIN_CAPACITY), BQ28400_DEBUG_REG(FULL_CAPACITY), BQ28400_DEBUG_REG(CHG_CURRENT), BQ28400_DEBUG_REG(CHG_VOLTAGE), BQ28400_DEBUG_REG(BATTERY_STATUS), BQ28400_DEBUG_REG(CYCLE_COUNT), BQ28400_DEBUG_REG(DESIGN_CAPACITY), BQ28400_DEBUG_REG(DESIGN_VOLTAGE), BQ28400_DEBUG_REG(SPEC_INFO), BQ28400_DEBUG_REG(MANUFACTURE_DATE), BQ28400_DEBUG_REG(SERIAL_NUMBER), BQ28400_DEBUG_REG(MANUFACTURER_NAME), BQ28400_DEBUG_REG(DEVICE_NAME), BQ28400_DEBUG_REG(DEVICE_CHEMISTRY), BQ28400_DEBUG_REG(MANUFACTURER_DATA), BQ28400_DEBUG_REG(AUTHENTICATE), BQ28400_DEBUG_REG(CELL_VOLTAGE1), BQ28400_DEBUG_REG(CELL_VOLTAGE2), BQ28400_DEBUG_REG(SAFETY_ALERT), BQ28400_DEBUG_REG(SAFETY_STATUS), BQ28400_DEBUG_REG(PE_ALERT), BQ28400_DEBUG_REG(PE_STATUS), 
BQ28400_DEBUG_REG(OPERATION_STATUS), BQ28400_DEBUG_REG(CHARGING_STATUS), BQ28400_DEBUG_REG(FET_STATUS), BQ28400_DEBUG_REG(FULL_ACCESS_KEY), BQ28400_DEBUG_REG(PF_KEY), BQ28400_DEBUG_REG(MANUFACTURER_INFO), BQ28400_DEBUG_REG(SENSE_RESISTOR), BQ28400_DEBUG_REG(TEMP_RANGE), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, DEVICE_TYPE), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, FIRMWARE_VERSION), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, HARDWARE_VERSION), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, DF_CHECKSUM), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, EDV), BQ28400_DEBUG_SUBREG(MANUFACTURER_ACCESS, CHEMISTRY_ID), }; static int bq28400_read_reg(struct i2c_client *client, u8 reg) { int val; val = i2c_smbus_read_word_data(client, reg); if (val < 0) pr_err("i2c read fail. reg = 0x%x.ret = %d.\n", reg, val); else pr_debug("reg = 0x%02X.val = 0x%04X.\n", reg , val); return val; } static int bq28400_write_reg(struct i2c_client *client, u8 reg, u16 val) { int ret; ret = i2c_smbus_write_word_data(client, reg, val); if (ret < 0) pr_err("i2c read fail. reg = 0x%x.val = 0x%x.ret = %d.\n", reg, val, ret); else pr_debug("reg = 0x%02X.val = 0x%02X.\n", reg , val); return ret; } static int bq28400_read_subcmd(struct i2c_client *client, u8 reg, u16 subcmd) { int ret; u8 buf[4]; u16 val = 0; buf[0] = reg; buf[1] = subcmd & 0xFF; buf[2] = (subcmd >> 8) & 0xFF; /* Control sub-command */ ret = i2c_master_send(client, buf, 3); if (ret < 0) { pr_err("i2c tx fail. reg = 0x%x.ret = %d.\n", reg, ret); return ret; } udelay(66); /* Read Result of subcmd */ ret = i2c_master_send(client, buf, 1); memset(buf, 0xAA, sizeof(buf)); ret = i2c_master_recv(client, buf, 2); if (ret < 0) { pr_err("i2c rx fail. 
reg = 0x%x.ret = %d.\n", reg, ret); return ret; } val = (buf[1] << 8) + buf[0]; pr_debug("reg = 0x%02X.subcmd = 0x%x.val = 0x%04X.\n", reg , subcmd, val); return val; } static int bq28400_read_block(struct i2c_client *client, u8 reg, u8 len, u8 *buf) { int ret; u32 val; ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf); val = buf[0] + (buf[1] << 8) + (buf[2] << 16) + (buf[3] << 24); if (ret < 0) pr_err("i2c read fail. reg = 0x%x.ret = %d.\n", reg, ret); else pr_debug("reg = 0x%02X.val = 0x%04X.\n", reg , val); return val; } /* * Read a string from a device. * Returns string length on success or error on failure (negative value). */ static int bq28400_read_string(struct i2c_client *client, u8 reg, char *str, u8 max_len) { int ret; int len; ret = bq28400_read_block(client, reg, max_len, str); if (ret < 0) return ret; len = str[0]; /* Actual length */ if (len > max_len - 2) { /* reduce len byte and null */ pr_err("len = %d invalid.\n", len); return -EINVAL; } memcpy(&str[0], &str[1], len); /* Move sting to the start */ str[len] = 0; /* put NULL after actual size */ pr_debug("len = %d.str = %s.\n", len, str); return len; } #define BQ28400_INVALID_TEMPERATURE -999 /* * Return the battery temperature in tenths of degree Celsius * Or -99.9 C if something fails. */ static int bq28400_read_temperature(struct i2c_client *client) { int temp; /* temperature resolution 0.1 Kelvin */ temp = bq28400_read_reg(client, SBS_TEMPERATURE); if (temp < 0) return BQ28400_INVALID_TEMPERATURE; temp = temp + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN; pr_debug("temp = %d C\n", temp/10); return temp; } /* * Return the battery Voltage in milivolts 0..20 V * Or < 0 if something fails. */ static int bq28400_read_voltage(struct i2c_client *client) { int mvolt = 0; mvolt = bq28400_read_reg(client, SBS_VOLTAGE); if (mvolt < 0) return mvolt; pr_debug("volt = %d mV.\n", mvolt); return mvolt; } /* * Return the battery Current in miliamps * Or 0 if something fails. 
* Positive current indicates charging * Negative current indicates discharging. * Current-now is calculated every second. */ static int bq28400_read_current(struct i2c_client *client) { s16 current_ma = 0; current_ma = bq28400_read_reg(client, SBS_CURRENT); pr_debug("current = %d mA.\n", current_ma); return current_ma; } /* * Return the Average battery Current in miliamps * Or 0 if something fails. * Positive current indicates charging * Negative current indicates discharging. * Average Current is the rolling 1 minute average current. */ static int bq28400_read_avg_current(struct i2c_client *client) { s16 current_ma = 0; current_ma = bq28400_read_reg(client, SBS_AVG_CURRENT); pr_debug("avg_current=%d mA.\n", current_ma); return current_ma; } /* * Return the battery Relative-State-Of-Charge 0..100 % * Or negative value if something fails. */ static int bq28400_read_rsoc(struct i2c_client *client) { int percentage = 0; if (fake_battery != -EINVAL) { pr_debug("Reporting Fake SOC = %d\n", fake_battery); return fake_battery; } /* This register is only 1 byte */ percentage = i2c_smbus_read_byte_data(client, SBS_RSOC); if (percentage < 0) { pr_err("I2C failure when reading rsoc.\n"); return percentage; } pr_debug("percentage = %d.\n", percentage); return percentage; } /* * Return the battery Capacity in mAh. * Or 0 if something fails. */ static int bq28400_read_full_capacity(struct i2c_client *client) { int capacity = 0; capacity = bq28400_read_reg(client, SBS_FULL_CAPACITY); if (capacity < 0) return 0; pr_debug("full-capacity = %d mAh.\n", capacity); return capacity; } /* * Return the battery Capacity in mAh. * Or 0 if something fails. 
*/ static int bq28400_read_remain_capacity(struct i2c_client *client) { int capacity = 0; capacity = bq28400_read_reg(client, SBS_REMAIN_CAPACITY); if (capacity < 0) return 0; pr_debug("remain-capacity = %d mAh.\n", capacity); return capacity; } static int bq28400_enable_charging(struct bq28400_device *bq28400_dev, bool enable) { int ret; static bool is_charging_enabled; if (bq28400_dev->dc_psy == NULL) { bq28400_dev->dc_psy = power_supply_get_by_name("dc"); if (bq28400_dev->dc_psy == NULL) { pr_err("fail to get dc-psy.\n"); return -ENODEV; } } if (is_charging_enabled == enable) { pr_debug("Charging enable already = %d.\n", enable); return 0; } ret = power_supply_set_online(bq28400_dev->dc_psy, enable); if (ret < 0) { pr_err("fail to set dc-psy online to %d.\n", enable); return ret; } is_charging_enabled = enable; pr_debug("Charging enable = %d.\n", enable); return 0; } static int bq28400_get_prop_status(struct i2c_client *client) { int status = POWER_SUPPLY_STATUS_UNKNOWN; int rsoc; s16 current_ma = 0; u16 battery_status; int temperature; struct bq28400_device *dev = i2c_get_clientdata(client); battery_status = bq28400_read_reg(client, SBS_BATTERY_STATUS); rsoc = bq28400_read_rsoc(client); current_ma = bq28400_read_current(client); temperature = bq28400_read_temperature(client); temperature = temperature / 10; /* in degree celsius */ if (battery_status & BAT_STATUS_EMPTY) pr_debug("Battery report Empty.\n"); /* Battery may report FULL before rsoc is 100% * for protection and cell-balancing. * The FULL report may remain when rsoc drops from 100%. * If battery is full but DC-Jack is removed then report discahrging. 
*/ if (battery_status & BAT_STATUS_FULL) { pr_debug("Battery report Full.\n"); bq28400_enable_charging(bq28400_dev, false); if (current_ma < 0) return POWER_SUPPLY_STATUS_DISCHARGING; return POWER_SUPPLY_STATUS_FULL; } if (rsoc == 100) { bq28400_enable_charging(bq28400_dev, false); pr_debug("Full.\n"); return POWER_SUPPLY_STATUS_FULL; } /* Enable charging when battery is not full and temperature is ok */ if ((temperature > dev->temp_cold) && (temperature < dev->temp_hot)) bq28400_enable_charging(bq28400_dev, true); else bq28400_enable_charging(bq28400_dev, false); /* * Positive current indicates charging * Negative current indicates discharging. * Charging is stopped at termination-current. */ if (current_ma < 0) { pr_debug("Discharging.\n"); status = POWER_SUPPLY_STATUS_DISCHARGING; } else if (current_ma > BQ_TERMINATION_CURRENT_MA) { pr_debug("Charging.\n"); status = POWER_SUPPLY_STATUS_CHARGING; } else { pr_debug("Not Charging.\n"); status = POWER_SUPPLY_STATUS_NOT_CHARGING; } return status; } static int bq28400_get_prop_charge_type(struct i2c_client *client) { u16 battery_status; u16 chg_status; u16 fet_status; battery_status = bq28400_read_reg(client, SBS_BATTERY_STATUS); chg_status = bq28400_read_reg(client, SBS_CHARGING_STATUS); fet_status = bq28400_read_reg(client, SBS_FET_STATUS); if (battery_status & BAT_STATUS_DISCHARGING) { pr_debug("Discharging.\n"); return POWER_SUPPLY_CHARGE_TYPE_NONE; } if (fet_status & FET_STATUS_PRECHARGE) { pr_debug("Pre-Charging.\n"); return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; } if (chg_status & CHG_STATUS_HOT_TEMP_CHARGING) { pr_debug("Hot-Temp-Charging.\n"); return POWER_SUPPLY_CHARGE_TYPE_FAST; } if (chg_status & CHG_STATUS_LOW_TEMP_CHARGING) { pr_debug("Low-Temp-Charging.\n"); return POWER_SUPPLY_CHARGE_TYPE_FAST; } if (chg_status & CHG_STATUS_STD1_TEMP_CHARGING) { pr_debug("STD1-Temp-Charging.\n"); return POWER_SUPPLY_CHARGE_TYPE_FAST; } if (chg_status & CHG_STATUS_STD2_TEMP_CHARGING) { pr_debug("STD2-Temp-Charging.\n"); 
return POWER_SUPPLY_CHARGE_TYPE_FAST; } if (chg_status & CHG_STATUS_BATTERY_DEPLETED) pr_debug("battery_depleted.\n"); if (chg_status & CHG_STATUS_CELL_BALANCING) pr_debug("cell_balancing.\n"); if (chg_status & CHG_STATUS_OVERCHARGE) { pr_err("overcharge fault.\n"); return POWER_SUPPLY_CHARGE_TYPE_NONE; } if (chg_status & CHG_STATUS_SUSPENDED) { pr_info("Suspended.\n"); return POWER_SUPPLY_CHARGE_TYPE_NONE; } if (chg_status & CHG_STATUS_DISABLED) { pr_info("Disabled.\n"); return POWER_SUPPLY_CHARGE_TYPE_NONE; } return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; } static bool bq28400_get_prop_present(struct i2c_client *client) { int val; val = bq28400_read_reg(client, SBS_BATTERY_STATUS); /* If the bq28400 is inside the battery pack * then when battery is removed the i2c transfer will fail. */ if (val < 0) return false; /* TODO - support when bq28400 is not embedded in battery pack */ return true; } /* * User sapce read the battery info. * Get data online via I2C from the battery gauge. */ static int bq28400_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret = 0; struct bq28400_device *dev = container_of(psy, struct bq28400_device, batt_psy); struct i2c_client *client = dev->client; static char str[BQ_MAX_STR_LEN]; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = bq28400_get_prop_status(client); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = bq28400_get_prop_charge_type(client); break; case POWER_SUPPLY_PROP_PRESENT: val->intval = bq28400_get_prop_present(client); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = bq28400_read_voltage(client); val->intval *= 1000; /* mV to uV */ break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = bq28400_read_rsoc(client); if (val->intval < 0) ret = -EINVAL; break; case POWER_SUPPLY_PROP_CURRENT_NOW: /* Positive current indicates drawing */ val->intval = -bq28400_read_current(client); val->intval *= 1000; /* mA to uA */ break; case 
POWER_SUPPLY_PROP_CURRENT_AVG: /* Positive current indicates drawing */ val->intval = -bq28400_read_avg_current(client); val->intval *= 1000; /* mA to uA */ break; case POWER_SUPPLY_PROP_TEMP: val->intval = bq28400_read_temperature(client); break; case POWER_SUPPLY_PROP_CHARGE_FULL: val->intval = bq28400_read_full_capacity(client); break; case POWER_SUPPLY_PROP_CHARGE_NOW: val->intval = bq28400_read_remain_capacity(client); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_MODEL_NAME: bq28400_read_string(client, SBS_DEVICE_NAME, str, 20); val->strval = str; break; case POWER_SUPPLY_PROP_MANUFACTURER: bq28400_read_string(client, SBS_MANUFACTURER_NAME, str, 20); val->strval = str; break; default: pr_err(" psp %d Not supoprted.\n", psp); ret = -EINVAL; break; } return ret; } static int bq28400_set_reg(void *data, u64 val) { struct debug_reg *dbg = data; u8 reg = dbg->reg; int ret; struct i2c_client *client = bq28400_dev->client; ret = bq28400_write_reg(client, reg, val); return ret; } static int bq28400_get_reg(void *data, u64 *val) { struct debug_reg *dbg = data; u8 reg = dbg->reg; u16 subcmd = dbg->subcmd; int ret; struct i2c_client *client = bq28400_dev->client; if (subcmd) ret = bq28400_read_subcmd(client, reg, subcmd); else ret = bq28400_read_reg(client, reg); if (ret < 0) return ret; *val = ret; return 0; } DEFINE_SIMPLE_ATTRIBUTE(reg_fops, bq28400_get_reg, bq28400_set_reg, "0x%04llx\n"); static int bq28400_create_debugfs_entries(struct bq28400_device *bq28400_dev) { int i; bq28400_dev->dent = debugfs_create_dir(BQ28400_NAME, NULL); if (IS_ERR(bq28400_dev->dent)) { pr_err("bq28400 driver couldn't create debugfs dir\n"); return -EFAULT; } for (i = 0 ; i < ARRAY_SIZE(bq28400_debug_regs) ; i++) { char *name = bq28400_debug_regs[i].name; struct dentry *file; void *data = &bq28400_debug_regs[i]; file = debugfs_create_file(name, 0644, bq28400_dev->dent, data, &reg_fops); if (IS_ERR(file)) { 
pr_err("debugfs_create_file %s failed.\n", name); return -EFAULT; } } return 0; } static int bq28400_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { pr_debug("psp = %d.val = %d.\n", psp, val->intval); return -EINVAL; } static void bq28400_external_power_changed(struct power_supply *psy) { pr_debug("Notify power_supply_changed.\n"); /* The battery gauge monitors the current and voltage every 1 second. * Therefore a delay from the time that the charger start/stop charging * until the battery gauge detects it. */ msleep(1000); /* Update LEDs and notify uevents */ power_supply_changed(&bq28400_dev->batt_psy); } static int bq28400_register_psy(struct bq28400_device *bq28400_dev) { int ret; bq28400_dev->batt_psy.name = "bq28400_battery"; bq28400_dev->batt_psy.type = POWER_SUPPLY_TYPE_BMS; bq28400_dev->batt_psy.num_supplicants = 0; bq28400_dev->batt_psy.properties = pm_power_props; bq28400_dev->batt_psy.num_properties = ARRAY_SIZE(pm_power_props); bq28400_dev->batt_psy.get_property = bq28400_get_property; bq28400_dev->batt_psy.set_property = bq28400_set_property; bq28400_dev->batt_psy.external_power_changed = bq28400_external_power_changed; ret = power_supply_register(&bq28400_dev->client->dev, &bq28400_dev->batt_psy); if (ret) { pr_err("failed to register power_supply. ret=%d.\n", ret); return ret; } return 0; } /** * Update userspace every 1 minute. * Normally it takes more than 120 minutes (two hours) to * charge/discahrge the battery, * so updating every 1 minute should be enough for 1% change * detection. * Any immidiate change detected by the DC charger is notified * by the bq28400_external_power_changed callback, which notify * the user space. 
*/ static void bq28400_periodic_user_space_update_worker(struct work_struct *work) { u32 delay_msec = 60*1000; pr_debug("Notify user space.\n"); /* Notify user space via kobject_uevent change notification */ power_supply_changed(&bq28400_dev->batt_psy); schedule_delayed_work(&bq28400_dev->periodic_user_space_update_work, round_jiffies_relative(msecs_to_jiffies (delay_msec))); } static int bq28400_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; struct device_node *dev_node = client->dev.of_node; if (dev_node == NULL) { pr_err("Device Tree node doesn't exist.\n"); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_err(" i2c func fail.\n"); return -EIO; } if (bq28400_read_reg(client, SBS_BATTERY_STATUS) < 0) { pr_err("Device doesn't exist.\n"); return -ENODEV; } bq28400_dev = kzalloc(sizeof(*bq28400_dev), GFP_KERNEL); if (!bq28400_dev) { pr_err(" alloc fail.\n"); return -ENOMEM; } /* Note: Lithium-ion battery normal temperature range 0..40 C */ ret = of_property_read_u32(dev_node, "ti,temp-cold-degc", &(bq28400_dev->temp_cold)); if (ret) { pr_err("Unable to read cold temperature. ret=%d.\n", ret); goto err_dev_node; } pr_debug("cold temperature limit = %d C.\n", bq28400_dev->temp_cold); ret = of_property_read_u32(dev_node, "ti,temp-hot-degc", &(bq28400_dev->temp_hot)); if (ret) { pr_err("Unable to read hot temperature. 
ret=%d.\n", ret); goto err_dev_node; } pr_debug("hot temperature limit = %d C.\n", bq28400_dev->temp_hot); bq28400_dev->client = client; i2c_set_clientdata(client, bq28400_dev); ret = bq28400_register_psy(bq28400_dev); if (ret) { pr_err(" bq28400_register_psy fail.\n"); goto err_register_psy; } ret = bq28400_create_debugfs_entries(bq28400_dev); if (ret) { pr_err(" bq28400_create_debugfs_entries fail.\n"); goto err_debugfs; } INIT_DELAYED_WORK(&bq28400_dev->periodic_user_space_update_work, bq28400_periodic_user_space_update_worker); schedule_delayed_work(&bq28400_dev->periodic_user_space_update_work, msecs_to_jiffies(1000)); pr_debug("Device is ready.\n"); return 0; err_debugfs: if (bq28400_dev->dent) debugfs_remove_recursive(bq28400_dev->dent); power_supply_unregister(&bq28400_dev->batt_psy); err_register_psy: err_dev_node: kfree(bq28400_dev); bq28400_dev = NULL; pr_info("FAIL.\n"); return ret; } static int bq28400_remove(struct i2c_client *client) { struct bq28400_device *bq28400_dev = i2c_get_clientdata(client); power_supply_unregister(&bq28400_dev->batt_psy); if (bq28400_dev->dent) debugfs_remove_recursive(bq28400_dev->dent); kfree(bq28400_dev); bq28400_dev = NULL; return 0; } static const struct of_device_id bq28400_match[] = { { .compatible = "ti,bq28400-battery" }, { .compatible = "ti,bq30z55-battery" }, { }, }; static const struct i2c_device_id bq28400_id[] = { {BQ28400_NAME, 0}, {}, }; MODULE_DEVICE_TABLE(i2c, bq28400_id); static struct i2c_driver bq28400_driver = { .driver = { .name = BQ28400_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(bq28400_match), }, .probe = bq28400_probe, .remove = bq28400_remove, .id_table = bq28400_id, }; static int __init bq28400_init(void) { pr_info(" bq28400 driver rev %s.\n", BQ28400_REV); return i2c_add_driver(&bq28400_driver); } module_init(bq28400_init); static void __exit bq28400_exit(void) { return i2c_del_driver(&bq28400_driver); } module_exit(bq28400_exit); MODULE_DESCRIPTION("Driver for BQ28400 charger 
chip"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("i2c:" BQ28400_NAME);
gpl-2.0
ffzg/rhel-3.10-zram
drivers/video/console/fbcon_cw.c
1792
10761
/* * linux/drivers/video/console/fbcon_ud.c -- Software Rotation - 90 degrees * * Copyright (C) 2005 Antonino Daplas <adaplas @pol.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <asm/types.h> #include "fbcon.h" #include "fbcon_rotate.h" /* * Rotation 90 degrees */ static void cw_update_attr(u8 *dst, u8 *src, int attribute, struct vc_data *vc) { int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2; int width = (vc->vc_font.height + 7) >> 3; u8 c, msk = ~(0xff >> offset); for (i = 0; i < vc->vc_font.width; i++) { for (j = 0; j < width; j++) { c = *src; if (attribute & FBCON_ATTRIBUTE_UNDERLINE && !j) c |= msk; if (attribute & FBCON_ATTRIBUTE_BOLD && i) c |= *(src-width); if (attribute & FBCON_ATTRIBUTE_REVERSE) c = ~c; src++; *dst++ = c; } } } static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; u32 vxres = GETVXRES(ops->p->scrollmode, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; area.dx = vxres - ((dy + height) * vc->vc_font.height); area.dy = dx * vc->vc_font.width; area.width = height * vc->vc_font.height; area.height = width * vc->vc_font.width; info->fbops->fb_copyarea(info, &area); } static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); region.dy = sx * vc->vc_font.width; region.height = width * vc->vc_font.width; region.width = height * vc->vc_font.height; region.rop = ROP_COPY; info->fbops->fb_fillrect(info, &region); } static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info, const u16 *s, u32 attr, u32 cnt, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { struct fbcon_ops *ops = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = (vc->vc_font.height + 7) >> 3; u8 *src; while (cnt--) { src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize; if (attr) { cw_update_attr(buf, src, attr, vc); src = buf; } if (likely(idx == 1)) __fb_pad_aligned_buffer(dst, d_pitch, src, idx, vc->vc_font.width); else fb_pad_aligned_buffer(dst, d_pitch, src, idx, vc->vc_font.width); dst += d_pitch * vc->vc_font.width; } info->fbops->fb_imageblit(info, image); } static void cw_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg) { struct fb_image image; struct fbcon_ops *ops = info->fbcon_par; u32 width = (vc->vc_font.height + 7)/8; u32 cellsize = width * vc->vc_font.width; u32 maxcnt = info->pixmap.size/cellsize; u32 scan_align = info->pixmap.scan_align - 1; u32 buf_align = info->pixmap.buf_align - 1; u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; image.fg_color = fg; image.bg_color = bg; image.dx = vxres - ((yy + 1) * vc->vc_font.height); image.dy = xx * vc->vc_font.width; image.width = vc->vc_font.height; image.depth = 1; if (attribute) { buf = kmalloc(cellsize, GFP_KERNEL); if (!buf) return; } while (count) { if (count > maxcnt) cnt = maxcnt; else cnt = count; image.height = 
vc->vc_font.width * cnt; pitch = ((image.width + 7) >> 3) + scan_align; pitch &= ~scan_align; size = pitch * image.height + buf_align; size &= ~buf_align; dst = fb_get_buffer_offset(info, &info->pixmap, size); image.data = dst; cw_putcs_aligned(vc, info, s, attribute, cnt, pitch, width, cellsize, &image, buf, dst); image.dy += image.height; count -= cnt; s += cnt; } /* buf is always NULL except when in monochrome mode, so in this case it's a gain to check buf against NULL even though kfree() handles NULL pointers just fine */ if (unlikely(buf)) kfree(buf); } static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; unsigned int rw = info->var.yres - (vc->vc_cols*cw); unsigned int bh = info->var.xres - (vc->vc_rows*ch); unsigned int rs = info->var.yres - rw; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; region.color = attr_bgcol_ec(bgshift,vc,info); region.rop = ROP_COPY; if (rw && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; region.width = info->var.xres_virtual; info->fbops->fb_fillrect(info, &region); } if (bh) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; region.width = bh; info->fbops->fb_fillrect(info, &region); } } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; int y = real_y(ops->p, vc->vc_y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; cursor.set = 0; if (softback_lines) { if (y + softback_lines >= vc->vc_rows) { mode = CM_ERASE; ops->cursor_flash = 0; return; } else y += softback_lines; } c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); if (ops->cursor_state.image.data != src || ops->cursor_reset) { ops->cursor_state.image.data = src; cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { u8 *dst; dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC); if (!dst) return; kfree(ops->cursor_data); ops->cursor_data = dst; cw_update_attr(dst, src, attribute, vc); src = dst; } if (ops->cursor_state.image.fg_color != fg || ops->cursor_state.image.bg_color != bg || ops->cursor_reset) { ops->cursor_state.image.fg_color = fg; ops->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } if (ops->cursor_state.image.height != vc->vc_font.width || ops->cursor_state.image.width != vc->vc_font.height || ops->cursor_reset) { ops->cursor_state.image.height = vc->vc_font.width; ops->cursor_state.image.width = vc->vc_font.height; cursor.set |= FB_CUR_SETSIZE; } dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); dy = vc->vc_x * vc->vc_font.width; if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || ops->cursor_reset) { ops->cursor_state.image.dx = dx; ops->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || ops->cursor_reset) { ops->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || vc->vc_cursor_type != ops->p->cursor_shape || ops->cursor_state.mask == NULL || ops->cursor_reset) { char *tmp, *mask = kmalloc(w*vc->vc_font.width, 
GFP_ATOMIC); int cur_height, size, i = 0; int width = (vc->vc_font.width + 7)/8; if (!mask) return; tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC); if (!tmp) { kfree(mask); return; } kfree(ops->cursor_state.mask); ops->cursor_state.mask = mask; ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; switch (ops->p->cursor_shape & CUR_HWMASK) { case CUR_NONE: cur_height = 0; break; case CUR_UNDERLINE: cur_height = (vc->vc_font.height < 10) ? 1 : 2; break; case CUR_LOWER_THIRD: cur_height = vc->vc_font.height/3; break; case CUR_LOWER_HALF: cur_height = vc->vc_font.height >> 1; break; case CUR_TWO_THIRDS: cur_height = (vc->vc_font.height << 1)/3; break; case CUR_BLOCK: default: cur_height = vc->vc_font.height; break; } size = (vc->vc_font.height - cur_height) * width; while (size--) tmp[i++] = 0; size = cur_height * width; while (size--) tmp[i++] = 0xff; memset(mask, 0, w * vc->vc_font.width); rotate_cw(tmp, mask, vc->vc_font.width, vc->vc_font.height); kfree(tmp); } switch (mode) { case CM_ERASE: ops->cursor_state.enable = 0; break; case CM_DRAW: case CM_MOVE: default: ops->cursor_state.enable = (use_sw) ? 
/* (tail of cw_cursor(); its head lies in the preceding chunk)
 * Final leg of the enable decision, then hand the fully assembled
 * cursor description to the driver. */
		0 : 1;
		break;
	}

	/* Copy the cached cursor state into the fb_cursor request. */
	cursor.image.data = src;
	cursor.image.fg_color = ops->cursor_state.image.fg_color;
	cursor.image.bg_color = ops->cursor_state.image.bg_color;
	cursor.image.dx = ops->cursor_state.image.dx;
	cursor.image.dy = ops->cursor_state.image.dy;
	cursor.image.height = ops->cursor_state.image.height;
	cursor.image.width = ops->cursor_state.image.width;
	cursor.hot.x = ops->cursor_state.hot.x;
	cursor.hot.y = ops->cursor_state.hot.y;
	cursor.mask = ops->cursor_state.mask;
	cursor.enable = ops->cursor_state.enable;
	cursor.image.depth = 1;	/* cursor image is a 1 bpp bitmap */
	cursor.rop = ROP_XOR;

	/* Prefer the driver's hardware cursor; fall back to the generic
	 * software cursor when the hook is absent or returns non-zero
	 * (err starts at 1, so no hook also means fallback). */
	if (info->fbops->fb_cursor)
		err = info->fbops->fb_cursor(info, &cursor);

	if (err)
		soft_cursor(info, &cursor);

	ops->cursor_reset = 0;
}

/*
 * cw_update_start - pan the display for a clockwise-rotated console
 * @info: frame buffer info
 *
 * With the console rotated 90 degrees clockwise, the logical y offset
 * maps to a hardware x offset measured from the right-hand edge of the
 * virtual x resolution, and the logical x offset becomes the hardware
 * y offset.  Returns the result of fb_pan_display(); afterwards the
 * cached ops->var offsets are resynchronized with what the driver
 * actually accepted.
 */
static int cw_update_start(struct fb_info *info)
{
	struct fbcon_ops *ops = info->fbcon_par;
	u32 vxres = GETVXRES(ops->p->scrollmode, info);
	u32 xoffset;
	int err;

	/* Mirror the offset across the rotated axis. */
	xoffset = vxres - (info->var.xres + ops->var.yoffset);
	ops->var.yoffset = ops->var.xoffset;
	ops->var.xoffset = xoffset;
	err = fb_pan_display(info, &ops->var);
	/* Re-read the values the driver actually applied. */
	ops->var.xoffset = info->var.xoffset;
	ops->var.yoffset = info->var.yoffset;
	ops->var.vmode = info->var.vmode;
	return err;
}

/*
 * fbcon_rotate_cw - install the clockwise-rotation console operations
 * @ops: fbcon operations table to populate
 */
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
	ops->bmove = cw_bmove;
	ops->clear = cw_clear;
	ops->putcs = cw_putcs;
	ops->clear_margins = cw_clear_margins;
	ops->cursor = cw_cursor;
	ops->update_start = cw_update_start;
}
EXPORT_SYMBOL(fbcon_rotate_cw);

MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Console Rotation (90 degrees) Support");
MODULE_LICENSE("GPL");
gpl-2.0
whoi-acomms/linux
fs/ecryptfs/main.c
2304
24957
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok * Copyright (C) 2001-2003 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/dcache.h> #include <linux/file.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/skbuff.h> #include <linux/crypto.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/key.h> #include <linux/parser.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include <linux/magic.h> #include "ecryptfs_kernel.h" /** * Module parameter that defines the ecryptfs_verbosity level. 
*/ int ecryptfs_verbosity = 0; module_param(ecryptfs_verbosity, int, 0); MODULE_PARM_DESC(ecryptfs_verbosity, "Initial verbosity level (0 or 1; defaults to " "0, which is Quiet)"); /** * Module parameter that defines the number of message buffer elements */ unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS; module_param(ecryptfs_message_buf_len, uint, 0); MODULE_PARM_DESC(ecryptfs_message_buf_len, "Number of message buffer elements"); /** * Module parameter that defines the maximum guaranteed amount of time to wait * for a response from ecryptfsd. The actual sleep time will be, more than * likely, a small amount greater than this specified value, but only less if * the message successfully arrives. */ signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ; module_param(ecryptfs_message_wait_timeout, long, 0); MODULE_PARM_DESC(ecryptfs_message_wait_timeout, "Maximum number of seconds that an operation will " "sleep while waiting for a message response from " "userspace"); /** * Module parameter that is an estimate of the maximum number of users * that will be concurrently using eCryptfs. Set this to the right * value to balance performance and memory use. */ unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS; module_param(ecryptfs_number_of_users, uint, 0); MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of " "concurrent users of eCryptfs"); void __ecryptfs_printk(const char *fmt, ...) { va_list args; va_start(args, fmt); if (fmt[1] == '7') { /* KERN_DEBUG */ if (ecryptfs_verbosity >= 1) vprintk(fmt, args); } else vprintk(fmt, args); va_end(args); } /** * ecryptfs_init_lower_file * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with * the lower dentry and the lower mount set * * eCryptfs only ever keeps a single open file for every lower * inode. All I/O operations to the lower inode occur through that * file. 
When the first eCryptfs dentry that interposes with the first
 * lower dentry for that inode is created, this function creates the
 * lower file struct and associates it with the eCryptfs
 * inode. When all eCryptfs files associated with the inode are
 * released, the file is closed.
 *
 * The lower file will be opened with read/write permissions, if
 * possible. Otherwise, it is opened read-only.
 *
 * This function does nothing if a lower file is already
 * associated with the eCryptfs inode.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_init_lower_file(struct dentry *dentry,
				    struct file **lower_file)
{
	const struct cred *cred = current_cred();
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
	struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
	int rc;

	/* May delegate the open to the privileged eCryptfs kthread when
	 * the current credentials are insufficient for read/write. */
	rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt,
				      cred);
	if (rc) {
		printk(KERN_ERR "Error opening lower file "
		       "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
		       "rc = [%d]\n", lower_dentry, lower_mnt, rc);
		(*lower_file) = NULL;
	}
	return rc;
}

/*
 * ecryptfs_get_lower_file - take a reference on the per-inode lower file
 * @dentry: eCryptfs dentry (used to locate the lower dentry/mount)
 * @inode: eCryptfs inode whose shared lower file is wanted
 *
 * The first taker actually opens the lower file; later takers just
 * bump the count.  Serialized by lower_file_mutex.
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode)
{
	struct ecryptfs_inode_info *inode_info;
	int count, rc = 0;

	inode_info = ecryptfs_inode_to_private(inode);
	mutex_lock(&inode_info->lower_file_mutex);
	count = atomic_inc_return(&inode_info->lower_file_count);
	if (WARN_ON_ONCE(count < 1))
		rc = -EINVAL;
	else if (count == 1) {
		/* First user: open the lower file now.  On failure,
		 * roll the count back so a later open can retry. */
		rc = ecryptfs_init_lower_file(dentry,
					      &inode_info->lower_file);
		if (rc)
			atomic_set(&inode_info->lower_file_count, 0);
	}
	mutex_unlock(&inode_info->lower_file_mutex);
	return rc;
}

/*
 * ecryptfs_put_lower_file - drop a reference; close the lower file on
 * the final put.  atomic_dec_and_mutex_lock() only takes the mutex
 * (and enters the branch) when the count actually reaches zero.
 */
void ecryptfs_put_lower_file(struct inode *inode)
{
	struct ecryptfs_inode_info *inode_info;

	inode_info = ecryptfs_inode_to_private(inode);
	if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
				      &inode_info->lower_file_mutex)) {
		fput(inode_info->lower_file);
		inode_info->lower_file = NULL;
		mutex_unlock(&inode_info->lower_file_mutex);
	}
}

/* Mount-option token identifiers (list continues on the next chunk line). */
enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
       ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
       ecryptfs_opt_ecryptfs_key_bytes,
       ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
       ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig,
       ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes,
       ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only,
       ecryptfs_opt_check_dev_ruid,
       ecryptfs_opt_err };

/* match_token() patterns mapping mount-option strings to the token
 * identifiers above; ecryptfs_opt_err terminates the table. */
static const match_table_t tokens = {
	{ecryptfs_opt_sig, "sig=%s"},
	{ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
	{ecryptfs_opt_cipher, "cipher=%s"},
	{ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
	{ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
	{ecryptfs_opt_passthrough, "ecryptfs_passthrough"},
	{ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"},
	{ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"},
	{ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"},
	{ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"},
	{ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"},
	{ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"},
	{ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"},
	{ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"},
	{ecryptfs_opt_err, NULL}
};

/*
 * ecryptfs_init_global_auth_toks - resolve each mount-wide auth token
 * @mount_crypt_stat: per-mount crypto state holding the token list
 *
 * Looks up the user-session keyring key for every signature supplied
 * at mount time.  A token that cannot be resolved is flagged
 * ECRYPTFS_AUTH_TOK_INVALID and the first failure aborts the walk.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_init_global_auth_toks(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct ecryptfs_global_auth_tok *global_auth_tok;
	struct ecryptfs_auth_tok *auth_tok;
	int rc = 0;

	list_for_each_entry(global_auth_tok,
			    &mount_crypt_stat->global_auth_tok_list,
			    mount_crypt_stat_list) {
		rc = ecryptfs_keyring_auth_tok_for_sig(
			&global_auth_tok->global_auth_tok_key, &auth_tok,
			global_auth_tok->sig);
		if (rc) {
			printk(KERN_ERR "Could not find valid key in user "
			       "session keyring for sig specified in mount "
			       "option: [%s]\n", global_auth_tok->sig);
			global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
			goto out;
		} else {
			global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
			/* the lookup returns with the key write-locked */
			up_write(&(global_auth_tok->global_auth_tok_key)->sem);
		}
	}
out:
	return rc;
}

/* Zero and (re)initialize a mount's crypt stat (body continues on the
 * next chunk line). */
static void ecryptfs_init_mount_crypt_stat(
	struct
ecryptfs_mount_crypt_stat *mount_crypt_stat) { memset((void *)mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list); mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex); mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED; } /** * ecryptfs_parse_options * @sb: The ecryptfs super block * @options: The options passed to the kernel * @check_ruid: set to 1 if device uid should be checked against the ruid * * Parse mount options: * debug=N - ecryptfs_verbosity level for debug output * sig=XXX - description(signature) of the key to use * * Returns the dentry object of the lower-level (lower/interposed) * directory; We want to mount our stackable file system on top of * that lower directory. * * The signature of the key to use must be the description of a key * already in the keyring. Mounting will fail if the key can not be * found. * * Returns zero on success; non-zero on error */ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, uid_t *check_ruid) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; *check_ruid = 0; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR 
"Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case 
ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_check_dev_ruid: *check_ruid = 1; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option [%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = 
-EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; } struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { struct super_block *s; struct ecryptfs_sb_info *sbi; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; struct inode *inode; struct path path; uid_t check_ruid; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); if (!sbi) { rc = -ENOMEM; goto out; } rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; goto out; } s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) { rc = PTR_ERR(s); goto out; } s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; s->s_d_op = &ecryptfs_dops; 
err = "Reading sb failed"; rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); if (rc) { ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); goto out1; } if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { rc = -EINVAL; printk(KERN_ERR "Mount on filesystem of type " "eCryptfs explicitly disallowed due to " "known incompatibilities\n"); goto out_free; } if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { rc = -EPERM; printk(KERN_ERR "Mount of device (uid: %d) not owned by " "requested user (uid: %d)\n", path.dentry->d_inode->i_uid, current_uid()); goto out_free; } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free; s->s_root = d_make_root(inode); if (!s->s_root) { rc = -ENOMEM; goto out_free; } rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); if (!root_info) goto out_free; /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); ecryptfs_set_dentry_lower(s->s_root, path.dentry); ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); s->s_flags |= MS_ACTIVE; return dget(s->s_root); out_free: path_put(&path); out1: deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); kmem_cache_free(ecryptfs_sb_info_cache, sbi); } printk(KERN_ERR "%s; rc = [%d]\n", err, rc); return ERR_PTR(rc); } /** * ecryptfs_kill_block_super * @sb: The ecryptfs super block * * Used to bring the superblock down and free the private data. 
*/ static void ecryptfs_kill_block_super(struct super_block *sb) { struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb); kill_anon_super(sb); if (!sb_info) return; ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); bdi_destroy(&sb_info->bdi); kmem_cache_free(ecryptfs_sb_info_cache, sb_info); } static struct file_system_type ecryptfs_fs_type = { .owner = THIS_MODULE, .name = "ecryptfs", .mount = ecryptfs_mount, .kill_sb = ecryptfs_kill_block_super, .fs_flags = 0 }; /** * inode_info_init_once * * Initializes the ecryptfs_inode_info_cache when it is created */ static void inode_info_init_once(void *vptr) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; inode_init_once(&ei->vfs_inode); } static struct ecryptfs_cache_info { struct kmem_cache **cache; const char *name; size_t size; void (*ctor)(void *obj); } ecryptfs_cache_infos[] = { { .cache = &ecryptfs_auth_tok_list_item_cache, .name = "ecryptfs_auth_tok_list_item", .size = sizeof(struct ecryptfs_auth_tok_list_item), }, { .cache = &ecryptfs_file_info_cache, .name = "ecryptfs_file_cache", .size = sizeof(struct ecryptfs_file_info), }, { .cache = &ecryptfs_dentry_info_cache, .name = "ecryptfs_dentry_info_cache", .size = sizeof(struct ecryptfs_dentry_info), }, { .cache = &ecryptfs_inode_info_cache, .name = "ecryptfs_inode_cache", .size = sizeof(struct ecryptfs_inode_info), .ctor = inode_info_init_once, }, { .cache = &ecryptfs_sb_info_cache, .name = "ecryptfs_sb_cache", .size = sizeof(struct ecryptfs_sb_info), }, { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_key_record_cache, .name = "ecryptfs_key_record_cache", .size = sizeof(struct ecryptfs_key_record), }, { .cache = &ecryptfs_key_sig_cache, .name = "ecryptfs_key_sig_cache", .size = sizeof(struct ecryptfs_key_sig), }, { .cache = &ecryptfs_global_auth_tok_cache, 
.name = "ecryptfs_global_auth_tok_cache", .size = sizeof(struct ecryptfs_global_auth_tok), }, { .cache = &ecryptfs_key_tfm_cache, .name = "ecryptfs_key_tfm_cache", .size = sizeof(struct ecryptfs_key_tfm), }, { .cache = &ecryptfs_open_req_cache, .name = "ecryptfs_open_req_cache", .size = sizeof(struct ecryptfs_open_req), }, }; static void ecryptfs_free_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; if (*(info->cache)) kmem_cache_destroy(*(info->cache)); } } /** * ecryptfs_init_kmem_caches * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; *(info->cache) = kmem_cache_create(info->name, info->size, 0, SLAB_HWCACHE_ALIGN, info->ctor); if (!*(info->cache)) { ecryptfs_free_kmem_caches(); ecryptfs_printk(KERN_WARNING, "%s: " "kmem_cache_create failed\n", info->name); return -ENOMEM; } } return 0; } static struct kobject *ecryptfs_kobj; static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK); } static struct kobj_attribute version_attr = __ATTR_RO(version); static struct attribute *attributes[] = { &version_attr.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attributes, }; static int do_sysfs_registration(void) { int rc; ecryptfs_kobj = kobject_create_and_add("ecryptfs", fs_kobj); if (!ecryptfs_kobj) { printk(KERN_ERR "Unable to create ecryptfs kset\n"); rc = -ENOMEM; goto out; } rc = sysfs_create_group(ecryptfs_kobj, &attr_group); if (rc) { printk(KERN_ERR "Unable to create ecryptfs version attributes\n"); kobject_put(ecryptfs_kobj); } out: return rc; } static void do_sysfs_unregistration(void) { sysfs_remove_group(ecryptfs_kobj, &attr_group); kobject_put(ecryptfs_kobj); } 
/*
 * ecryptfs_init - module initialization
 *
 * Sanity-checks the compile-time extent size against the page size,
 * then brings up the subsystems in order: kmem caches, sysfs nodes,
 * the privileged kthread, the userspace messaging channel, the crypto
 * layer, and finally filesystem registration.  Each failure path
 * unwinds exactly the steps that already succeeded (goto cleanup).
 *
 * Returns zero on success; a negative errno otherwise.
 */
static int __init ecryptfs_init(void)
{
	int rc;

	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
				"larger than the host's page size, and so "
				"eCryptfs cannot run on this system. The "
				"default eCryptfs extent size is [%u] bytes; "
				"the page size is [%lu] bytes.\n",
				ECRYPTFS_DEFAULT_EXTENT_SIZE,
				(unsigned long)PAGE_CACHE_SIZE);
		goto out;
	}
	rc = ecryptfs_init_kmem_caches();
	if (rc) {
		printk(KERN_ERR
		       "Failed to allocate one or more kmem_cache objects\n");
		goto out;
	}
	rc = do_sysfs_registration();
	if (rc) {
		printk(KERN_ERR "sysfs registration failed\n");
		goto out_free_kmem_caches;
	}
	rc = ecryptfs_init_kthread();
	if (rc) {
		printk(KERN_ERR "%s: kthread initialization failed; "
		       "rc = [%d]\n", __func__, rc);
		goto out_do_sysfs_unregistration;
	}
	rc = ecryptfs_init_messaging();
	if (rc) {
		printk(KERN_ERR "Failure occurred while attempting to "
		       "initialize the communications channel to "
		       "ecryptfsd\n");
		goto out_destroy_kthread;
	}
	rc = ecryptfs_init_crypto();
	if (rc) {
		printk(KERN_ERR "Failure whilst attempting to init crypto; "
		       "rc = [%d]\n", rc);
		goto out_release_messaging;
	}
	rc = register_filesystem(&ecryptfs_fs_type);
	if (rc) {
		printk(KERN_ERR "Failed to register filesystem\n");
		goto out_destroy_crypto;
	}
	/* Warn loudly: verbose mode can leak secret material to the log. */
	if (ecryptfs_verbosity > 0)
		printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values "
			"will be written to the syslog!\n", ecryptfs_verbosity);
	goto out;
out_destroy_crypto:
	ecryptfs_destroy_crypto();
out_release_messaging:
	ecryptfs_release_messaging();
out_destroy_kthread:
	ecryptfs_destroy_kthread();
out_do_sysfs_unregistration:
	do_sysfs_unregistration();
out_free_kmem_caches:
	ecryptfs_free_kmem_caches();
out:
	return rc;
}

/*
 * ecryptfs_exit - module teardown; releases resources in the reverse
 * order of ecryptfs_init().
 */
static void __exit ecryptfs_exit(void)
{
	int rc;

	rc = ecryptfs_destroy_crypto();
	if (rc)
		printk(KERN_ERR "Failure whilst attempting to destroy crypto; "
		       "rc = [%d]\n", rc);
	ecryptfs_release_messaging();
	ecryptfs_destroy_kthread();
	do_sysfs_unregistration();
	unregister_filesystem(&ecryptfs_fs_type);
	ecryptfs_free_kmem_caches();
}

MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>");
MODULE_DESCRIPTION("eCryptfs");
MODULE_LICENSE("GPL");

module_init(ecryptfs_init)
module_exit(ecryptfs_exit)
gpl-2.0
evan6200/cirrfy_pos
drivers/mmc/host/wbsd.c
2816
40832
/* * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver * * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * * Warning! * * Changes to the FIFO system should be done with extreme care since * the hardware is full of bugs related to the FIFO. Known issues are: * * - FIFO size field in FSR is always zero. * * - FIFO interrupts tend not to work as they should. Interrupts are * triggered only for full/empty events, not for threshold values. * * - On APIC systems the FIFO empty interrupt is sometimes lost. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/pnp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/dma.h> #include "wbsd.h" #define DRIVER_NAME "wbsd" #define DBG(x...) \ pr_debug(DRIVER_NAME ": " x) #define DBGF(f, x...) 
\ pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x) /* * Device resources */ #ifdef CONFIG_PNP static const struct pnp_device_id pnp_dev_table[] = { { "WEC0517", 0 }, { "WEC0518", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(pnp, pnp_dev_table); #endif /* CONFIG_PNP */ static const int config_ports[] = { 0x2E, 0x4E }; static const int unlock_codes[] = { 0x83, 0x87 }; static const int valid_ids[] = { 0x7112, }; #ifdef CONFIG_PNP static unsigned int param_nopnp = 0; #else static const unsigned int param_nopnp = 1; #endif static unsigned int param_io = 0x248; static unsigned int param_irq = 6; static int param_dma = 2; /* * Basic functions */ static inline void wbsd_unlock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(host->unlock_code, host->config); outb(host->unlock_code, host->config); } static inline void wbsd_lock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(LOCK_CODE, host->config); } static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value) { BUG_ON(host->config == 0); outb(reg, host->config); outb(value, host->config + 1); } static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg) { BUG_ON(host->config == 0); outb(reg, host->config); return inb(host->config + 1); } static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value) { outb(index, host->base + WBSD_IDXR); outb(value, host->base + WBSD_DATAR); } static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index) { outb(index, host->base + WBSD_IDXR); return inb(host->base + WBSD_DATAR); } /* * Common routines */ static void wbsd_init_device(struct wbsd_host *host) { u8 setup, ier; /* * Reset chip (SD/MMC part) and fifo. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Set DAT3 to input */ setup &= ~WBSD_DAT3_H; wbsd_write_index(host, WBSD_IDX_SETUP, setup); host->flags &= ~WBSD_FIGNORE_DETECT; /* * Read back default clock. 
 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts: card insert/remove, FIFO
	 * threshold, CRC error, timeout and transfer complete.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts (reading ISR acknowledges pending events).
	 */
	inb(host->base + WBSD_ISR);
}

/*
 * wbsd_reset - soft-reset the SD/MMC part of the chip after an error.
 */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}

/*
 * wbsd_request_end - complete an MMC request and notify the MMC layer.
 * Releases the ISA DMA channel first if one was used.  Called with
 * host->lock held; the lock is dropped around mmc_request_done()
 * because the MMC layer may call back into the driver.
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Scatter/gather functions
 */

/* Initialize the SG cursor: current entry, offset, bytes remaining. */
static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
{
	/*
	 * Get info. about SG list from data structure.
	 */
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;

	host->offset = 0;
	host->remain = host->cur_sg->length;
}

/* Advance to the next SG entry; returns entries left (0 when done). */
static inline int wbsd_next_sg(struct wbsd_host *host)
{
	/*
	 * Skip to next SG entry.
	 */
	host->cur_sg++;
	host->num_sg--;

	/*
	 * Any entries left?
*/ if (host->num_sg > 0) { host->offset = 0; host->remain = host->cur_sg->length; } return host->num_sg; } static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) { return sg_virt(host->cur_sg); } static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(dmabuf, sgbuf, sg[i].length); dmabuf += sg[i].length; } } static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(sgbuf, dmabuf, sg[i].length); dmabuf += sg[i].length; } } /* * Command handling */ static inline void wbsd_get_short_reply(struct wbsd_host *host, struct mmc_command *cmd) { /* * Correct response type? */ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) { cmd->error = -EILSEQ; return; } cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0; cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24; } static inline void wbsd_get_long_reply(struct wbsd_host *host, struct mmc_command *cmd) { int i; /* * Correct response type? 
*/ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) { cmd->error = -EILSEQ; return; } for (i = 0; i < 4; i++) { cmd->resp[i] = wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; } } static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd) { int i; u8 status, isr; /* * Clear accumulated ISR. The interrupt routine * will fill this one with events that occur during * transfer. */ host->isr = 0; /* * Send the command (CRC calculated by host). */ outb(cmd->opcode, host->base + WBSD_CMDR); for (i = 3; i >= 0; i--) outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); cmd->error = 0; /* * Wait for the request to complete. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & WBSD_CARDTRAFFIC); /* * Do we expect a reply? */ if (cmd->flags & MMC_RSP_PRESENT) { /* * Read back status. */ isr = host->isr; /* Card removed? */ if (isr & WBSD_INT_CARD) cmd->error = -ENOMEDIUM; /* Timeout? */ else if (isr & WBSD_INT_TIMEOUT) cmd->error = -ETIMEDOUT; /* CRC? */ else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC)) cmd->error = -EILSEQ; /* All ok */ else { if (cmd->flags & MMC_RSP_136) wbsd_get_long_reply(host, cmd); else wbsd_get_short_reply(host, cmd); } } } /* * Data functions */ static void wbsd_empty_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Handle excessive data. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Drain the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) { /* * The size field in the FSR is broken so we have to * do some guessing. 
*/ if (fsr & WBSD_FIFO_FULL) fifo = 16; else if (fsr & WBSD_FIFO_FUTHRE) fifo = 8; else fifo = 1; for (i = 0; i < fifo; i++) { *buffer = inb(host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * This is a very dirty hack to solve a * hardware problem. The chip doesn't trigger * FIFO threshold interrupts properly. */ if ((data->blocks * data->blksz - data->bytes_xfered) < 16) tasklet_schedule(&host->fifo_tasklet); } static void wbsd_fill_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Check that we aren't being called after the * entire buffer has been transferred. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Fill the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) { /* * The size field in the FSR is broken so we have to * do some guessing. */ if (fsr & WBSD_FIFO_EMPTY) fifo = 0; else if (fsr & WBSD_FIFO_EMTHRE) fifo = 8; else fifo = 15; for (i = 16; i > fifo; i--) { outb(*buffer, host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * The controller stops sending interrupts for * 'FIFO empty' under certain conditions. So we * need to be a bit more pro-active. */ tasklet_schedule(&host->fifo_tasklet); } static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) { u16 blksize; u8 setup; unsigned long dmaflags; unsigned int size; /* * Calculate size. */ size = data->blocks * data->blksz; /* * Check timeout values for overflow. 
* (Yes, some cards cause this value to overflow). */ if (data->timeout_ns > 127000000) wbsd_write_index(host, WBSD_IDX_TAAC, 127); else { wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns / 1000000); } if (data->timeout_clks > 255) wbsd_write_index(host, WBSD_IDX_NSAC, 255); else wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks); /* * Inform the chip of how large blocks will be * sent. It needs this to determine when to * calculate CRC. * * Space for CRC must be included in the size. * Two bytes are needed for each data line. */ if (host->bus_width == MMC_BUS_WIDTH_1) { blksize = data->blksz + 2; wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else if (host->bus_width == MMC_BUS_WIDTH_4) { blksize = data->blksz + 2 * 4; wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else { data->error = -EINVAL; return; } /* * Clear the FIFO. This is needed even for DMA * transfers since the chip still uses the FIFO * internally. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * DMA transfer? */ if (host->dma >= 0) { /* * The buffer for DMA is only 64 kB. */ BUG_ON(size > 0x10000); if (size > 0x10000) { data->error = -EINVAL; return; } /* * Transfer data from the SG list to * the DMA buffer. */ if (data->flags & MMC_DATA_WRITE) wbsd_sg_to_dma(host, data); /* * Initialise the ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); if (data->flags & MMC_DATA_READ) set_dma_mode(host->dma, DMA_MODE_READ & ~0x40); else set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40); set_dma_addr(host->dma, host->dma_addr); set_dma_count(host->dma, size); enable_dma(host->dma); release_dma_lock(dmaflags); /* * Enable DMA on the host. 
*/ wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE); } else { /* * This flag is used to keep printk * output to a minimum. */ host->firsterr = 1; /* * Initialise the SG list. */ wbsd_init_sg(host, data); /* * Turn off DMA. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Set up FIFO threshold levels (and fill * buffer if doing a write). */ if (data->flags & MMC_DATA_READ) { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_FULL | 8); } else { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_EMPTY | 8); wbsd_fill_fifo(host); } } data->error = 0; } static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data) { unsigned long dmaflags; int count; u8 status; WARN_ON(host->mrq == NULL); /* * Send a stop command if needed. */ if (data->stop) wbsd_send_command(host, data->stop); /* * Wait for the controller to leave data * transfer state. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE)); /* * DMA transfer? */ if (host->dma >= 0) { /* * Disable DMA on the host. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Turn of ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); count = get_dma_residue(host->dma); release_dma_lock(dmaflags); data->bytes_xfered = host->mrq->data->blocks * host->mrq->data->blksz - count; data->bytes_xfered -= data->bytes_xfered % data->blksz; /* * Any leftover data? */ if (count) { printk(KERN_ERR "%s: Incomplete DMA transfer. " "%d bytes left.\n", mmc_hostname(host->mmc), count); if (!data->error) data->error = -EIO; } else { /* * Transfer data from DMA buffer to * SG list. 
*/ if (data->flags & MMC_DATA_READ) wbsd_dma_to_sg(host, data); } if (data->error) { if (data->bytes_xfered) data->bytes_xfered -= data->blksz; } } wbsd_request_end(host, host->mrq); } /*****************************************************************************\ * * * MMC layer callbacks * * * \*****************************************************************************/ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct wbsd_host *host = mmc_priv(mmc); struct mmc_command *cmd; /* * Disable tasklets to avoid a deadlock. */ spin_lock_bh(&host->lock); BUG_ON(host->mrq != NULL); cmd = mrq->cmd; host->mrq = mrq; /* * Check that there is actually a card in the slot. */ if (!(host->flags & WBSD_FCARD_PRESENT)) { cmd->error = -ENOMEDIUM; goto done; } if (cmd->data) { /* * The hardware is so delightfully stupid that it has a list * of "data" commands. If a command isn't on this list, it'll * just go back to the idle state and won't send any data * interrupts. */ switch (cmd->opcode) { case 11: case 17: case 18: case 20: case 24: case 25: case 26: case 27: case 30: case 42: case 56: break; /* ACMDs. We don't keep track of state, so we just treat them * like any other command. */ case 51: break; default: #ifdef CONFIG_MMC_DEBUG printk(KERN_WARNING "%s: Data command %d is not " "supported by this controller.\n", mmc_hostname(host->mmc), cmd->opcode); #endif cmd->error = -EINVAL; goto done; }; } /* * Does the request include data? */ if (cmd->data) { wbsd_prepare_data(host, cmd->data); if (cmd->data->error) goto done; } wbsd_send_command(host, cmd); /* * If this is a data transfer the request * will be finished after the data has * transferred. */ if (cmd->data && !cmd->error) { /* * Dirty fix for hardware bug. 
*/ if (host->dma == -1) tasklet_schedule(&host->fifo_tasklet); spin_unlock_bh(&host->lock); return; } done: wbsd_request_end(host, mrq); spin_unlock_bh(&host->lock); } static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct wbsd_host *host = mmc_priv(mmc); u8 clk, setup, pwr; spin_lock_bh(&host->lock); /* * Reset the chip on each power off. * Should clear out any weird states. */ if (ios->power_mode == MMC_POWER_OFF) wbsd_init_device(host); if (ios->clock >= 24000000) clk = WBSD_CLK_24M; else if (ios->clock >= 16000000) clk = WBSD_CLK_16M; else if (ios->clock >= 12000000) clk = WBSD_CLK_12M; else clk = WBSD_CLK_375K; /* * Only write to the clock register when * there is an actual change. */ if (clk != host->clk) { wbsd_write_index(host, WBSD_IDX_CLK, clk); host->clk = clk; } /* * Power up card. */ if (ios->power_mode != MMC_POWER_OFF) { pwr = inb(host->base + WBSD_CSR); pwr &= ~WBSD_POWER_N; outb(pwr, host->base + WBSD_CSR); } /* * MMC cards need to have pin 1 high during init. * It wreaks havoc with the card detection though so * that needs to be disabled. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); if (ios->chip_select == MMC_CS_HIGH) { BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1); setup |= WBSD_DAT3_H; host->flags |= WBSD_FIGNORE_DETECT; } else { if (setup & WBSD_DAT3_H) { setup &= ~WBSD_DAT3_H; /* * We cannot resume card detection immediately * because of capacitance and delays in the chip. */ mod_timer(&host->ignore_timer, jiffies + HZ / 100); } } wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Store bus width for later. Will be used when * setting up the data transfer. 
 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}

/*
 * mmc_host_ops .get_ro callback: report the card's write-protect state.
 *
 * Returns 1 if the card is read-only, 0 otherwise.
 *
 * NOTE(review): the WBSD_MSLED bit is pulsed around the status read —
 * presumably needed for the chip to present a valid write-protect
 * signal in CSR; confirm against the W83L51xD datasheet.
 */
static int wbsd_get_ro(struct mmc_host *mmc)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 csr;

	spin_lock_bh(&host->lock);

	/* Assert MSLED before sampling the status register */
	csr = inb(host->base + WBSD_CSR);
	csr |= WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	/* Give the chip time to settle before reading back */
	mdelay(1);

	/* Sample CSR, then deassert MSLED again */
	csr = inb(host->base + WBSD_CSR);
	csr &= ~WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	spin_unlock_bh(&host->lock);

	/* WRPT bit set => card is write protected */
	return !!(csr & WBSD_WRPT);
}

/* Callbacks exported to the MMC core layer */
static const struct mmc_host_ops wbsd_ops = {
	.request = wbsd_request,
	.set_ios = wbsd_set_ios,
	.get_ro = wbsd_get_ro,
};

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

/*
 * Helper function to reset detection ignore
 *
 * Timer callback (host->ignore_timer): re-enables card detection after
 * the grace period started by wbsd_set_ios() when DAT3 was released.
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout. 
*/ tasklet_schedule(&host->card_tasklet); spin_unlock_bh(&host->lock); } /* * Tasklets */ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) { WARN_ON(!host->mrq); if (!host->mrq) return NULL; WARN_ON(!host->mrq->cmd); if (!host->mrq->cmd) return NULL; WARN_ON(!host->mrq->cmd->data); if (!host->mrq->cmd->data) return NULL; return host->mrq->cmd->data; } static void wbsd_tasklet_card(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; u8 csr; int delay = -1; spin_lock(&host->lock); if (host->flags & WBSD_FIGNORE_DETECT) { spin_unlock(&host->lock); return; } csr = inb(host->base + WBSD_CSR); WARN_ON(csr == 0xff); if (csr & WBSD_CARDPRESENT) { if (!(host->flags & WBSD_FCARD_PRESENT)) { DBG("Card inserted\n"); host->flags |= WBSD_FCARD_PRESENT; delay = 500; } } else if (host->flags & WBSD_FCARD_PRESENT) { DBG("Card removed\n"); host->flags &= ~WBSD_FCARD_PRESENT; if (host->mrq) { printk(KERN_ERR "%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); wbsd_reset(host); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } delay = 0; } /* * Unlock first since we might get a call back. */ spin_unlock(&host->lock); if (delay != -1) mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); } static void wbsd_tasklet_fifo(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; if (data->flags & MMC_DATA_WRITE) wbsd_fill_fifo(host); else wbsd_empty_fifo(host); /* * Done? 
 */
	if (host->num_sg == 0) {
		/* Whole scatterlist consumed: stop FIFO IRQs, complete request */
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}

/*
 * Data CRC error during a transfer: flag -EILSEQ on the data and hand
 * the request to the finish tasklet for completion.
 */
static void wbsd_tasklet_crc(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	/* No request in flight: spurious event, nothing to do */
	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("CRC error\n");

	data->error = -EILSEQ;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}

/*
 * Card timeout during a transfer: flag -ETIMEDOUT on the data and hand
 * the request to the finish tasklet for completion.
 */
static void wbsd_tasklet_timeout(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("Timeout\n");

	data->error = -ETIMEDOUT;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}

/*
 * Completion stage of a data request: runs wbsd_finish_data() to do the
 * post-transfer work (stop command, DMA wind-down, request end).
 */
static void wbsd_tasklet_finish(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	/* A finish event without a request is a bug; warn and bail */
	WARN_ON(!host->mrq);
	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	wbsd_finish_data(host, data);

end:
	spin_unlock(&host->lock);
}

/*
 * Interrupt handling
 */

static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	/* Accumulate events; wbsd_send_command() inspects host->isr later */
	host->isr |= isr;

	/*
	 * Schedule tasklets as needed. 
*/ if (isr & WBSD_INT_CARD) tasklet_schedule(&host->card_tasklet); if (isr & WBSD_INT_FIFO_THRE) tasklet_schedule(&host->fifo_tasklet); if (isr & WBSD_INT_CRC) tasklet_hi_schedule(&host->crc_tasklet); if (isr & WBSD_INT_TIMEOUT) tasklet_hi_schedule(&host->timeout_tasklet); if (isr & WBSD_INT_TC) tasklet_schedule(&host->finish_tasklet); return IRQ_HANDLED; } /*****************************************************************************\ * * * Device initialisation and shutdown * * * \*****************************************************************************/ /* * Allocate/free MMC structure. */ static int __devinit wbsd_alloc_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; /* * Allocate MMC structure. */ mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); host->mmc = mmc; host->dma = -1; /* * Set host parameters. */ mmc->ops = &wbsd_ops; mmc->f_min = 375000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; spin_lock_init(&host->lock); /* * Set up timers */ init_timer(&host->ignore_timer); host->ignore_timer.data = (unsigned long)host; host->ignore_timer.function = wbsd_reset_ignore; /* * Maximum number of segments. Worst case is one sector per segment * so this will be 64kB/512. */ mmc->max_segs = 128; /* * Maximum request size. Also limited by 64KiB buffer. */ mmc->max_req_size = 65536; /* * Maximum segment size. Could be one segment with the maximum number * of bytes. */ mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. We have 12 bits (= 4095) but have to subtract * space for CRC. So the maximum is 4095 - 4*2 = 4087. */ mmc->max_blk_size = 4087; /* * Maximum block count. There is no real limit so the maximum * request size will be the only restriction. 
*/ mmc->max_blk_count = mmc->max_req_size; dev_set_drvdata(dev, mmc); return 0; } static void wbsd_free_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; mmc = dev_get_drvdata(dev); if (!mmc) return; host = mmc_priv(mmc); BUG_ON(host == NULL); del_timer_sync(&host->ignore_timer); mmc_free_host(mmc); dev_set_drvdata(dev, NULL); } /* * Scan for known chip id:s */ static int __devinit wbsd_scan(struct wbsd_host *host) { int i, j, k; int id; /* * Iterate through all ports, all codes to * find hardware that is in our known list. */ for (i = 0; i < ARRAY_SIZE(config_ports); i++) { if (!request_region(config_ports[i], 2, DRIVER_NAME)) continue; for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) { id = 0xFFFF; host->config = config_ports[i]; host->unlock_code = unlock_codes[j]; wbsd_unlock_config(host); outb(WBSD_CONF_ID_HI, config_ports[i]); id = inb(config_ports[i] + 1) << 8; outb(WBSD_CONF_ID_LO, config_ports[i]); id |= inb(config_ports[i] + 1); wbsd_lock_config(host); for (k = 0; k < ARRAY_SIZE(valid_ids); k++) { if (id == valid_ids[k]) { host->chip_id = id; return 0; } } if (id != 0xFFFF) { DBG("Unknown hardware (id %x) found at %x\n", id, config_ports[i]); } } release_region(config_ports[i], 2); } host->config = 0; host->unlock_code = 0; return -ENODEV; } /* * Allocate/free io port ranges */ static int __devinit wbsd_request_region(struct wbsd_host *host, int base) { if (base & 0x7) return -EINVAL; if (!request_region(base, 8, DRIVER_NAME)) return -EIO; host->base = base; return 0; } static void wbsd_release_regions(struct wbsd_host *host) { if (host->base) release_region(host->base, 8); host->base = 0; if (host->config) release_region(host->config, 2); host->config = 0; } /* * Allocate/free DMA port and buffer */ static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) { if (dma < 0) return; if (request_dma(dma, DRIVER_NAME)) goto err; /* * We need to allocate a special buffer in * order for ISA to be able to DMA to it. 
*/ host->dma_buffer = kmalloc(WBSD_DMA_SIZE, GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); if (!host->dma_buffer) goto free; /* * Translate the address to a physical address. */ host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); /* * ISA DMA must be aligned on a 64k basis. */ if ((host->dma_addr & 0xffff) != 0) goto kfree; /* * ISA cannot access memory above 16 MB. */ else if (host->dma_addr >= 0x1000000) goto kfree; host->dma = dma; return; kfree: /* * If we've gotten here then there is some kind of alignment bug */ BUG_ON(1); dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); host->dma_addr = 0; kfree(host->dma_buffer); host->dma_buffer = NULL; free: free_dma(dma); err: printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " "Falling back on FIFO.\n", dma); } static void wbsd_release_dma(struct wbsd_host *host) { if (host->dma_addr) { dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); } kfree(host->dma_buffer); if (host->dma >= 0) free_dma(host->dma); host->dma = -1; host->dma_buffer = NULL; host->dma_addr = 0; } /* * Allocate/free IRQ. */ static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) { int ret; /* * Set up tasklets. Must be done before requesting interrupt. */ tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host); tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host); tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host); tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host); tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host); /* * Allocate interrupt. 
*/ ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host); if (ret) return ret; host->irq = irq; return 0; } static void wbsd_release_irq(struct wbsd_host *host) { if (!host->irq) return; free_irq(host->irq, host); host->irq = 0; tasklet_kill(&host->card_tasklet); tasklet_kill(&host->fifo_tasklet); tasklet_kill(&host->crc_tasklet); tasklet_kill(&host->timeout_tasklet); tasklet_kill(&host->finish_tasklet); } /* * Allocate all resources for the host. */ static int __devinit wbsd_request_resources(struct wbsd_host *host, int base, int irq, int dma) { int ret; /* * Allocate I/O ports. */ ret = wbsd_request_region(host, base); if (ret) return ret; /* * Allocate interrupt. */ ret = wbsd_request_irq(host, irq); if (ret) return ret; /* * Allocate DMA. */ wbsd_request_dma(host, dma); return 0; } /* * Release all resources for the host. */ static void wbsd_release_resources(struct wbsd_host *host) { wbsd_release_dma(host); wbsd_release_irq(host); wbsd_release_regions(host); } /* * Configure the resources the chip should use. */ static void wbsd_chip_config(struct wbsd_host *host) { wbsd_unlock_config(host); /* * Reset the chip. */ wbsd_write_config(host, WBSD_CONF_SWRST, 1); wbsd_write_config(host, WBSD_CONF_SWRST, 0); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Set up card detection. */ wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); /* * Configure chip */ wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); if (host->dma >= 0) wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); /* * Enable and power up chip. */ wbsd_write_config(host, WBSD_CONF_ENABLE, 1); wbsd_write_config(host, WBSD_CONF_POWER, 0x20); wbsd_lock_config(host); } /* * Check that configured resources are correct. 
*/ static int wbsd_chip_validate(struct wbsd_host *host) { int base, irq, dma; wbsd_unlock_config(host); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Read configuration. */ base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); irq = wbsd_read_config(host, WBSD_CONF_IRQ); dma = wbsd_read_config(host, WBSD_CONF_DRQ); wbsd_lock_config(host); /* * Validate against given configuration. */ if (base != host->base) return 0; if (irq != host->irq) return 0; if ((dma != host->dma) && (host->dma != -1)) return 0; return 1; } /* * Powers down the SD function */ static void wbsd_chip_poweroff(struct wbsd_host *host) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_write_config(host, WBSD_CONF_ENABLE, 0); wbsd_lock_config(host); } /*****************************************************************************\ * * * Devices setup and shutdown * * * \*****************************************************************************/ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, int pnp) { struct wbsd_host *host = NULL; struct mmc_host *mmc = NULL; int ret; ret = wbsd_alloc_mmc(dev); if (ret) return ret; mmc = dev_get_drvdata(dev); host = mmc_priv(mmc); /* * Scan for hardware. */ ret = wbsd_scan(host); if (ret) { if (pnp && (ret == -ENODEV)) { printk(KERN_WARNING DRIVER_NAME ": Unable to confirm device presence. You may " "experience lock-ups.\n"); } else { wbsd_free_mmc(dev); return ret; } } /* * Request resources. */ ret = wbsd_request_resources(host, base, irq, dma); if (ret) { wbsd_release_resources(host); wbsd_free_mmc(dev); return ret; } /* * See if chip needs to be configured. */ if (pnp) { if ((host->config != 0) && !wbsd_chip_validate(host)) { printk(KERN_WARNING DRIVER_NAME ": PnP active but chip not configured! " "You probably have a buggy BIOS. 
" "Configuring chip manually.\n"); wbsd_chip_config(host); } } else wbsd_chip_config(host); /* * Power Management stuff. No idea how this works. * Not tested. */ #ifdef CONFIG_PM if (host->config) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_PME, 0xA0); wbsd_lock_config(host); } #endif /* * Allow device to initialise itself properly. */ mdelay(5); /* * Reset the chip into a known state. */ wbsd_init_device(host); mmc_add_host(mmc); printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); if (host->chip_id != 0) printk(" id %x", (int)host->chip_id); printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); if (host->dma >= 0) printk(" dma %d", (int)host->dma); else printk(" FIFO"); if (pnp) printk(" PnP"); printk("\n"); return 0; } static void __devexit wbsd_shutdown(struct device *dev, int pnp) { struct mmc_host *mmc = dev_get_drvdata(dev); struct wbsd_host *host; if (!mmc) return; host = mmc_priv(mmc); mmc_remove_host(mmc); /* * Power down the SD/MMC function. */ if (!pnp) wbsd_chip_poweroff(host); wbsd_release_resources(host); wbsd_free_mmc(dev); } /* * Non-PnP */ static int __devinit wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } static int __devexit wbsd_remove(struct platform_device *dev) { wbsd_shutdown(&dev->dev, 0); return 0; } /* * PnP */ #ifdef CONFIG_PNP static int __devinit wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) { int io, irq, dma; /* * Get resources from PnP layer. 
*/ io = pnp_port_start(pnpdev, 0); irq = pnp_irq(pnpdev, 0); if (pnp_dma_valid(pnpdev, 0)) dma = pnp_dma(pnpdev, 0); else dma = -1; DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma); return wbsd_init(&pnpdev->dev, io, irq, dma, 1); } static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) { wbsd_shutdown(&dev->dev, 1); } #endif /* CONFIG_PNP */ /* * Power management */ #ifdef CONFIG_PM static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) { BUG_ON(host == NULL); return mmc_suspend_host(host->mmc); } static int wbsd_resume(struct wbsd_host *host) { BUG_ON(host == NULL); wbsd_init_device(host); return mmc_resume_host(host->mmc); } static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; int ret; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); ret = wbsd_suspend(host, state); if (ret) return ret; wbsd_chip_poweroff(host); return 0; } static int wbsd_platform_resume(struct platform_device *dev) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); wbsd_chip_config(host); /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #ifdef CONFIG_PNP static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); return wbsd_suspend(host, state); } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); /* * See if chip needs to be configured. */ if (host->config != 0) { if (!wbsd_chip_validate(host)) { printk(KERN_WARNING DRIVER_NAME ": PnP active but chip not configured! 
" "You probably have a buggy BIOS. " "Configuring chip manually.\n"); wbsd_chip_config(host); } } /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #endif /* CONFIG_PNP */ #else /* CONFIG_PM */ #define wbsd_platform_suspend NULL #define wbsd_platform_resume NULL #define wbsd_pnp_suspend NULL #define wbsd_pnp_resume NULL #endif /* CONFIG_PM */ static struct platform_device *wbsd_device; static struct platform_driver wbsd_driver = { .probe = wbsd_probe, .remove = __devexit_p(wbsd_remove), .suspend = wbsd_platform_suspend, .resume = wbsd_platform_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; #ifdef CONFIG_PNP static struct pnp_driver wbsd_pnp_driver = { .name = DRIVER_NAME, .id_table = pnp_dev_table, .probe = wbsd_pnp_probe, .remove = __devexit_p(wbsd_pnp_remove), .suspend = wbsd_pnp_suspend, .resume = wbsd_pnp_resume, }; #endif /* CONFIG_PNP */ /* * Module loading/unloading */ static int __init wbsd_drv_init(void) { int result; printk(KERN_INFO DRIVER_NAME ": Winbond W83L51xD SD/MMC card interface driver\n"); printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); #ifdef CONFIG_PNP if (!param_nopnp) { result = pnp_register_driver(&wbsd_pnp_driver); if (result < 0) return result; } #endif /* CONFIG_PNP */ if (param_nopnp) { result = platform_driver_register(&wbsd_driver); if (result < 0) return result; wbsd_device = platform_device_alloc(DRIVER_NAME, -1); if (!wbsd_device) { platform_driver_unregister(&wbsd_driver); return -ENOMEM; } result = platform_device_add(wbsd_device); if (result) { platform_device_put(wbsd_device); platform_driver_unregister(&wbsd_driver); return result; } } return 0; } static void __exit wbsd_drv_exit(void) { #ifdef CONFIG_PNP if (!param_nopnp) pnp_unregister_driver(&wbsd_pnp_driver); #endif /* CONFIG_PNP */ if (param_nopnp) { platform_device_unregister(wbsd_device); platform_driver_unregister(&wbsd_driver); } DBG("unloaded\n"); } module_init(wbsd_drv_init); 
module_exit(wbsd_drv_exit);

/*
 * Module parameters. All are read-only after load (mode 0444); the
 * defaults are documented in the MODULE_PARM_DESC strings below.
 */
#ifdef CONFIG_PNP
module_param_named(nopnp, param_nopnp, uint, 0444);
#endif
module_param_named(io, param_io, uint, 0444);
module_param_named(irq, param_irq, uint, 0444);
module_param_named(dma, param_dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");

#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
gpl-2.0
ModADroid/android-omap-tuna
drivers/mmc/host/wbsd.c
2816
40832
/* * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver * * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * * Warning! * * Changes to the FIFO system should be done with extreme care since * the hardware is full of bugs related to the FIFO. Known issues are: * * - FIFO size field in FSR is always zero. * * - FIFO interrupts tend not to work as they should. Interrupts are * triggered only for full/empty events, not for threshold values. * * - On APIC systems the FIFO empty interrupt is sometimes lost. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/pnp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/dma.h> #include "wbsd.h" #define DRIVER_NAME "wbsd" #define DBG(x...) \ pr_debug(DRIVER_NAME ": " x) #define DBGF(f, x...) 
\ pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x) /* * Device resources */ #ifdef CONFIG_PNP static const struct pnp_device_id pnp_dev_table[] = { { "WEC0517", 0 }, { "WEC0518", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(pnp, pnp_dev_table); #endif /* CONFIG_PNP */ static const int config_ports[] = { 0x2E, 0x4E }; static const int unlock_codes[] = { 0x83, 0x87 }; static const int valid_ids[] = { 0x7112, }; #ifdef CONFIG_PNP static unsigned int param_nopnp = 0; #else static const unsigned int param_nopnp = 1; #endif static unsigned int param_io = 0x248; static unsigned int param_irq = 6; static int param_dma = 2; /* * Basic functions */ static inline void wbsd_unlock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(host->unlock_code, host->config); outb(host->unlock_code, host->config); } static inline void wbsd_lock_config(struct wbsd_host *host) { BUG_ON(host->config == 0); outb(LOCK_CODE, host->config); } static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value) { BUG_ON(host->config == 0); outb(reg, host->config); outb(value, host->config + 1); } static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg) { BUG_ON(host->config == 0); outb(reg, host->config); return inb(host->config + 1); } static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value) { outb(index, host->base + WBSD_IDXR); outb(value, host->base + WBSD_DATAR); } static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index) { outb(index, host->base + WBSD_IDXR); return inb(host->base + WBSD_DATAR); } /* * Common routines */ static void wbsd_init_device(struct wbsd_host *host) { u8 setup, ier; /* * Reset chip (SD/MMC part) and fifo. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Set DAT3 to input */ setup &= ~WBSD_DAT3_H; wbsd_write_index(host, WBSD_IDX_SETUP, setup); host->flags &= ~WBSD_FIGNORE_DETECT; /* * Read back default clock. 
*/ host->clk = wbsd_read_index(host, WBSD_IDX_CLK); /* * Power down port. */ outb(WBSD_POWER_N, host->base + WBSD_CSR); /* * Set maximum timeout. */ wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F); /* * Test for card presence */ if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT) host->flags |= WBSD_FCARD_PRESENT; else host->flags &= ~WBSD_FCARD_PRESENT; /* * Enable interesting interrupts. */ ier = 0; ier |= WBSD_EINT_CARD; ier |= WBSD_EINT_FIFO_THRE; ier |= WBSD_EINT_CRC; ier |= WBSD_EINT_TIMEOUT; ier |= WBSD_EINT_TC; outb(ier, host->base + WBSD_EIR); /* * Clear interrupts. */ inb(host->base + WBSD_ISR); } static void wbsd_reset(struct wbsd_host *host) { u8 setup; printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc)); /* * Soft reset of chip (SD/MMC part). */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_SOFT_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); } static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq) { unsigned long dmaflags; if (host->dma >= 0) { /* * Release ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); release_dma_lock(dmaflags); /* * Disable DMA on host. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); } host->mrq = NULL; /* * MMC layer might call back into the driver so first unlock. */ spin_unlock(&host->lock); mmc_request_done(host->mmc, mrq); spin_lock(&host->lock); } /* * Scatter/gather functions */ static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data) { /* * Get info. about SG list from data structure. */ host->cur_sg = data->sg; host->num_sg = data->sg_len; host->offset = 0; host->remain = host->cur_sg->length; } static inline int wbsd_next_sg(struct wbsd_host *host) { /* * Skip to next SG entry. */ host->cur_sg++; host->num_sg--; /* * Any entries left? 
*/ if (host->num_sg > 0) { host->offset = 0; host->remain = host->cur_sg->length; } return host->num_sg; } static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) { return sg_virt(host->cur_sg); } static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(dmabuf, sgbuf, sg[i].length); dmabuf += sg[i].length; } } static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) { unsigned int len, i; struct scatterlist *sg; char *dmabuf = host->dma_buffer; char *sgbuf; sg = data->sg; len = data->sg_len; for (i = 0; i < len; i++) { sgbuf = sg_virt(&sg[i]); memcpy(sgbuf, dmabuf, sg[i].length); dmabuf += sg[i].length; } } /* * Command handling */ static inline void wbsd_get_short_reply(struct wbsd_host *host, struct mmc_command *cmd) { /* * Correct response type? */ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) { cmd->error = -EILSEQ; return; } cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8; cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0; cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24; } static inline void wbsd_get_long_reply(struct wbsd_host *host, struct mmc_command *cmd) { int i; /* * Correct response type? 
*/ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) { cmd->error = -EILSEQ; return; } for (i = 0; i < 4; i++) { cmd->resp[i] = wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; cmd->resp[i] |= wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; } } static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd) { int i; u8 status, isr; /* * Clear accumulated ISR. The interrupt routine * will fill this one with events that occur during * transfer. */ host->isr = 0; /* * Send the command (CRC calculated by host). */ outb(cmd->opcode, host->base + WBSD_CMDR); for (i = 3; i >= 0; i--) outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); cmd->error = 0; /* * Wait for the request to complete. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & WBSD_CARDTRAFFIC); /* * Do we expect a reply? */ if (cmd->flags & MMC_RSP_PRESENT) { /* * Read back status. */ isr = host->isr; /* Card removed? */ if (isr & WBSD_INT_CARD) cmd->error = -ENOMEDIUM; /* Timeout? */ else if (isr & WBSD_INT_TIMEOUT) cmd->error = -ETIMEDOUT; /* CRC? */ else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC)) cmd->error = -EILSEQ; /* All ok */ else { if (cmd->flags & MMC_RSP_136) wbsd_get_long_reply(host, cmd); else wbsd_get_short_reply(host, cmd); } } } /* * Data functions */ static void wbsd_empty_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Handle excessive data. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Drain the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) { /* * The size field in the FSR is broken so we have to * do some guessing. 
*/ if (fsr & WBSD_FIFO_FULL) fifo = 16; else if (fsr & WBSD_FIFO_FUTHRE) fifo = 8; else fifo = 1; for (i = 0; i < fifo; i++) { *buffer = inb(host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * This is a very dirty hack to solve a * hardware problem. The chip doesn't trigger * FIFO threshold interrupts properly. */ if ((data->blocks * data->blksz - data->bytes_xfered) < 16) tasklet_schedule(&host->fifo_tasklet); } static void wbsd_fill_fifo(struct wbsd_host *host) { struct mmc_data *data = host->mrq->cmd->data; char *buffer; int i, fsr, fifo; /* * Check that we aren't being called after the * entire buffer has been transferred. */ if (host->num_sg == 0) return; buffer = wbsd_sg_to_buffer(host) + host->offset; /* * Fill the fifo. This has a tendency to loop longer * than the FIFO length (usually one block). */ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) { /* * The size field in the FSR is broken so we have to * do some guessing. */ if (fsr & WBSD_FIFO_EMPTY) fifo = 0; else if (fsr & WBSD_FIFO_EMTHRE) fifo = 8; else fifo = 15; for (i = 16; i > fifo; i--) { outb(*buffer, host->base + WBSD_DFR); buffer++; host->offset++; host->remain--; data->bytes_xfered++; /* * End of scatter list entry? */ if (host->remain == 0) { /* * Get next entry. Check if last. */ if (!wbsd_next_sg(host)) return; buffer = wbsd_sg_to_buffer(host); } } } /* * The controller stops sending interrupts for * 'FIFO empty' under certain conditions. So we * need to be a bit more pro-active. */ tasklet_schedule(&host->fifo_tasklet); } static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) { u16 blksize; u8 setup; unsigned long dmaflags; unsigned int size; /* * Calculate size. */ size = data->blocks * data->blksz; /* * Check timeout values for overflow. 
* (Yes, some cards cause this value to overflow). */ if (data->timeout_ns > 127000000) wbsd_write_index(host, WBSD_IDX_TAAC, 127); else { wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns / 1000000); } if (data->timeout_clks > 255) wbsd_write_index(host, WBSD_IDX_NSAC, 255); else wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks); /* * Inform the chip of how large blocks will be * sent. It needs this to determine when to * calculate CRC. * * Space for CRC must be included in the size. * Two bytes are needed for each data line. */ if (host->bus_width == MMC_BUS_WIDTH_1) { blksize = data->blksz + 2; wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else if (host->bus_width == MMC_BUS_WIDTH_4) { blksize = data->blksz + 2 * 4; wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else { data->error = -EINVAL; return; } /* * Clear the FIFO. This is needed even for DMA * transfers since the chip still uses the FIFO * internally. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); setup |= WBSD_FIFO_RESET; wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * DMA transfer? */ if (host->dma >= 0) { /* * The buffer for DMA is only 64 kB. */ BUG_ON(size > 0x10000); if (size > 0x10000) { data->error = -EINVAL; return; } /* * Transfer data from the SG list to * the DMA buffer. */ if (data->flags & MMC_DATA_WRITE) wbsd_sg_to_dma(host, data); /* * Initialise the ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); if (data->flags & MMC_DATA_READ) set_dma_mode(host->dma, DMA_MODE_READ & ~0x40); else set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40); set_dma_addr(host->dma, host->dma_addr); set_dma_count(host->dma, size); enable_dma(host->dma); release_dma_lock(dmaflags); /* * Enable DMA on the host. 
*/ wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE); } else { /* * This flag is used to keep printk * output to a minimum. */ host->firsterr = 1; /* * Initialise the SG list. */ wbsd_init_sg(host, data); /* * Turn off DMA. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Set up FIFO threshold levels (and fill * buffer if doing a write). */ if (data->flags & MMC_DATA_READ) { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_FULL | 8); } else { wbsd_write_index(host, WBSD_IDX_FIFOEN, WBSD_FIFOEN_EMPTY | 8); wbsd_fill_fifo(host); } } data->error = 0; } static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data) { unsigned long dmaflags; int count; u8 status; WARN_ON(host->mrq == NULL); /* * Send a stop command if needed. */ if (data->stop) wbsd_send_command(host, data->stop); /* * Wait for the controller to leave data * transfer state. */ do { status = wbsd_read_index(host, WBSD_IDX_STATUS); } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE)); /* * DMA transfer? */ if (host->dma >= 0) { /* * Disable DMA on the host. */ wbsd_write_index(host, WBSD_IDX_DMA, 0); /* * Turn of ISA DMA controller. */ dmaflags = claim_dma_lock(); disable_dma(host->dma); clear_dma_ff(host->dma); count = get_dma_residue(host->dma); release_dma_lock(dmaflags); data->bytes_xfered = host->mrq->data->blocks * host->mrq->data->blksz - count; data->bytes_xfered -= data->bytes_xfered % data->blksz; /* * Any leftover data? */ if (count) { printk(KERN_ERR "%s: Incomplete DMA transfer. " "%d bytes left.\n", mmc_hostname(host->mmc), count); if (!data->error) data->error = -EIO; } else { /* * Transfer data from DMA buffer to * SG list. 
*/ if (data->flags & MMC_DATA_READ) wbsd_dma_to_sg(host, data); } if (data->error) { if (data->bytes_xfered) data->bytes_xfered -= data->blksz; } } wbsd_request_end(host, host->mrq); } /*****************************************************************************\ * * * MMC layer callbacks * * * \*****************************************************************************/ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct wbsd_host *host = mmc_priv(mmc); struct mmc_command *cmd; /* * Disable tasklets to avoid a deadlock. */ spin_lock_bh(&host->lock); BUG_ON(host->mrq != NULL); cmd = mrq->cmd; host->mrq = mrq; /* * Check that there is actually a card in the slot. */ if (!(host->flags & WBSD_FCARD_PRESENT)) { cmd->error = -ENOMEDIUM; goto done; } if (cmd->data) { /* * The hardware is so delightfully stupid that it has a list * of "data" commands. If a command isn't on this list, it'll * just go back to the idle state and won't send any data * interrupts. */ switch (cmd->opcode) { case 11: case 17: case 18: case 20: case 24: case 25: case 26: case 27: case 30: case 42: case 56: break; /* ACMDs. We don't keep track of state, so we just treat them * like any other command. */ case 51: break; default: #ifdef CONFIG_MMC_DEBUG printk(KERN_WARNING "%s: Data command %d is not " "supported by this controller.\n", mmc_hostname(host->mmc), cmd->opcode); #endif cmd->error = -EINVAL; goto done; }; } /* * Does the request include data? */ if (cmd->data) { wbsd_prepare_data(host, cmd->data); if (cmd->data->error) goto done; } wbsd_send_command(host, cmd); /* * If this is a data transfer the request * will be finished after the data has * transferred. */ if (cmd->data && !cmd->error) { /* * Dirty fix for hardware bug. 
*/ if (host->dma == -1) tasklet_schedule(&host->fifo_tasklet); spin_unlock_bh(&host->lock); return; } done: wbsd_request_end(host, mrq); spin_unlock_bh(&host->lock); } static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct wbsd_host *host = mmc_priv(mmc); u8 clk, setup, pwr; spin_lock_bh(&host->lock); /* * Reset the chip on each power off. * Should clear out any weird states. */ if (ios->power_mode == MMC_POWER_OFF) wbsd_init_device(host); if (ios->clock >= 24000000) clk = WBSD_CLK_24M; else if (ios->clock >= 16000000) clk = WBSD_CLK_16M; else if (ios->clock >= 12000000) clk = WBSD_CLK_12M; else clk = WBSD_CLK_375K; /* * Only write to the clock register when * there is an actual change. */ if (clk != host->clk) { wbsd_write_index(host, WBSD_IDX_CLK, clk); host->clk = clk; } /* * Power up card. */ if (ios->power_mode != MMC_POWER_OFF) { pwr = inb(host->base + WBSD_CSR); pwr &= ~WBSD_POWER_N; outb(pwr, host->base + WBSD_CSR); } /* * MMC cards need to have pin 1 high during init. * It wreaks havoc with the card detection though so * that needs to be disabled. */ setup = wbsd_read_index(host, WBSD_IDX_SETUP); if (ios->chip_select == MMC_CS_HIGH) { BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1); setup |= WBSD_DAT3_H; host->flags |= WBSD_FIGNORE_DETECT; } else { if (setup & WBSD_DAT3_H) { setup &= ~WBSD_DAT3_H; /* * We cannot resume card detection immediately * because of capacitance and delays in the chip. */ mod_timer(&host->ignore_timer, jiffies + HZ / 100); } } wbsd_write_index(host, WBSD_IDX_SETUP, setup); /* * Store bus width for later. Will be used when * setting up the data transfer. 
*/ host->bus_width = ios->bus_width; spin_unlock_bh(&host->lock); } static int wbsd_get_ro(struct mmc_host *mmc) { struct wbsd_host *host = mmc_priv(mmc); u8 csr; spin_lock_bh(&host->lock); csr = inb(host->base + WBSD_CSR); csr |= WBSD_MSLED; outb(csr, host->base + WBSD_CSR); mdelay(1); csr = inb(host->base + WBSD_CSR); csr &= ~WBSD_MSLED; outb(csr, host->base + WBSD_CSR); spin_unlock_bh(&host->lock); return !!(csr & WBSD_WRPT); } static const struct mmc_host_ops wbsd_ops = { .request = wbsd_request, .set_ios = wbsd_set_ios, .get_ro = wbsd_get_ro, }; /*****************************************************************************\ * * * Interrupt handling * * * \*****************************************************************************/ /* * Helper function to reset detection ignore */ static void wbsd_reset_ignore(unsigned long data) { struct wbsd_host *host = (struct wbsd_host *)data; BUG_ON(host == NULL); DBG("Resetting card detection ignore\n"); spin_lock_bh(&host->lock); host->flags &= ~WBSD_FIGNORE_DETECT; /* * Card status might have changed during the * blackout. 
*/ tasklet_schedule(&host->card_tasklet); spin_unlock_bh(&host->lock); } /* * Tasklets */ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) { WARN_ON(!host->mrq); if (!host->mrq) return NULL; WARN_ON(!host->mrq->cmd); if (!host->mrq->cmd) return NULL; WARN_ON(!host->mrq->cmd->data); if (!host->mrq->cmd->data) return NULL; return host->mrq->cmd->data; } static void wbsd_tasklet_card(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; u8 csr; int delay = -1; spin_lock(&host->lock); if (host->flags & WBSD_FIGNORE_DETECT) { spin_unlock(&host->lock); return; } csr = inb(host->base + WBSD_CSR); WARN_ON(csr == 0xff); if (csr & WBSD_CARDPRESENT) { if (!(host->flags & WBSD_FCARD_PRESENT)) { DBG("Card inserted\n"); host->flags |= WBSD_FCARD_PRESENT; delay = 500; } } else if (host->flags & WBSD_FCARD_PRESENT) { DBG("Card removed\n"); host->flags &= ~WBSD_FCARD_PRESENT; if (host->mrq) { printk(KERN_ERR "%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); wbsd_reset(host); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } delay = 0; } /* * Unlock first since we might get a call back. */ spin_unlock(&host->lock); if (delay != -1) mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); } static void wbsd_tasklet_fifo(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; if (data->flags & MMC_DATA_WRITE) wbsd_fill_fifo(host); else wbsd_empty_fifo(host); /* * Done? 
*/ if (host->num_sg == 0) { wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); tasklet_schedule(&host->finish_tasklet); } end: spin_unlock(&host->lock); } static void wbsd_tasklet_crc(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; DBGF("CRC error\n"); data->error = -EILSEQ; tasklet_schedule(&host->finish_tasklet); end: spin_unlock(&host->lock); } static void wbsd_tasklet_timeout(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; DBGF("Timeout\n"); data->error = -ETIMEDOUT; tasklet_schedule(&host->finish_tasklet); end: spin_unlock(&host->lock); } static void wbsd_tasklet_finish(unsigned long param) { struct wbsd_host *host = (struct wbsd_host *)param; struct mmc_data *data; spin_lock(&host->lock); WARN_ON(!host->mrq); if (!host->mrq) goto end; data = wbsd_get_data(host); if (!data) goto end; wbsd_finish_data(host, data); end: spin_unlock(&host->lock); } /* * Interrupt handling */ static irqreturn_t wbsd_irq(int irq, void *dev_id) { struct wbsd_host *host = dev_id; int isr; isr = inb(host->base + WBSD_ISR); /* * Was it actually our hardware that caused the interrupt? */ if (isr == 0xff || isr == 0x00) return IRQ_NONE; host->isr |= isr; /* * Schedule tasklets as needed. 
*/ if (isr & WBSD_INT_CARD) tasklet_schedule(&host->card_tasklet); if (isr & WBSD_INT_FIFO_THRE) tasklet_schedule(&host->fifo_tasklet); if (isr & WBSD_INT_CRC) tasklet_hi_schedule(&host->crc_tasklet); if (isr & WBSD_INT_TIMEOUT) tasklet_hi_schedule(&host->timeout_tasklet); if (isr & WBSD_INT_TC) tasklet_schedule(&host->finish_tasklet); return IRQ_HANDLED; } /*****************************************************************************\ * * * Device initialisation and shutdown * * * \*****************************************************************************/ /* * Allocate/free MMC structure. */ static int __devinit wbsd_alloc_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; /* * Allocate MMC structure. */ mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); host->mmc = mmc; host->dma = -1; /* * Set host parameters. */ mmc->ops = &wbsd_ops; mmc->f_min = 375000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; spin_lock_init(&host->lock); /* * Set up timers */ init_timer(&host->ignore_timer); host->ignore_timer.data = (unsigned long)host; host->ignore_timer.function = wbsd_reset_ignore; /* * Maximum number of segments. Worst case is one sector per segment * so this will be 64kB/512. */ mmc->max_segs = 128; /* * Maximum request size. Also limited by 64KiB buffer. */ mmc->max_req_size = 65536; /* * Maximum segment size. Could be one segment with the maximum number * of bytes. */ mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. We have 12 bits (= 4095) but have to subtract * space for CRC. So the maximum is 4095 - 4*2 = 4087. */ mmc->max_blk_size = 4087; /* * Maximum block count. There is no real limit so the maximum * request size will be the only restriction. 
*/ mmc->max_blk_count = mmc->max_req_size; dev_set_drvdata(dev, mmc); return 0; } static void wbsd_free_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; mmc = dev_get_drvdata(dev); if (!mmc) return; host = mmc_priv(mmc); BUG_ON(host == NULL); del_timer_sync(&host->ignore_timer); mmc_free_host(mmc); dev_set_drvdata(dev, NULL); } /* * Scan for known chip id:s */ static int __devinit wbsd_scan(struct wbsd_host *host) { int i, j, k; int id; /* * Iterate through all ports, all codes to * find hardware that is in our known list. */ for (i = 0; i < ARRAY_SIZE(config_ports); i++) { if (!request_region(config_ports[i], 2, DRIVER_NAME)) continue; for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) { id = 0xFFFF; host->config = config_ports[i]; host->unlock_code = unlock_codes[j]; wbsd_unlock_config(host); outb(WBSD_CONF_ID_HI, config_ports[i]); id = inb(config_ports[i] + 1) << 8; outb(WBSD_CONF_ID_LO, config_ports[i]); id |= inb(config_ports[i] + 1); wbsd_lock_config(host); for (k = 0; k < ARRAY_SIZE(valid_ids); k++) { if (id == valid_ids[k]) { host->chip_id = id; return 0; } } if (id != 0xFFFF) { DBG("Unknown hardware (id %x) found at %x\n", id, config_ports[i]); } } release_region(config_ports[i], 2); } host->config = 0; host->unlock_code = 0; return -ENODEV; } /* * Allocate/free io port ranges */ static int __devinit wbsd_request_region(struct wbsd_host *host, int base) { if (base & 0x7) return -EINVAL; if (!request_region(base, 8, DRIVER_NAME)) return -EIO; host->base = base; return 0; } static void wbsd_release_regions(struct wbsd_host *host) { if (host->base) release_region(host->base, 8); host->base = 0; if (host->config) release_region(host->config, 2); host->config = 0; } /* * Allocate/free DMA port and buffer */ static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) { if (dma < 0) return; if (request_dma(dma, DRIVER_NAME)) goto err; /* * We need to allocate a special buffer in * order for ISA to be able to DMA to it. 
*/ host->dma_buffer = kmalloc(WBSD_DMA_SIZE, GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); if (!host->dma_buffer) goto free; /* * Translate the address to a physical address. */ host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); /* * ISA DMA must be aligned on a 64k basis. */ if ((host->dma_addr & 0xffff) != 0) goto kfree; /* * ISA cannot access memory above 16 MB. */ else if (host->dma_addr >= 0x1000000) goto kfree; host->dma = dma; return; kfree: /* * If we've gotten here then there is some kind of alignment bug */ BUG_ON(1); dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); host->dma_addr = 0; kfree(host->dma_buffer); host->dma_buffer = NULL; free: free_dma(dma); err: printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " "Falling back on FIFO.\n", dma); } static void wbsd_release_dma(struct wbsd_host *host) { if (host->dma_addr) { dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); } kfree(host->dma_buffer); if (host->dma >= 0) free_dma(host->dma); host->dma = -1; host->dma_buffer = NULL; host->dma_addr = 0; } /* * Allocate/free IRQ. */ static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) { int ret; /* * Set up tasklets. Must be done before requesting interrupt. */ tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host); tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host); tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host); tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host); tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host); /* * Allocate interrupt. 
*/ ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host); if (ret) return ret; host->irq = irq; return 0; } static void wbsd_release_irq(struct wbsd_host *host) { if (!host->irq) return; free_irq(host->irq, host); host->irq = 0; tasklet_kill(&host->card_tasklet); tasklet_kill(&host->fifo_tasklet); tasklet_kill(&host->crc_tasklet); tasklet_kill(&host->timeout_tasklet); tasklet_kill(&host->finish_tasklet); } /* * Allocate all resources for the host. */ static int __devinit wbsd_request_resources(struct wbsd_host *host, int base, int irq, int dma) { int ret; /* * Allocate I/O ports. */ ret = wbsd_request_region(host, base); if (ret) return ret; /* * Allocate interrupt. */ ret = wbsd_request_irq(host, irq); if (ret) return ret; /* * Allocate DMA. */ wbsd_request_dma(host, dma); return 0; } /* * Release all resources for the host. */ static void wbsd_release_resources(struct wbsd_host *host) { wbsd_release_dma(host); wbsd_release_irq(host); wbsd_release_regions(host); } /* * Configure the resources the chip should use. */ static void wbsd_chip_config(struct wbsd_host *host) { wbsd_unlock_config(host); /* * Reset the chip. */ wbsd_write_config(host, WBSD_CONF_SWRST, 1); wbsd_write_config(host, WBSD_CONF_SWRST, 0); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Set up card detection. */ wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); /* * Configure chip */ wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); if (host->dma >= 0) wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); /* * Enable and power up chip. */ wbsd_write_config(host, WBSD_CONF_ENABLE, 1); wbsd_write_config(host, WBSD_CONF_POWER, 0x20); wbsd_lock_config(host); } /* * Check that configured resources are correct. 
*/ static int wbsd_chip_validate(struct wbsd_host *host) { int base, irq, dma; wbsd_unlock_config(host); /* * Select SD/MMC function. */ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); /* * Read configuration. */ base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); irq = wbsd_read_config(host, WBSD_CONF_IRQ); dma = wbsd_read_config(host, WBSD_CONF_DRQ); wbsd_lock_config(host); /* * Validate against given configuration. */ if (base != host->base) return 0; if (irq != host->irq) return 0; if ((dma != host->dma) && (host->dma != -1)) return 0; return 1; } /* * Powers down the SD function */ static void wbsd_chip_poweroff(struct wbsd_host *host) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); wbsd_write_config(host, WBSD_CONF_ENABLE, 0); wbsd_lock_config(host); } /*****************************************************************************\ * * * Devices setup and shutdown * * * \*****************************************************************************/ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, int pnp) { struct wbsd_host *host = NULL; struct mmc_host *mmc = NULL; int ret; ret = wbsd_alloc_mmc(dev); if (ret) return ret; mmc = dev_get_drvdata(dev); host = mmc_priv(mmc); /* * Scan for hardware. */ ret = wbsd_scan(host); if (ret) { if (pnp && (ret == -ENODEV)) { printk(KERN_WARNING DRIVER_NAME ": Unable to confirm device presence. You may " "experience lock-ups.\n"); } else { wbsd_free_mmc(dev); return ret; } } /* * Request resources. */ ret = wbsd_request_resources(host, base, irq, dma); if (ret) { wbsd_release_resources(host); wbsd_free_mmc(dev); return ret; } /* * See if chip needs to be configured. */ if (pnp) { if ((host->config != 0) && !wbsd_chip_validate(host)) { printk(KERN_WARNING DRIVER_NAME ": PnP active but chip not configured! " "You probably have a buggy BIOS. 
" "Configuring chip manually.\n"); wbsd_chip_config(host); } } else wbsd_chip_config(host); /* * Power Management stuff. No idea how this works. * Not tested. */ #ifdef CONFIG_PM if (host->config) { wbsd_unlock_config(host); wbsd_write_config(host, WBSD_CONF_PME, 0xA0); wbsd_lock_config(host); } #endif /* * Allow device to initialise itself properly. */ mdelay(5); /* * Reset the chip into a known state. */ wbsd_init_device(host); mmc_add_host(mmc); printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); if (host->chip_id != 0) printk(" id %x", (int)host->chip_id); printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); if (host->dma >= 0) printk(" dma %d", (int)host->dma); else printk(" FIFO"); if (pnp) printk(" PnP"); printk("\n"); return 0; } static void __devexit wbsd_shutdown(struct device *dev, int pnp) { struct mmc_host *mmc = dev_get_drvdata(dev); struct wbsd_host *host; if (!mmc) return; host = mmc_priv(mmc); mmc_remove_host(mmc); /* * Power down the SD/MMC function. */ if (!pnp) wbsd_chip_poweroff(host); wbsd_release_resources(host); wbsd_free_mmc(dev); } /* * Non-PnP */ static int __devinit wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } static int __devexit wbsd_remove(struct platform_device *dev) { wbsd_shutdown(&dev->dev, 0); return 0; } /* * PnP */ #ifdef CONFIG_PNP static int __devinit wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) { int io, irq, dma; /* * Get resources from PnP layer. 
*/ io = pnp_port_start(pnpdev, 0); irq = pnp_irq(pnpdev, 0); if (pnp_dma_valid(pnpdev, 0)) dma = pnp_dma(pnpdev, 0); else dma = -1; DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma); return wbsd_init(&pnpdev->dev, io, irq, dma, 1); } static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) { wbsd_shutdown(&dev->dev, 1); } #endif /* CONFIG_PNP */ /* * Power management */ #ifdef CONFIG_PM static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) { BUG_ON(host == NULL); return mmc_suspend_host(host->mmc); } static int wbsd_resume(struct wbsd_host *host) { BUG_ON(host == NULL); wbsd_init_device(host); return mmc_resume_host(host->mmc); } static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; int ret; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); ret = wbsd_suspend(host, state); if (ret) return ret; wbsd_chip_poweroff(host); return 0; } static int wbsd_platform_resume(struct platform_device *dev) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); wbsd_chip_config(host); /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #ifdef CONFIG_PNP static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); host = mmc_priv(mmc); return wbsd_suspend(host, state); } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Resuming...\n"); host = mmc_priv(mmc); /* * See if chip needs to be configured. */ if (host->config != 0) { if (!wbsd_chip_validate(host)) { printk(KERN_WARNING DRIVER_NAME ": PnP active but chip not configured! 
" "You probably have a buggy BIOS. " "Configuring chip manually.\n"); wbsd_chip_config(host); } } /* * Allow device to initialise itself properly. */ mdelay(5); return wbsd_resume(host); } #endif /* CONFIG_PNP */ #else /* CONFIG_PM */ #define wbsd_platform_suspend NULL #define wbsd_platform_resume NULL #define wbsd_pnp_suspend NULL #define wbsd_pnp_resume NULL #endif /* CONFIG_PM */ static struct platform_device *wbsd_device; static struct platform_driver wbsd_driver = { .probe = wbsd_probe, .remove = __devexit_p(wbsd_remove), .suspend = wbsd_platform_suspend, .resume = wbsd_platform_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; #ifdef CONFIG_PNP static struct pnp_driver wbsd_pnp_driver = { .name = DRIVER_NAME, .id_table = pnp_dev_table, .probe = wbsd_pnp_probe, .remove = __devexit_p(wbsd_pnp_remove), .suspend = wbsd_pnp_suspend, .resume = wbsd_pnp_resume, }; #endif /* CONFIG_PNP */ /* * Module loading/unloading */ static int __init wbsd_drv_init(void) { int result; printk(KERN_INFO DRIVER_NAME ": Winbond W83L51xD SD/MMC card interface driver\n"); printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); #ifdef CONFIG_PNP if (!param_nopnp) { result = pnp_register_driver(&wbsd_pnp_driver); if (result < 0) return result; } #endif /* CONFIG_PNP */ if (param_nopnp) { result = platform_driver_register(&wbsd_driver); if (result < 0) return result; wbsd_device = platform_device_alloc(DRIVER_NAME, -1); if (!wbsd_device) { platform_driver_unregister(&wbsd_driver); return -ENOMEM; } result = platform_device_add(wbsd_device); if (result) { platform_device_put(wbsd_device); platform_driver_unregister(&wbsd_driver); return result; } } return 0; } static void __exit wbsd_drv_exit(void) { #ifdef CONFIG_PNP if (!param_nopnp) pnp_unregister_driver(&wbsd_pnp_driver); #endif /* CONFIG_PNP */ if (param_nopnp) { platform_device_unregister(wbsd_device); platform_driver_unregister(&wbsd_driver); } DBG("unloaded\n"); } module_init(wbsd_drv_init); 
module_exit(wbsd_drv_exit); #ifdef CONFIG_PNP module_param_named(nopnp, param_nopnp, uint, 0444); #endif module_param_named(io, param_io, uint, 0444); module_param_named(irq, param_irq, uint, 0444); module_param_named(dma, param_dma, int, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); #ifdef CONFIG_PNP MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)"); #endif MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)"); MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)"); MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
gpl-2.0
voidz777/omap
sound/sound_core.c
3072
16327
/* * Sound core. This file is composed of two parts. sound_class * which is common to both OSS and ALSA and OSS sound core which * is used OSS or emulation of it. */ /* * First, the common part. */ #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <sound/core.h> #ifdef CONFIG_SOUND_OSS_CORE static int __init init_oss_soundcore(void); static void cleanup_oss_soundcore(void); #else static inline int init_oss_soundcore(void) { return 0; } static inline void cleanup_oss_soundcore(void) { } #endif struct class *sound_class; EXPORT_SYMBOL(sound_class); MODULE_DESCRIPTION("Core sound module"); MODULE_AUTHOR("Alan Cox"); MODULE_LICENSE("GPL"); static char *sound_devnode(struct device *dev, mode_t *mode) { if (MAJOR(dev->devt) == SOUND_MAJOR) return NULL; return kasprintf(GFP_KERNEL, "snd/%s", dev_name(dev)); } static int __init init_soundcore(void) { int rc; rc = init_oss_soundcore(); if (rc) return rc; sound_class = class_create(THIS_MODULE, "sound"); if (IS_ERR(sound_class)) { cleanup_oss_soundcore(); return PTR_ERR(sound_class); } sound_class->devnode = sound_devnode; return 0; } static void __exit cleanup_soundcore(void) { cleanup_oss_soundcore(); class_destroy(sound_class); } subsys_initcall(init_soundcore); module_exit(cleanup_soundcore); #ifdef CONFIG_SOUND_OSS_CORE /* * OSS sound core handling. Breaks out sound functions to submodules * * Author: Alan Cox <alan@lxorguk.ukuu.org.uk> * * Fixes: * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * -------------------- * * Top level handler for the sound subsystem. Various devices can * plug into this. The fact they don't all go via OSS doesn't mean * they don't have to implement the OSS API. 
There is a lot of logic * to keeping much of the OSS weight out of the code in a compatibility * module, but it's up to the driver to rember to load it... * * The code provides a set of functions for registration of devices * by type. This is done rather than providing a single call so that * we can hide any future changes in the internals (eg when we go to * 32bit dev_t) from the modules and their interface. * * Secondly we need to allocate the dsp, dsp16 and audio devices as * one. Thus we misuse the chains a bit to simplify this. * * Thirdly to make it more fun and for 2.3.x and above we do all * of this using fine grained locking. * * FIXME: we have to resolve modules and fine grained load/unload * locking at some point in 2.3.x. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sound.h> #include <linux/kmod.h> #define SOUND_STEP 16 struct sound_unit { int unit_minor; const struct file_operations *unit_fops; struct sound_unit *next; char name[32]; }; #ifdef CONFIG_SOUND_MSNDCLAS extern int msnd_classic_init(void); #endif #ifdef CONFIG_SOUND_MSNDPIN extern int msnd_pinnacle_init(void); #endif /* * By default, OSS sound_core claims full legacy minor range (0-255) * of SOUND_MAJOR to trap open attempts to any sound minor and * requests modules using custom sound-slot/service-* module aliases. * The only benefit of doing this is allowing use of custom module * aliases instead of the standard char-major-* ones. This behavior * prevents alternative OSS implementation and is scheduled to be * removed. * * CONFIG_SOUND_OSS_CORE_PRECLAIM and soundcore.preclaim_oss kernel * parameter are added to allow distros and developers to try and * switch to alternative implementations without needing to rebuild * the kernel in the meantime. If preclaim_oss is non-zero, the * kernel will behave the same as before. 
All SOUND_MAJOR minors are * preclaimed and the custom module aliases along with standard chrdev * ones are emitted if a missing device is opened. If preclaim_oss is * zero, sound_core only grabs what's actually in use and for missing * devices only the standard chrdev aliases are requested. * * All these clutters are scheduled to be removed along with * sound-slot/service-* module aliases. Please take a look at * feature-removal-schedule.txt for details. */ #ifdef CONFIG_SOUND_OSS_CORE_PRECLAIM static int preclaim_oss = 1; #else static int preclaim_oss = 0; #endif module_param(preclaim_oss, int, 0444); static int soundcore_open(struct inode *, struct file *); static const struct file_operations soundcore_fops = { /* We must have an owner or the module locking fails */ .owner = THIS_MODULE, .open = soundcore_open, .llseek = noop_llseek, }; /* * Low level list operator. Scan the ordered list, find a hole and * join into it. Called with the lock asserted */ static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, const struct file_operations *fops, int index, int low, int top) { int n=low; if (index < 0) { /* first free */ while (*list && (*list)->unit_minor<n) list=&((*list)->next); while(n<top) { /* Found a hole ? */ if(*list==NULL || (*list)->unit_minor>n) break; list=&((*list)->next); n+=SOUND_STEP; } if(n>=top) return -ENOENT; } else { n = low+(index*16); while (*list) { if ((*list)->unit_minor==n) return -EBUSY; if ((*list)->unit_minor>n) break; list=&((*list)->next); } } /* * Fill it in */ s->unit_minor=n; s->unit_fops=fops; /* * Link it */ s->next=*list; *list=s; return n; } /* * Remove a node from the chain. 
Called with the lock asserted */ static struct sound_unit *__sound_remove_unit(struct sound_unit **list, int unit) { while(*list) { struct sound_unit *p=*list; if(p->unit_minor==unit) { *list=p->next; return p; } list=&(p->next); } printk(KERN_ERR "Sound device %d went missing!\n", unit); return NULL; } /* * This lock guards the sound loader list. */ static DEFINE_SPINLOCK(sound_loader_lock); /* * Allocate the controlling structure and add it to the sound driver * list. Acquires locks as needed */ static int sound_insert_unit(struct sound_unit **list, const struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev) { struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL); int r; if (!s) return -ENOMEM; spin_lock(&sound_loader_lock); retry: r = __sound_insert_unit(s, list, fops, index, low, top); spin_unlock(&sound_loader_lock); if (r < 0) goto fail; else if (r < SOUND_STEP) sprintf(s->name, "sound/%s", name); else sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP); if (!preclaim_oss) { /* * Something else might have grabbed the minor. If * first free slot is requested, rescan with @low set * to the next unit; otherwise, -EBUSY. */ r = __register_chrdev(SOUND_MAJOR, s->unit_minor, 1, s->name, &soundcore_fops); if (r < 0) { spin_lock(&sound_loader_lock); __sound_remove_unit(list, s->unit_minor); if (index < 0) { low = s->unit_minor + SOUND_STEP; goto retry; } spin_unlock(&sound_loader_lock); return -EBUSY; } } device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor), NULL, s->name+6); return s->unit_minor; fail: kfree(s); return r; } /* * Remove a unit. Acquires locks as needed. The drivers MUST have * completed the removal before their file operations become * invalid. 
*/ static void sound_remove_unit(struct sound_unit **list, int unit) { struct sound_unit *p; spin_lock(&sound_loader_lock); p = __sound_remove_unit(list, unit); spin_unlock(&sound_loader_lock); if (p) { if (!preclaim_oss) __unregister_chrdev(SOUND_MAJOR, p->unit_minor, 1, p->name); device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor)); kfree(p); } } /* * Allocations * * 0 *16 Mixers * 1 *8 Sequencers * 2 *16 Midi * 3 *16 DSP * 4 *16 SunDSP * 5 *16 DSP16 * 6 -- sndstat (obsolete) * 7 *16 unused * 8 -- alternate sequencer (see above) * 9 *16 raw synthesizer access * 10 *16 unused * 11 *16 unused * 12 *16 unused * 13 *16 unused * 14 *16 unused * 15 *16 unused */ static struct sound_unit *chains[SOUND_STEP]; /** * register_sound_special_device - register a special sound node * @fops: File operations for the driver * @unit: Unit number to allocate * @dev: device pointer * * Allocate a special sound device by minor number from the sound * subsystem. The allocated number is returned on success. On failure * a negative error code is returned. 
*/ int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev) { const int chain = unit % SOUND_STEP; int max_unit = 128 + chain; const char *name; char _name[16]; switch (chain) { case 0: name = "mixer"; break; case 1: name = "sequencer"; if (unit >= SOUND_STEP) goto __unknown; max_unit = unit + 1; break; case 2: name = "midi"; break; case 3: name = "dsp"; break; case 4: name = "audio"; break; case 5: name = "dspW"; break; case 8: name = "sequencer2"; if (unit >= SOUND_STEP) goto __unknown; max_unit = unit + 1; break; case 9: name = "dmmidi"; break; case 10: name = "dmfm"; break; case 12: name = "adsp"; break; case 13: name = "amidi"; break; case 14: name = "admmidi"; break; default: { __unknown: sprintf(_name, "unknown%d", chain); if (unit >= SOUND_STEP) strcat(_name, "-"); name = _name; } break; } return sound_insert_unit(&chains[chain], fops, -1, unit, max_unit, name, S_IRUSR | S_IWUSR, dev); } EXPORT_SYMBOL(register_sound_special_device); int register_sound_special(const struct file_operations *fops, int unit) { return register_sound_special_device(fops, unit, NULL); } EXPORT_SYMBOL(register_sound_special); /** * register_sound_mixer - register a mixer device * @fops: File operations for the driver * @dev: Unit number to allocate * * Allocate a mixer device. Unit is the number of the mixer requested. * Pass -1 to request the next free mixer unit. On success the allocated * number is returned, on failure a negative error code is returned. */ int register_sound_mixer(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[0], fops, dev, 0, 128, "mixer", S_IRUSR | S_IWUSR, NULL); } EXPORT_SYMBOL(register_sound_mixer); /** * register_sound_midi - register a midi device * @fops: File operations for the driver * @dev: Unit number to allocate * * Allocate a midi device. Unit is the number of the midi device requested. * Pass -1 to request the next free midi unit. 
On success the allocated * number is returned, on failure a negative error code is returned. */ int register_sound_midi(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[2], fops, dev, 2, 130, "midi", S_IRUSR | S_IWUSR, NULL); } EXPORT_SYMBOL(register_sound_midi); /* * DSP's are registered as a triple. Register only one and cheat * in open - see below. */ /** * register_sound_dsp - register a DSP device * @fops: File operations for the driver * @dev: Unit number to allocate * * Allocate a DSP device. Unit is the number of the DSP requested. * Pass -1 to request the next free DSP unit. On success the allocated * number is returned, on failure a negative error code is returned. * * This function allocates both the audio and dsp device entries together * and will always allocate them as a matching pair - eg dsp3/audio3 */ int register_sound_dsp(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[3], fops, dev, 3, 131, "dsp", S_IWUSR | S_IRUSR, NULL); } EXPORT_SYMBOL(register_sound_dsp); /** * unregister_sound_special - unregister a special sound device * @unit: unit number to allocate * * Release a sound device that was allocated with * register_sound_special(). The unit passed is the return value from * the register function. */ void unregister_sound_special(int unit) { sound_remove_unit(&chains[unit % SOUND_STEP], unit); } EXPORT_SYMBOL(unregister_sound_special); /** * unregister_sound_mixer - unregister a mixer * @unit: unit number to allocate * * Release a sound device that was allocated with register_sound_mixer(). * The unit passed is the return value from the register function. */ void unregister_sound_mixer(int unit) { sound_remove_unit(&chains[0], unit); } EXPORT_SYMBOL(unregister_sound_mixer); /** * unregister_sound_midi - unregister a midi device * @unit: unit number to allocate * * Release a sound device that was allocated with register_sound_midi(). 
* The unit passed is the return value from the register function. */ void unregister_sound_midi(int unit) { sound_remove_unit(&chains[2], unit); } EXPORT_SYMBOL(unregister_sound_midi); /** * unregister_sound_dsp - unregister a DSP device * @unit: unit number to allocate * * Release a sound device that was allocated with register_sound_dsp(). * The unit passed is the return value from the register function. * * Both of the allocated units are released together automatically. */ void unregister_sound_dsp(int unit) { sound_remove_unit(&chains[3], unit); } EXPORT_SYMBOL(unregister_sound_dsp); static struct sound_unit *__look_for_unit(int chain, int unit) { struct sound_unit *s; s=chains[chain]; while(s && s->unit_minor <= unit) { if(s->unit_minor==unit) return s; s=s->next; } return NULL; } static int soundcore_open(struct inode *inode, struct file *file) { int chain; int unit = iminor(inode); struct sound_unit *s; const struct file_operations *new_fops = NULL; chain=unit&0x0F; if(chain==4 || chain==5) /* dsp/audio/dsp16 */ { unit&=0xF0; unit|=3; chain=3; } spin_lock(&sound_loader_lock); s = __look_for_unit(chain, unit); if (s) new_fops = fops_get(s->unit_fops); if (preclaim_oss && !new_fops) { spin_unlock(&sound_loader_lock); /* * Please, don't change this order or code. * For ALSA slot means soundcard and OSS emulation code * comes as add-on modules which aren't depend on * ALSA toplevel modules for soundcards, thus we need * load them at first. [Jaroslav Kysela <perex@jcu.cz>] */ request_module("sound-slot-%i", unit>>4); request_module("sound-service-%i-%i", unit>>4, chain); /* * sound-slot/service-* module aliases are scheduled * for removal in favor of the standard char-major-* * module aliases. For the time being, generate both * the legacy and standard module aliases to ease * transition. 
*/ if (request_module("char-major-%d-%d", SOUND_MAJOR, unit) > 0) request_module("char-major-%d", SOUND_MAJOR); spin_lock(&sound_loader_lock); s = __look_for_unit(chain, unit); if (s) new_fops = fops_get(s->unit_fops); } if (new_fops) { /* * We rely upon the fact that we can't be unloaded while the * subdriver is there, so if ->open() is successful we can * safely drop the reference counter and if it is not we can * revert to old ->f_op. Ugly, indeed, but that's the cost of * switching ->f_op in the first place. */ int err = 0; const struct file_operations *old_fops = file->f_op; file->f_op = new_fops; spin_unlock(&sound_loader_lock); if (file->f_op->open) err = file->f_op->open(inode,file); if (err) { fops_put(file->f_op); file->f_op = fops_get(old_fops); } fops_put(old_fops); return err; } spin_unlock(&sound_loader_lock); return -ENODEV; } MODULE_ALIAS_CHARDEV_MAJOR(SOUND_MAJOR); static void cleanup_oss_soundcore(void) { /* We have nothing to really do here - we know the lists must be empty */ unregister_chrdev(SOUND_MAJOR, "sound"); } static int __init init_oss_soundcore(void) { if (preclaim_oss && register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops) == -1) { printk(KERN_ERR "soundcore: sound device already in use.\n"); return -EBUSY; } return 0; } #endif /* CONFIG_SOUND_OSS_CORE */
gpl-2.0
EmmanuelU/wild_kernel_lge_gproj
net/can/proc.c
5120
15199
/* * proc.c - procfs support for Protocol family CAN core module * * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/if_arp.h> #include <linux/can/core.h> #include "af_can.h" /* * proc filenames for the PF_CAN core */ #define CAN_PROC_VERSION "version" #define CAN_PROC_STATS "stats" #define CAN_PROC_RESET_STATS "reset_stats" #define CAN_PROC_RCVLIST_ALL "rcvlist_all" #define CAN_PROC_RCVLIST_FIL "rcvlist_fil" #define CAN_PROC_RCVLIST_INV "rcvlist_inv" #define CAN_PROC_RCVLIST_SFF "rcvlist_sff" #define CAN_PROC_RCVLIST_EFF "rcvlist_eff" #define CAN_PROC_RCVLIST_ERR "rcvlist_err" static struct proc_dir_entry *can_dir; static struct proc_dir_entry *pde_version; static struct proc_dir_entry *pde_stats; static struct proc_dir_entry *pde_reset_stats; static struct proc_dir_entry *pde_rcvlist_all; static struct proc_dir_entry *pde_rcvlist_fil; static struct proc_dir_entry *pde_rcvlist_inv; static struct proc_dir_entry *pde_rcvlist_sff; static struct proc_dir_entry *pde_rcvlist_eff; static struct proc_dir_entry *pde_rcvlist_err; static int user_reset; static const char rx_list_name[][8] = { [RX_ERR] = "rx_err", [RX_ALL] = "rx_all", [RX_FIL] = "rx_fil", [RX_INV] = "rx_inv", [RX_EFF] = "rx_eff", }; /* receive filters subscribed for 'all' CAN devices */ extern struct dev_rcv_lists can_rx_alldev_list; /* * af_can statistics stuff */ static void can_init_stats(void) { /* * This memset function is called from a timer context (when * can_stattimer is 
active which is the default) OR in a process * context (reading the proc_fs when can_stattimer is disabled). */ memset(&can_stats, 0, sizeof(can_stats)); can_stats.jiffies_init = jiffies; can_pstats.stats_reset++; if (user_reset) { user_reset = 0; can_pstats.user_reset++; } } static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif, unsigned long count) { unsigned long rate; if (oldjif == newjif) return 0; /* see can_stat_update() - this should NEVER happen! */ if (count > (ULONG_MAX / HZ)) { printk(KERN_ERR "can: calc_rate: count exceeded! %ld\n", count); return 99999999; } rate = (count * HZ) / (newjif - oldjif); return rate; } void can_stat_update(unsigned long data) { unsigned long j = jiffies; /* snapshot */ /* restart counting in timer context on user request */ if (user_reset) can_init_stats(); /* restart counting on jiffies overflow */ if (j < can_stats.jiffies_init) can_init_stats(); /* prevent overflow in calc_rate() */ if (can_stats.rx_frames > (ULONG_MAX / HZ)) can_init_stats(); /* prevent overflow in calc_rate() */ if (can_stats.tx_frames > (ULONG_MAX / HZ)) can_init_stats(); /* matches overflow - very improbable */ if (can_stats.matches > (ULONG_MAX / 100)) can_init_stats(); /* calc total values */ if (can_stats.rx_frames) can_stats.total_rx_match_ratio = (can_stats.matches * 100) / can_stats.rx_frames; can_stats.total_tx_rate = calc_rate(can_stats.jiffies_init, j, can_stats.tx_frames); can_stats.total_rx_rate = calc_rate(can_stats.jiffies_init, j, can_stats.rx_frames); /* calc current values */ if (can_stats.rx_frames_delta) can_stats.current_rx_match_ratio = (can_stats.matches_delta * 100) / can_stats.rx_frames_delta; can_stats.current_tx_rate = calc_rate(0, HZ, can_stats.tx_frames_delta); can_stats.current_rx_rate = calc_rate(0, HZ, can_stats.rx_frames_delta); /* check / update maximum values */ if (can_stats.max_tx_rate < can_stats.current_tx_rate) can_stats.max_tx_rate = can_stats.current_tx_rate; if (can_stats.max_rx_rate < 
can_stats.current_rx_rate) can_stats.max_rx_rate = can_stats.current_rx_rate; if (can_stats.max_rx_match_ratio < can_stats.current_rx_match_ratio) can_stats.max_rx_match_ratio = can_stats.current_rx_match_ratio; /* clear values for 'current rate' calculation */ can_stats.tx_frames_delta = 0; can_stats.rx_frames_delta = 0; can_stats.matches_delta = 0; /* restart timer (one second) */ mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); } /* * proc read functions */ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, struct net_device *dev) { struct receiver *r; struct hlist_node *n; hlist_for_each_entry_rcu(r, n, rx_list, list) { char *fmt = (r->can_id & CAN_EFF_FLAG)? " %-5s %08x %08x %pK %pK %8ld %s\n" : " %-5s %03x %08x %pK %pK %8ld %s\n"; seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, r->func, r->data, r->matches, r->ident); } } static void can_print_recv_banner(struct seq_file *m) { /* * can1. 00000000 00000000 00000000 * ....... 0 tp20 */ seq_puts(m, " device can_id can_mask function" " userdata matches ident\n"); } static int can_stats_proc_show(struct seq_file *m, void *v) { seq_putc(m, '\n'); seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames); seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames); seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches); seq_putc(m, '\n'); if (can_stattimer.function == can_stat_update) { seq_printf(m, " %8ld %% total match ratio (RXMR)\n", can_stats.total_rx_match_ratio); seq_printf(m, " %8ld frames/s total tx rate (TXR)\n", can_stats.total_tx_rate); seq_printf(m, " %8ld frames/s total rx rate (RXR)\n", can_stats.total_rx_rate); seq_putc(m, '\n'); seq_printf(m, " %8ld %% current match ratio (CRXMR)\n", can_stats.current_rx_match_ratio); seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n", can_stats.current_tx_rate); seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n", can_stats.current_rx_rate); seq_putc(m, '\n'); seq_printf(m, " %8ld %% max 
match ratio (MRXMR)\n", can_stats.max_rx_match_ratio); seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n", can_stats.max_tx_rate); seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n", can_stats.max_rx_rate); seq_putc(m, '\n'); } seq_printf(m, " %8ld current receive list entries (CRCV)\n", can_pstats.rcv_entries); seq_printf(m, " %8ld maximum receive list entries (MRCV)\n", can_pstats.rcv_entries_max); if (can_pstats.stats_reset) seq_printf(m, "\n %8ld statistic resets (STR)\n", can_pstats.stats_reset); if (can_pstats.user_reset) seq_printf(m, " %8ld user statistic resets (USTR)\n", can_pstats.user_reset); seq_putc(m, '\n'); return 0; } static int can_stats_proc_open(struct inode *inode, struct file *file) { return single_open(file, can_stats_proc_show, NULL); } static const struct file_operations can_stats_proc_fops = { .owner = THIS_MODULE, .open = can_stats_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int can_reset_stats_proc_show(struct seq_file *m, void *v) { user_reset = 1; if (can_stattimer.function == can_stat_update) { seq_printf(m, "Scheduled statistic reset #%ld.\n", can_pstats.stats_reset + 1); } else { if (can_stats.jiffies_init != jiffies) can_init_stats(); seq_printf(m, "Performed statistic reset #%ld.\n", can_pstats.stats_reset); } return 0; } static int can_reset_stats_proc_open(struct inode *inode, struct file *file) { return single_open(file, can_reset_stats_proc_show, NULL); } static const struct file_operations can_reset_stats_proc_fops = { .owner = THIS_MODULE, .open = can_reset_stats_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int can_version_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%s\n", CAN_VERSION_STRING); return 0; } static int can_version_proc_open(struct inode *inode, struct file *file) { return single_open(file, can_version_proc_show, NULL); } static const struct file_operations can_version_proc_fops = { .owner = THIS_MODULE, .open = 
can_version_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx, struct net_device *dev, struct dev_rcv_lists *d) { if (!hlist_empty(&d->rx[idx])) { can_print_recv_banner(m); can_print_rcvlist(m, &d->rx[idx], dev); } else seq_printf(m, " (%s: no entry)\n", DNAME(dev)); } static int can_rcvlist_proc_show(struct seq_file *m, void *v) { /* double cast to prevent GCC warning */ int idx = (int)(long)m->private; struct net_device *dev; struct dev_rcv_lists *d; seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); rcu_read_lock(); /* receive list for 'all' CAN devices (dev == NULL) */ d = &can_rx_alldev_list; can_rcvlist_proc_show_one(m, idx, NULL, d); /* receive list for registered CAN devices */ for_each_netdev_rcu(&init_net, dev) { if (dev->type == ARPHRD_CAN && dev->ml_priv) can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv); } rcu_read_unlock(); seq_putc(m, '\n'); return 0; } static int can_rcvlist_proc_open(struct inode *inode, struct file *file) { return single_open(file, can_rcvlist_proc_show, PDE(inode)->data); } static const struct file_operations can_rcvlist_proc_fops = { .owner = THIS_MODULE, .open = can_rcvlist_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m, struct net_device *dev, struct dev_rcv_lists *d) { int i; int all_empty = 1; /* check wether at least one list is non-empty */ for (i = 0; i < 0x800; i++) if (!hlist_empty(&d->rx_sff[i])) { all_empty = 0; break; } if (!all_empty) { can_print_recv_banner(m); for (i = 0; i < 0x800; i++) { if (!hlist_empty(&d->rx_sff[i])) can_print_rcvlist(m, &d->rx_sff[i], dev); } } else seq_printf(m, " (%s: no entry)\n", DNAME(dev)); } static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) { struct net_device *dev; struct dev_rcv_lists *d; /* RX_SFF */ seq_puts(m, "\nreceive list 'rx_sff':\n"); 
rcu_read_lock(); /* sff receive list for 'all' CAN devices (dev == NULL) */ d = &can_rx_alldev_list; can_rcvlist_sff_proc_show_one(m, NULL, d); /* sff receive list for registered CAN devices */ for_each_netdev_rcu(&init_net, dev) { if (dev->type == ARPHRD_CAN && dev->ml_priv) can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv); } rcu_read_unlock(); seq_putc(m, '\n'); return 0; } static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file) { return single_open(file, can_rcvlist_sff_proc_show, NULL); } static const struct file_operations can_rcvlist_sff_proc_fops = { .owner = THIS_MODULE, .open = can_rcvlist_sff_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * proc utility functions */ static void can_remove_proc_readentry(const char *name) { if (can_dir) remove_proc_entry(name, can_dir); } /* * can_init_proc - create main CAN proc directory and procfs entries */ void can_init_proc(void) { /* create /proc/net/can directory */ can_dir = proc_mkdir("can", init_net.proc_net); if (!can_dir) { printk(KERN_INFO "can: failed to create /proc/net/can . 
" "CONFIG_PROC_FS missing?\n"); return; } /* own procfs entries from the AF_CAN core */ pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir, &can_version_proc_fops); pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir, &can_stats_proc_fops); pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir, &can_reset_stats_proc_fops); pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir, &can_rcvlist_proc_fops, (void *)RX_ERR); pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir, &can_rcvlist_proc_fops, (void *)RX_ALL); pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir, &can_rcvlist_proc_fops, (void *)RX_FIL); pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir, &can_rcvlist_proc_fops, (void *)RX_INV); pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir, &can_rcvlist_proc_fops, (void *)RX_EFF); pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir, &can_rcvlist_sff_proc_fops); } /* * can_remove_proc - remove procfs entries and main CAN proc directory */ void can_remove_proc(void) { if (pde_version) can_remove_proc_readentry(CAN_PROC_VERSION); if (pde_stats) can_remove_proc_readentry(CAN_PROC_STATS); if (pde_reset_stats) can_remove_proc_readentry(CAN_PROC_RESET_STATS); if (pde_rcvlist_err) can_remove_proc_readentry(CAN_PROC_RCVLIST_ERR); if (pde_rcvlist_all) can_remove_proc_readentry(CAN_PROC_RCVLIST_ALL); if (pde_rcvlist_fil) can_remove_proc_readentry(CAN_PROC_RCVLIST_FIL); if (pde_rcvlist_inv) can_remove_proc_readentry(CAN_PROC_RCVLIST_INV); if (pde_rcvlist_eff) can_remove_proc_readentry(CAN_PROC_RCVLIST_EFF); if (pde_rcvlist_sff) can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF); if (can_dir) proc_net_remove(&init_net, "can"); }
gpl-2.0
elisam98/android_kernel_lge_x5
drivers/staging/vt6655/vntwifi.c
8192
20104
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: vntwifi.c
 *
 * Purpose: export functions for vntwifi lib
 *
 * Author: Yiching Chen
 *
 * Date: feb. 2, 2005
 */

#include "vntwifi.h"
#include "IEEE11h.h"
#include "country.h"
#include "device.h"
#include "wmgr.h"
#include "datarate.h"
//#define PLICE_DEBUG

/*--------------------- Static Definitions -------------------------*/
//static int msglevel = MSG_LEVEL_DEBUG;
//static int msglevel = MSG_LEVEL_INFO;

/*--------------------- Static Classes ----------------------------*/

/*--------------------- Static Variables --------------------------*/

/*--------------------- Static Functions --------------------------*/

/*--------------------- Export Variables --------------------------*/

/*--------------------- Export Functions --------------------------*/

/*
 * Set the configured operation mode (ESS/IBSS/AP) on the management
 * object.  pMgmtHandle must be a valid PSMgmtObject.
 */
void
VNTWIFIvSetOPMode(
	void *pMgmtHandle,
	WMAC_CONFIG_MODE eOPMode
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	pMgmt->eConfigMode = eOPMode;
}

/*
 * Set IBSS (ad-hoc) parameters: beacon period, ATIM window and channel.
 */
void
VNTWIFIvSetIBSSParameter(
	void *pMgmtHandle,
	unsigned short wBeaconPeriod,
	unsigned short wATIMWindow,
	unsigned int uChannel
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	pMgmt->wIBSSBeaconPeriod = wBeaconPeriod;
	pMgmt->wIBSSATIMWindow = wATIMWindow;
	pMgmt->uIBSSChannel = uChannel;
}

/*
 * Return a pointer to the current SSID information element.
 */
PWLAN_IE_SSID
VNTWIFIpGetCurrentSSID(
	void *pMgmtHandle
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	return((PWLAN_IE_SSID) pMgmt->abyCurrSSID);
}

/*
 * Return the current link channel, or 0 when no management handle is given.
 */
unsigned int
VNTWIFIpGetCurrentChannel(
	void *pMgmtHandle
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	if (pMgmtHandle != NULL) {
		return (pMgmt->uCurrChannel);
	}
	return 0;
}

/*
 * Return the current association ID (AID).
 */
unsigned short
VNTWIFIwGetAssocID(
	void *pMgmtHandle
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	return(pMgmt->wCurrAID);
}

/*
 * Return the highest rate index found in the (extended) supported-rates
 * IEs.  Either IE pointer may be NULL; the minimum result is RATE_1M.
 */
unsigned char
VNTWIFIbyGetMaxSupportRate(
	PWLAN_IE_SUPP_RATES pSupportRateIEs,
	PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
	unsigned char byMaxSupportRate = RATE_1M;
	unsigned char bySupportRate = RATE_1M;
	unsigned int ii = 0;

	if (pSupportRateIEs) {
		for (ii = 0; ii < pSupportRateIEs->len; ii++) {
			bySupportRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
			if (bySupportRate > byMaxSupportRate) {
				byMaxSupportRate = bySupportRate;
			}
		}
	}
	if (pExtSupportRateIEs) {
		for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
			bySupportRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
			if (bySupportRate > byMaxSupportRate) {
				byMaxSupportRate = bySupportRate;
			}
		}
	}
	return byMaxSupportRate;
}

/*
 * Pick the rate to use for the ACK of a frame received at byRxDataRate:
 * the highest basic rate (MSB set in the rates IE) that does not exceed
 * the RX rate.  Falls back to 1M for CCK rates and 24M (mandatory for
 * 802.11a/g) for OFDM rates.
 */
unsigned char
VNTWIFIbyGetACKTxRate(
	unsigned char byRxDataRate,
	PWLAN_IE_SUPP_RATES pSupportRateIEs,
	PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
	unsigned char byMaxAckRate;
	unsigned char byBasicRate;
	unsigned int ii;

	if (byRxDataRate <= RATE_11M) {
		byMaxAckRate = RATE_1M;
	} else {
		// 24M is mandatory for 802.11a and 802.11g
		byMaxAckRate = RATE_24M;
	}
	if (pSupportRateIEs) {
		for (ii = 0; ii < pSupportRateIEs->len; ii++) {
			if (pSupportRateIEs->abyRates[ii] & 0x80) {
				byBasicRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
				if ((byBasicRate <= byRxDataRate) &&
				    (byBasicRate > byMaxAckRate)) {
					byMaxAckRate = byBasicRate;
				}
			}
		}
	}
	if (pExtSupportRateIEs) {
		for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
			if (pExtSupportRateIEs->abyRates[ii] & 0x80) {
				byBasicRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
				if ((byBasicRate <= byRxDataRate) &&
				    (byBasicRate > byMaxAckRate)) {
					byMaxAckRate = byBasicRate;
				}
			}
		}
	}
	return byMaxAckRate;
}

/*
 * Set the authentication mode; shared-key algorithm is enabled for
 * WMAC_AUTH_SHAREKEY and WMAC_AUTH_AUTO.
 */
void
VNTWIFIvSetAuthenticationMode(
	void *pMgmtHandle,
	WMAC_AUTHENTICATION_MODE eAuthMode
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	pMgmt->eAuthenMode = eAuthMode;
	if ((eAuthMode == WMAC_AUTH_SHAREKEY) ||
	    (eAuthMode == WMAC_AUTH_AUTO)) {
		pMgmt->bShareKeyAlgorithm = true;
	} else {
		pMgmt->bShareKeyAlgorithm = false;
	}
}

/*
 * Set the encryption mode; privacy is invoked for WEP/TKIP/AES.
 */
void
VNTWIFIvSetEncryptionMode(
	void *pMgmtHandle,
	WMAC_ENCRYPTION_MODE eEncryptionMode
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	pMgmt->eEncryptionMode = eEncryptionMode;
	if ((eEncryptionMode == WMAC_ENCRYPTION_WEPEnabled) ||
	    (eEncryptionMode == WMAC_ENCRYPTION_TKIPEnabled) ||
	    (eEncryptionMode == WMAC_ENCRYPTION_AESEnabled)) {
		pMgmt->bPrivacyInvoked = true;
	} else {
		pMgmt->bPrivacyInvoked = false;
	}
}

/*
 * Configure the PHY mode.  When a concrete (non-AUTO) mode differs from
 * the current one, program the hardware first; fail without touching the
 * configured mode if the card rejects the parameters.
 */
bool
VNTWIFIbConfigPhyMode(
	void *pMgmtHandle,
	CARD_PHY_TYPE ePhyType
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	if ((ePhyType != PHY_TYPE_AUTO) &&
	    (ePhyType != pMgmt->eCurrentPHYMode)) {
		if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL) == true) {
			pMgmt->eCurrentPHYMode = ePhyType;
		} else {
			return(false);
		}
	}
	pMgmt->eConfigPHYMode = ePhyType;
	return(true);
}

/*
 * Read back the configured PHY mode into *pePhyType (both pointers must
 * be non-NULL, otherwise the call is a no-op).
 */
void
VNTWIFIbGetConfigPhyMode(
	void *pMgmtHandle,
	void *pePhyType
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	if ((pMgmt != NULL) && (pePhyType != NULL)) {
		*(PCARD_PHY_TYPE)pePhyType = pMgmt->eConfigPHYMode;
	}
}

/*
 * Query the BSS list: count active entries and return a pointer to the
 * first active one (NULL when the list is empty).
 */
void
VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount, void **pvFirstBSS)
{
	unsigned int ii = 0;
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
	PKnownBSS pBSS = NULL;
	unsigned int uCount = 0;

	*pvFirstBSS = NULL;

	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		pBSS = &(pMgmt->sBSSList[ii]);
		if (!pBSS->bActive) {
			continue;
		}
		if (*pvFirstBSS == NULL) {
			*pvFirstBSS = &(pMgmt->sBSSList[ii]);
		}
		uCount++;
	}
	*puBSSCount = uCount;
}

/*
 * Advance from pvCurrentBSS to the next active BSS entry; *pvNextBSS is
 * set to NULL when the end of the table is reached.
 */
void
VNTWIFIvGetNextBSS(
	void *pMgmtHandle,
	void *pvCurrentBSS,
	void **pvNextBSS
)
{
	PKnownBSS pBSS = (PKnownBSS) pvCurrentBSS;
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	*pvNextBSS = NULL;

	while (*pvNextBSS == NULL) {
		pBSS++;
		/*
		 * Fix: the table holds MAX_BSS_NUM entries, so
		 * &sBSSList[MAX_BSS_NUM] is one past the end.  The original
		 * '>' comparison let pBSS == &sBSSList[MAX_BSS_NUM] through
		 * and then dereferenced it (out-of-bounds read); '>=' stops
		 * at the last valid entry.
		 */
		if (pBSS >= &(pMgmt->sBSSList[MAX_BSS_NUM])) {
			return;
		}
		if (pBSS->bActive == true) {
			*pvNextBSS = pBSS;
			return;
		}
	}
}

/*
 * Update TX attempt / success / failure counters in the node database.
 * In IBSS/AP mode the node is looked up by destination address; in
 * infrastructure mode index 0 (the AP) is used.
 */
void
VNTWIFIvUpdateNodeTxCounter(
	void *pMgmtHandle,
	unsigned char *pbyDestAddress,
	bool bTxOk,
	unsigned short wRate,
	unsigned char *pbyTxFailCount
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
	unsigned int uNodeIndex = 0;
	unsigned int ii;

	if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
	    (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
		if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == false) {
			return;
		}
	}
	pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
	if (bTxOk == true) {
		// transmit success, TxAttempts at least plus one
		pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
		pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
	} else {
		pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
	}
	pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += pbyTxFailCount[MAX_RATE];
	for (ii = 0; ii < MAX_RATE; ii++) {
		pMgmt->sNodeDBTable[uNodeIndex].uTxFail[ii] += pbyTxFailCount[ii];
	}
	return;
}

/*
 * Determine TX data rate plus ACK and CCK/OFDM basic rates for a
 * destination.  Ad-hoc/AP mode consults the per-node database; otherwise
 * node 0 (the AP) decides.  The ACK rate is clamped to the data rate.
 */
void
VNTWIFIvGetTxRate(
	void *pMgmtHandle,
	unsigned char *pbyDestAddress,
	unsigned short *pwTxDataRate,
	unsigned char *pbyACKRate,
	unsigned char *pbyCCKBasicRate,
	unsigned char *pbyOFDMBasicRate
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
	unsigned int uNodeIndex = 0;
	unsigned short wTxDataRate = RATE_1M;
	unsigned char byACKRate = RATE_1M;
	unsigned char byCCKBasicRate = RATE_1M;
	unsigned char byOFDMBasicRate = RATE_24M;
	PWLAN_IE_SUPP_RATES pSupportRateIEs = NULL;
	PWLAN_IE_SUPP_RATES pExtSupportRateIEs = NULL;

	if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
	    (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
		// Adhoc Tx rate decided from node DB
		if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex)) {
			wTxDataRate = (pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
			pSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrSuppRates);
			pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrExtSuppRates);
		} else {
			if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A) {
				wTxDataRate = RATE_2M;
			} else {
				wTxDataRate = RATE_24M;
			}
			pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
			pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
		}
	} else { // Infrastructure: rate decided from AP Node, index = 0
		wTxDataRate = (pMgmt->sNodeDBTable[0].wTxDataRate);
#ifdef PLICE_DEBUG
		printk(KERN_DEBUG "GetTxRate:AP MAC is %pM,TxRate is %d\n",
		       pMgmt->sNodeDBTable[0].abyMACAddr, wTxDataRate);
#endif
		pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
		pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
	}
	byACKRate = VNTWIFIbyGetACKTxRate((unsigned char) wTxDataRate,
					  pSupportRateIEs,
					  pExtSupportRateIEs);
	if (byACKRate > (unsigned char) wTxDataRate) {
		byACKRate = (unsigned char) wTxDataRate;
	}
	byCCKBasicRate = VNTWIFIbyGetACKTxRate(RATE_11M,
					       pSupportRateIEs,
					       pExtSupportRateIEs);
	byOFDMBasicRate = VNTWIFIbyGetACKTxRate(RATE_54M,
						pSupportRateIEs,
						pExtSupportRateIEs);
	*pwTxDataRate = wTxDataRate;
	*pbyACKRate = byACKRate;
	*pbyCCKBasicRate = byCCKBasicRate;
	*pbyOFDMBasicRate = byOFDMBasicRate;
	return;
}

/*
 * Return the group (bGroupKey == true) or pairwise cipher suite selector.
 */
unsigned char
VNTWIFIbyGetKeyCypher(
	void *pMgmtHandle,
	bool bGroupKey
)
{
	PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;

	if (bGroupKey == true) {
		return (pMgmt->byCSSGK);
	} else {
		return (pMgmt->byCSSPK);
	}
}

/*
bool
VNTWIFIbInit(
	void *pAdapterHandler,
	void **pMgmtHandler
)
{
	PSMgmtObject pMgmt = NULL;
	unsigned int ii;

	pMgmt = (PSMgmtObject)kmalloc(sizeof(SMgmtObject), (int)GFP_ATOMIC);
	if (pMgmt == NULL) {
		*pMgmtHandler = NULL;
		return false;
	}

	memset(pMgmt, 0, sizeof(SMgmtObject));
	pMgmt->pAdapter = (void *) pAdapterHandler;

	// should initial MAC address abyMACAddr
	for (ii = 0; ii < WLAN_BSSID_LEN; ii++) {
		pMgmt->abyDesireBSSID[ii] = 0xFF;
	}
	pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0];
	pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0];
	pMgmt->byCSSPK = KEY_CTL_NONE;
	pMgmt->byCSSGK = KEY_CTL_NONE;
	pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;

	pMgmt->cbFreeCmdQueue = CMD_Q_SIZE;
	pMgmt->uCmdDequeueIdx = 0;
	pMgmt->uCmdEnqueueIdx = 0;
	pMgmt->eCommandState = WLAN_CMD_STATE_IDLE;
	pMgmt->bCmdStop = false;
	pMgmt->bCmdRunning = false;

	*pMgmtHandler = pMgmt;
	return true;
}
*/

/*
 * Copy up to MAX_PMKID_CACHE PMKID entries into the management object.
 * Rejects (returns false) a count larger than the cache capacity.
 */
bool
VNTWIFIbSetPMKIDCache(
	void *pMgmtObject,
	unsigned long ulCount,
	void *pPMKIDInfo
)
{
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;

	if (ulCount > MAX_PMKID_CACHE) {
		return (false);
	}
	pMgmt->gsPMKIDCache.BSSIDInfoCount = ulCount;
	memcpy(pMgmt->gsPMKIDCache.BSSIDInfo, pPMKIDInfo, (ulCount*sizeof(PMKIDInfo)));
	return (true);
}

/*
 * Return the highest rate the AP (node 0) advertises as supported.
 * Falls back to 6M on 802.11a, 1M otherwise.
 */
unsigned short
VNTWIFIwGetMaxSupportRate(
	void *pMgmtObject
)
{
	unsigned short wRate = RATE_54M;
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;

	for (wRate = RATE_54M; wRate > RATE_1M; wRate--) {
		if (pMgmt->sNodeDBTable[0].wSuppRate & (1<<wRate)) {
			return (wRate);
		}
	}
	if (pMgmt->eCurrentPHYMode == PHY_TYPE_11A) {
		return (RATE_6M);
	} else {
		return (RATE_1M);
	}
}

/*
 * Enable or disable 802.11h (spectrum management) support.
 */
void
VNTWIFIvSet11h(
	void *pMgmtObject,
	bool b11hEnable
)
{
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;

	pMgmt->b11hEnable = b11hEnable;
}

/*
 * Append one 802.11h measurement report element (basic/CCA/RPI) to the
 * report buffer, provided it still fits in an A3 frame, and transmit the
 * accumulated reports when bEndOfReport is set.
 */
bool
VNTWIFIbMeasureReport(
	void *pMgmtObject,
	bool bEndOfReport,
	void *pvMeasureEID,
	unsigned char byReportMode,
	unsigned char byBasicMap,
	unsigned char byCCAFraction,
	unsigned char *pbyRPIs
)
{
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
	unsigned char *pbyCurrentEID = (unsigned char *) (pMgmt->pCurrMeasureEIDRep);

	//spin_lock_irq(&pDevice->lock);
	if ((pvMeasureEID != NULL) &&
	    (pMgmt->uLengthOfRepEIDs < (WLAN_A3FR_MAXLEN - sizeof(MEASEURE_REP) - sizeof(WLAN_80211HDR_A3) - 3))) {
		pMgmt->pCurrMeasureEIDRep->byElementID = WLAN_EID_MEASURE_REP;
		pMgmt->pCurrMeasureEIDRep->len = 3;
		pMgmt->pCurrMeasureEIDRep->byToken = ((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->byToken;
		pMgmt->pCurrMeasureEIDRep->byMode = byReportMode;
		pMgmt->pCurrMeasureEIDRep->byType = ((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->byType;
		switch (pMgmt->pCurrMeasureEIDRep->byType) {
		case MEASURE_TYPE_BASIC:
			pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_BASIC);
			memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sBasic),
			       &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
			       sizeof(MEASEURE_REQ));
			pMgmt->pCurrMeasureEIDRep->sRep.sBasic.byMap = byBasicMap;
			break;
		case MEASURE_TYPE_CCA:
			pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_CCA);
			memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sCCA),
			       &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
			       sizeof(MEASEURE_REQ));
			pMgmt->pCurrMeasureEIDRep->sRep.sCCA.byCCABusyFraction = byCCAFraction;
			break;
		case MEASURE_TYPE_RPI:
			pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_RPI);
			memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sRPI),
			       &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
			       sizeof(MEASEURE_REQ));
			memcpy(pMgmt->pCurrMeasureEIDRep->sRep.sRPI.abyRPIdensity, pbyRPIs, 8);
			break;
		default:
			break;
		}
		pbyCurrentEID += (2 + pMgmt->pCurrMeasureEIDRep->len);
		pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
		pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
	}
	if (bEndOfReport == true) {
		IEEE11hbMSRRepTx(pMgmt);
	}
	//spin_unlock_irq(&pDevice->lock);
	return (true);
}

/*
 * Record a completed channel switch: update the current channel and clear
 * the pending switch flag.
 */
bool
VNTWIFIbChannelSwitch(
	void *pMgmtObject,
	unsigned char byNewChannel
)
{
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;

	//spin_lock_irq(&pDevice->lock);
	pMgmt->uCurrChannel = byNewChannel;
	pMgmt->bSwitchChannel = false;
	//spin_unlock_irq(&pDevice->lock);
	return true;
}

/*
bool
VNTWIFIbRadarPresent(
	void *pMgmtObject,
	unsigned char byChannel
)
{
	PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
	if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
	    (byChannel == (unsigned char) pMgmt->uCurrChannel) &&
	    (pMgmt->bSwitchChannel != true) &&
	    (pMgmt->b11hEnable == true)) {
		if (!compare_ether_addr(pMgmt->abyIBSSDFSOwner, CARDpGetCurrentAddress(pMgmt->pAdapter))) {
			pMgmt->byNewChannel = CARDbyAutoChannelSelect(pMgmt->pAdapter, (unsigned char) pMgmt->uCurrChannel);
			pMgmt->bSwitchChannel = true;
		}
		BEACONbSendBeacon(pMgmt);
		CARDbChannelSwitch(pMgmt->pAdapter, 0, pMgmt->byNewChannel, 10);
	}
	return true;
}
*/
gpl-2.0
Tesla-Redux-Devices/kernel_apq8064
arch/cris/arch-v32/mach-fs/cpufreq.c
9472
3546
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <hwregs/reg_map.h>
#include <arch/hwregs/reg_rdwr.h>
#include <arch/hwregs/config_defs.h>
#include <arch/hwregs/bif_core_defs.h>

static int cris_sdram_freq_notifier(struct notifier_block *nb,
				    unsigned long val, void *data);

static struct notifier_block cris_sdram_freq_notifier_block = {
	.notifier_call = cris_sdram_freq_notifier
};

/* Two operating points: 6 MHz (PLL off) and 200 MHz (PLL on). */
static struct cpufreq_frequency_table cris_freq_table[] = {
	{0x01, 6000},
	{0x02, 200000},
	{0, CPUFREQ_TABLE_END},
};

/* Read the current CPU frequency (kHz) from the clock-control register. */
static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
{
	reg_config_rw_clk_ctrl clk_ctrl;

	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
	return clk_ctrl.pll ? 200000 : 6000;
}

/* Switch the PLL on or off according to the requested table state. */
static void cris_freq_set_cpu_state(unsigned int state)
{
	int i;
	struct cpufreq_freqs freqs;
	reg_config_rw_clk_ctrl clk_ctrl;

	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);

	/*
	 * NOTE(review): freqs is overwritten on each iteration, so only the
	 * last possible CPU's values reach the notifier chain.  All cores
	 * share one clock (see below), so old/new are identical for every
	 * CPU, but per-CPU notification may have been the intent — confirm.
	 */
	for_each_possible_cpu(i) {
		freqs.old = cris_freq_get_cpu_frequency(i);
		freqs.new = cris_freq_table[state].frequency;
		freqs.cpu = i;
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	local_irq_disable();

	/* Even though we may be SMP they will share the same clock
	 * so all settings are made on CPU0. */
	if (cris_freq_table[state].frequency == 200000)
		clk_ctrl.pll = 1;
	else
		clk_ctrl.pll = 0;
	REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);

	local_irq_enable();

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}

/* cpufreq callback: validate a policy against the frequency table. */
static int cris_freq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
}

/* cpufreq callback: map the requested target to a table state and apply it. */
static int cris_freq_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int newstate = 0;

	if (cpufreq_frequency_table_target(policy, cris_freq_table,
					   target_freq, relation, &newstate))
		return -EINVAL;

	cris_freq_set_cpu_state(newstate);

	return 0;
}

/* cpufreq callback: fill in cpuinfo and default policy values. */
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
	int result;

	/* cpuinfo and default policy values */
	policy->cpuinfo.transition_latency = 1000000;	/* 1ms */
	policy->cur = cris_freq_get_cpu_frequency(0);

	result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
	if (result)
		return (result);

	cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);

	return 0;
}

/* cpufreq callback: release the per-CPU frequency-table attribute. */
static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}

static struct freq_attr *cris_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver cris_freq_driver = {
	.get = cris_freq_get_cpu_frequency,
	.verify = cris_freq_verify,
	.target = cris_freq_target,
	.init = cris_freq_cpu_init,
	.exit = cris_freq_cpu_exit,
	.name = "cris_freq",
	.owner = THIS_MODULE,
	.attr = cris_freq_attr,
};

static int __init cris_freq_init(void)
{
	int ret;

	ret = cpufreq_register_driver(&cris_freq_driver);
	/*
	 * Fix: only hook the transition notifier when the driver actually
	 * registered; the original registered it unconditionally, leaving a
	 * live notifier behind after a failed probe.
	 */
	if (ret)
		return ret;
	cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

/*
 * Transition notifier: before a frequency change, adjust the SDRAM
 * clock-power-down (cpd) timing and, when speeding up to 200 MHz, busy-wait
 * briefly before writing the new timing.
 */
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	/* volatile so the settle-delay loop below is not optimized away */
	volatile int i;
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_PRECHANGE) {
		reg_bif_core_rw_sdram_timing timing =
		    REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
		timing.cpd = (freqs->new == 200000 ? 0 : 1);

		if (freqs->new == 200000)
			for (i = 0; i < 50000; i++)
				;
		REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
	}
	return 0;
}

module_init(cris_freq_init);
gpl-2.0
n3ocort3x/Kernel_one_x_sense
arch/arm/mach-tegra/cpu-tegra3.c
1
23550
/* * arch/arm/mach-tegra/cpu-tegra3.c * * CPU auto-hotplug for Tegra3 CPUs * * Copyright (c) 2011, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/cpu.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/pm_qos_params.h> #include <linux/fs.h> #include <linux/uaccess.h> #include "pm.h" #include "cpu-tegra.h" #include "clock.h" #ifdef PWR_DEVICE_TAG #undef PWR_DEVICE_TAG #endif #define PWR_DEVICE_TAG "CPUHP" #define INITIAL_STATE TEGRA_HP_DISABLED #define UP2G0_DELAY_MS 200 #define UP2Gn_DELAY_MS 1000 #define DOWN_DELAY_MS 2000 #define CPU_HOTPLUG_TAG "[CPUHP]" static struct mutex *tegra3_cpu_lock; static struct workqueue_struct *hotplug_wq; static struct delayed_work hotplug_work; static struct workqueue_struct *cpuplug_wq; static struct work_struct cpuplug_work; static bool is_plugging; static bool no_lp; module_param(no_lp, bool, 0644); static unsigned long up2gn_delay; static unsigned long up2g0_delay; static unsigned long down_delay; module_param(up2gn_delay, ulong, 0644); module_param(up2g0_delay, ulong, 0644); module_param(down_delay, 
ulong, 0644); static unsigned int idle_top_freq; static unsigned int idle_bottom_freq; module_param(idle_top_freq, uint, 0644); module_param(idle_bottom_freq, uint, 0644); static int mp_overhead = 10; module_param(mp_overhead, int, 0644); static int balance_level = 65; module_param(balance_level, int, 0644); static int up_time = 100; module_param(up_time, int, 0644); static int down_time = 200; module_param(down_time, int, 0644); static struct clk *cpu_clk; static struct clk *cpu_g_clk; static struct clk *cpu_lp_clk; ktime_t active_start_time; static struct { cputime64_t total_active_Time; cputime64_t this_active_Time; } cpu_hp_active_time_stats[CONFIG_NR_CPUS]; static struct { cputime64_t time_up_total; u64 last_update; unsigned int up_down_count; } hp_stats[CONFIG_NR_CPUS + 1]; /* Append LP CPU entry at the end */ static void hp_init_stats(void) { int i; u64 cur_jiffies = get_jiffies_64(); for (i = 0; i <= CONFIG_NR_CPUS; i++) { hp_stats[i].time_up_total = 0; hp_stats[i].last_update = cur_jiffies; hp_stats[i].up_down_count = 0; if (is_lp_cluster()) { if (i == CONFIG_NR_CPUS) hp_stats[i].up_down_count = 1; } else { if ((i < nr_cpu_ids) && cpu_online(i)) hp_stats[i].up_down_count = 1; } } } static void hp_stats_update(unsigned int cpu, bool up) { u64 cur_jiffies = get_jiffies_64(); bool was_up = hp_stats[cpu].up_down_count & 0x1; if (was_up) hp_stats[cpu].time_up_total = cputime64_add( hp_stats[cpu].time_up_total, cputime64_sub( cur_jiffies, hp_stats[cpu].last_update)); if (was_up != up) { hp_stats[cpu].up_down_count++; if ((hp_stats[cpu].up_down_count & 0x1) != up) { /* FIXME: sysfs user space CPU control breaks stats */ pr_err(CPU_HOTPLUG_TAG"tegra hotplug stats out of sync with %s CPU%d", (cpu < CONFIG_NR_CPUS) ? "G" : "LP", (cpu < CONFIG_NR_CPUS) ? 
cpu : 0); hp_stats[cpu].up_down_count ^= 0x1; } } hp_stats[cpu].last_update = cur_jiffies; } enum { TEGRA_HP_DISABLED = 0, TEGRA_HP_IDLE, TEGRA_HP_DOWN, TEGRA_HP_UP, }; static int hp_state; static int mp_state; static int last_state; static int hp_state_set(const char *arg, const struct kernel_param *kp) { int ret = 0; int old_state; if (!tegra3_cpu_lock) return ret; mutex_lock(tegra3_cpu_lock); old_state = hp_state; ret = param_set_bool(arg, kp); /* set idle or disabled only */ if (ret == 0) { if ((hp_state == TEGRA_HP_DISABLED) && (old_state != TEGRA_HP_DISABLED)) pr_info(CPU_HOTPLUG_TAG" Tegra auto hotplug disabled\n"); else if (hp_state != TEGRA_HP_DISABLED) { if (old_state == TEGRA_HP_DISABLED) { pr_info("Tegra auto-hotplug enabled\n"); hp_init_stats(); } active_start_time = ktime_get(); /* catch-up with governor target speed */ tegra_cpu_set_speed_cap(NULL); } } else pr_warn(CPU_HOTPLUG_TAG" %s: unable to set tegra hotplug state %s\n", __func__, arg); mutex_unlock(tegra3_cpu_lock); return ret; } static int hp_state_get(char *buffer, const struct kernel_param *kp) { return param_get_int(buffer, kp); } static struct kernel_param_ops tegra_hp_state_ops = { .set = hp_state_set, .get = hp_state_get, }; module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644); static unsigned int NwNs_Threshold[] = {19, 30, 19, 11, 19, 11, 0, 11}; static unsigned int TwTs_Threshold[] = {140, 0, 140, 190, 140, 190, 0, 190}; extern unsigned int get_rq_info(void); static unsigned int NwNs[8] = {19, 30, 19, 11, 19, 11, 0, 11}; module_param_array(NwNs, uint, NULL, 0644); static unsigned int TwTs[8] = {140, 0, 140, 190, 140, 190, 0, 190}; module_param_array(TwTs, uint, NULL, 0644); extern unsigned int set_rq_poll_ms(unsigned int poll_ms); static int mp_policy = 0; static int mp_policy_set(const char *arg, const struct kernel_param *kp) { int ret = 0; if (!tegra3_cpu_lock) return ret; mutex_lock(tegra3_cpu_lock); ret = param_set_int(arg, kp); if (ret == 0) { if (mp_policy > 0) 
{ memcpy(NwNs_Threshold, NwNs, sizeof(unsigned int)*8); memcpy(TwTs_Threshold, TwTs, sizeof(unsigned int)*8); set_rq_poll_ms(9); } else { mp_policy = 0; pr_info(CPU_HOTPLUG_TAG" mp_policy is off\n"); } } else pr_warn(CPU_HOTPLUG_TAG" %s: unable to set tegra mp_policy %s\n", __func__, arg); mutex_unlock(tegra3_cpu_lock); return ret; } static int mp_policy_get(char *buffer, const struct kernel_param *kp) { return param_get_int(buffer, kp); } static struct kernel_param_ops tegra_mp_policy_ops = { .set = mp_policy_set, .get = mp_policy_get, }; module_param_cb(mp_policy, &tegra_mp_policy_ops, &mp_policy, 0644); enum { TEGRA_CPU_SPEED_BALANCED, TEGRA_CPU_SPEED_BIASED, TEGRA_CPU_SPEED_SKEWED, }; static noinline int tegra_cpu_speed_balance(void) { unsigned long highest_speed = tegra_cpu_highest_speed(); unsigned long balanced_speed = highest_speed * balance_level / 100; unsigned long skewed_speed = balanced_speed / 2; unsigned int nr_cpus = num_online_cpus(); unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? 
: 4; unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS); /* balanced: freq targets for all CPUs are above 50% of highest speed biased: freq target for at least one CPU is below 50% threshold skewed: freq targets for at least 2 CPUs are below 25% threshold */ if (((tegra_count_slow_cpus(skewed_speed) >= 2) || tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) || (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) && (nr_cpus > min_cpus)) return TEGRA_CPU_SPEED_SKEWED; if (((tegra_count_slow_cpus(balanced_speed) >= 1) || (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) || (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) && (nr_cpus >= min_cpus)) return TEGRA_CPU_SPEED_BIASED; return TEGRA_CPU_SPEED_BALANCED; } void updateCurrentCPUTotalActiveTime() { int active_cpus_count = 0; ktime_t current_kt = ktime_get(); active_cpus_count = (cpu_online(0) + cpu_online(1) + cpu_online(2) + cpu_online(3)) - 1; cpu_hp_active_time_stats[active_cpus_count].this_active_Time = ktime_to_us(ktime_sub(current_kt, active_start_time)); cpu_hp_active_time_stats[active_cpus_count].total_active_Time += cpu_hp_active_time_stats[active_cpus_count].this_active_Time; active_start_time = current_kt; } void printCPUTotalActiveTime() { updateCurrentCPUTotalActiveTime(); pr_pwr_story("CPUs total active time:%llu,%llu,%llu,%llu", cpu_hp_active_time_stats[0].total_active_Time, cpu_hp_active_time_stats[1].total_active_Time, cpu_hp_active_time_stats[2].total_active_Time, cpu_hp_active_time_stats[3].total_active_Time); } static void tegra_auto_hotplug_work_func(struct work_struct *work) { bool up = false; unsigned int cpu = nr_cpu_ids; mutex_lock(tegra3_cpu_lock); if (mp_policy && !is_lp_cluster()) { mutex_unlock(tegra3_cpu_lock); return; } switch (hp_state) { case TEGRA_HP_DISABLED: case TEGRA_HP_IDLE: break; case TEGRA_HP_DOWN: cpu = tegra_get_slowest_cpu_n(); if (cpu < nr_cpu_ids) { up = false; queue_delayed_work( hotplug_wq, &hotplug_work, down_delay); hp_stats_update(cpu, 
false); } else if (!is_lp_cluster() && !no_lp) { if(!clk_set_parent(cpu_clk, cpu_lp_clk)) { hp_stats_update(CONFIG_NR_CPUS, true); hp_stats_update(0, false); /* catch-up with governor target speed */ tegra_cpu_set_speed_cap(NULL); } else queue_delayed_work( hotplug_wq, &hotplug_work, down_delay); } break; case TEGRA_HP_UP: if (is_lp_cluster() && !no_lp) { if(!clk_set_parent(cpu_clk, cpu_g_clk)) { hp_stats_update(CONFIG_NR_CPUS, false); hp_stats_update(0, true); /* catch-up with governor target speed */ tegra_cpu_set_speed_cap(NULL); } } else { switch (tegra_cpu_speed_balance()) { /* cpu speed is up and balanced - one more on-line */ case TEGRA_CPU_SPEED_BALANCED: cpu = cpumask_next_zero(0, cpu_online_mask); if (cpu < nr_cpu_ids) { up = true; hp_stats_update(cpu, true); } break; /* cpu speed is up, but skewed - remove one core */ case TEGRA_CPU_SPEED_SKEWED: cpu = tegra_get_slowest_cpu_n(); if (cpu < nr_cpu_ids) { up = false; hp_stats_update(cpu, false); } break; /* cpu speed is up, but under-utilized - do nothing */ case TEGRA_CPU_SPEED_BIASED: default: break; } } queue_delayed_work( hotplug_wq, &hotplug_work, up2gn_delay); break; default: pr_err(CPU_HOTPLUG_TAG"%s: invalid tegra hotplug state %d\n", __func__, hp_state); } mutex_unlock(tegra3_cpu_lock); if (system_state > SYSTEM_RUNNING) { pr_info(CPU_HOTPLUG_TAG" system is not running\n"); } else if (cpu < nr_cpu_ids) { if (up) { updateCurrentCPUTotalActiveTime(); cpu_up(cpu); pr_info(CPU_HOTPLUG_TAG" turn on CPU %d, online CPU 0-3=[%d%d%d%d]\n", cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3)); } else { updateCurrentCPUTotalActiveTime(); cpu_down(cpu); pr_info(CPU_HOTPLUG_TAG" turn off CPU %d, online CPU 0-3=[%d%d%d%d]\n", cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3)); } } } static void tegra_auto_cpuplug_work_func(struct work_struct *work) { bool up = false; unsigned int cpu = nr_cpu_ids; mutex_lock(tegra3_cpu_lock); if (hp_state != TEGRA_HP_DISABLED) { switch (last_state) { 
case TEGRA_HP_UP: cpu = cpumask_next_zero(0, cpu_online_mask); if (cpu < nr_cpu_ids) { up = true; hp_stats_update(cpu, true); } break; case TEGRA_HP_DOWN: cpu = tegra_get_slowest_cpu_n(); if (cpu < nr_cpu_ids) { up = false; hp_stats_update(cpu, false); } else if (!is_lp_cluster() && !no_lp) { /* Invalid request, why put sth down that is not there? This would cause a NULL pointer dereference in arch/arm/mach-tegra/clock.c: clk_set_parent & clk_get_rate show-p1984, 05.08.12 (MM/DD/YY) */ if (!cpu_clk || !cpu_lp_clk) break; if (!clk_set_parent(cpu_clk, cpu_lp_clk)) { hp_stats_update(CONFIG_NR_CPUS, true); hp_stats_update(0, false); /* catch-up with governor target speed */ tegra_cpu_set_speed_cap(NULL); } else pr_err(CPU_HOTPLUG_TAG" clk_set_parent fail\n"); } break; } } mutex_unlock(tegra3_cpu_lock); if (system_state > SYSTEM_RUNNING) { pr_info(CPU_HOTPLUG_TAG" SYSTEM is not running\n"); } else if (cpu < nr_cpu_ids) { if (up) { updateCurrentCPUTotalActiveTime(); cpu_up(cpu); pr_info(CPU_HOTPLUG_TAG" TURN ON CPU %d, online CPU 0-3=[%d%d%d%d]\n", cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3)); } else { updateCurrentCPUTotalActiveTime(); cpu_down(cpu); pr_info(CPU_HOTPLUG_TAG" TURN OFF CPU %d, online CPU 0-3=[%d%d%d%d]\n", cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3)); } } mutex_lock(tegra3_cpu_lock); is_plugging = false; mutex_unlock(tegra3_cpu_lock); } static int mp_decision(void) { static bool first_call = true; int new_state = TEGRA_HP_IDLE; int nr_cpu_online; int index; unsigned int rq_depth; static cputime64_t total_time = 0; static cputime64_t last_time; cputime64_t current_time; cputime64_t this_time = 0; current_time = ktime_to_ms(ktime_get()); if (first_call) { first_call = false; } else { this_time = current_time - last_time; } total_time += this_time; rq_depth = get_rq_info(); nr_cpu_online = num_online_cpus(); if (nr_cpu_online) { index = (nr_cpu_online - 1) * 2; if ((nr_cpu_online < 4) && (rq_depth >= 
NwNs_Threshold[index])) { if (total_time >= TwTs_Threshold[index]) { new_state = TEGRA_HP_UP; } } else if (rq_depth <= NwNs_Threshold[index+1]) { if (total_time >= TwTs_Threshold[index+1] ) { new_state = TEGRA_HP_DOWN; } } else { total_time = 0; } } else { total_time = 0; } if (new_state != TEGRA_HP_IDLE) { total_time = 0; } last_time = ktime_to_ms(ktime_get()); return new_state; } void gcpu_plug(unsigned int cpu_freq) { unsigned long up_delay, top_freq, bottom_freq; static bool first_call = true; static cputime64_t total_time = 0; static cputime64_t last_time; cputime64_t current_time; cputime64_t this_time = 0; if (is_plugging) { return; } current_time = ktime_to_ms(ktime_get()); if (first_call) { first_call = false; } else { this_time = current_time - last_time; } total_time += this_time; up_delay = up2gn_delay; top_freq = idle_bottom_freq; bottom_freq = idle_bottom_freq; if (smp_processor_id() == 0) mp_state = mp_decision(); else mp_state = TEGRA_HP_IDLE; switch (hp_state) { case TEGRA_HP_DISABLED: total_time = 0; break; case TEGRA_HP_IDLE: if (cpu_freq > top_freq) { hp_state = TEGRA_HP_UP; } else if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; } total_time = 0; break; case TEGRA_HP_DOWN: if (cpu_freq > top_freq) { hp_state = TEGRA_HP_UP; total_time = 0; } break; case TEGRA_HP_UP: if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; total_time = 0; } break; } if (hp_state == TEGRA_HP_UP) { switch (tegra_cpu_speed_balance()) { /* cpu speed is up and balanced - one more on-line */ case TEGRA_CPU_SPEED_BALANCED: if ((total_time >= up_time) && (mp_state == TEGRA_HP_UP)) { is_plugging = true; last_state = TEGRA_HP_UP; queue_work(cpuplug_wq, &cpuplug_work); total_time = 0; } break; /* cpu speed is up, but skewed - remove one core */ case TEGRA_CPU_SPEED_SKEWED: if ((total_time >= down_time) && (mp_state == TEGRA_HP_DOWN)) { is_plugging = true; last_state = TEGRA_HP_DOWN; queue_work(cpuplug_wq, &cpuplug_work); total_time = 0; } break; /* cpu speed is up, 
but under-utilized - do nothing */ case TEGRA_CPU_SPEED_BIASED: if (total_time >= up_time) total_time = 0; default: break; } } else if (hp_state == TEGRA_HP_DOWN) { if ((total_time >= down_time) && (mp_state == TEGRA_HP_DOWN)) { is_plugging = true; last_state = TEGRA_HP_DOWN; queue_work(cpuplug_wq, &cpuplug_work); total_time = 0; } } last_time = ktime_to_ms(ktime_get()); } static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p) { mutex_lock(tegra3_cpu_lock); if ((n >= 2) && is_lp_cluster()) { /* make sure cpu rate is within g-mode range before switching */ unsigned int speed = max( tegra_getspeed(0), clk_get_min_rate(cpu_g_clk) / 1000); tegra_update_cpu_speed(speed); if (!clk_set_parent(cpu_clk, cpu_g_clk)) { hp_stats_update(CONFIG_NR_CPUS, false); hp_stats_update(0, true); } } /* update governor state machine */ tegra_cpu_set_speed_cap(NULL); mutex_unlock(tegra3_cpu_lock); return NOTIFY_OK; } static struct notifier_block min_cpus_notifier = { .notifier_call = min_cpus_notify, }; void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend) { unsigned long up_delay, top_freq, bottom_freq; if (!is_g_cluster_present()) return; if (suspend && (hp_state != TEGRA_HP_DISABLED)) { hp_state = TEGRA_HP_IDLE; /* Switch to G-mode if suspend rate is high enough */ if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) { if (!clk_set_parent(cpu_clk, cpu_g_clk)) { hp_stats_update(CONFIG_NR_CPUS, false); hp_stats_update(0, true); } } return; } if (is_lp_cluster()) { up_delay = up2g0_delay; top_freq = idle_top_freq; bottom_freq = 0; } else { up_delay = up2gn_delay; top_freq = idle_bottom_freq; bottom_freq = idle_bottom_freq; if (mp_policy) { gcpu_plug(cpu_freq); return; } } /* G cluster: UP/DOWN hysteresis loop */ if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) { if (hp_state != TEGRA_HP_UP) { hp_state = TEGRA_HP_UP; queue_delayed_work( hotplug_wq, &hotplug_work, up_delay); } return; } switch (hp_state) { case TEGRA_HP_DISABLED: break; case 
TEGRA_HP_IDLE: if (cpu_freq > top_freq) { hp_state = TEGRA_HP_UP; queue_delayed_work( hotplug_wq, &hotplug_work, up_delay); } else if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; queue_delayed_work( hotplug_wq, &hotplug_work, down_delay); } break; case TEGRA_HP_DOWN: if (cpu_freq > top_freq) { hp_state = TEGRA_HP_UP; queue_delayed_work( hotplug_wq, &hotplug_work, up_delay); } else if (cpu_freq > bottom_freq) { hp_state = TEGRA_HP_IDLE; } break; case TEGRA_HP_UP: if (cpu_freq <= bottom_freq) { hp_state = TEGRA_HP_DOWN; queue_delayed_work( hotplug_wq, &hotplug_work, down_delay); } else if (cpu_freq <= top_freq) { hp_state = TEGRA_HP_IDLE; } break; default: pr_err(CPU_HOTPLUG_TAG" %s: invalid tegra hotplug state %d\n", __func__, hp_state); BUG(); } } int tegra_auto_hotplug_init(struct mutex *cpu_lock) { /* * Not bound to the issuer CPU (=> high-priority), has rescue worker * task, single-threaded, freezable. */ int i = 0; hotplug_wq = alloc_workqueue( "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!hotplug_wq) return -ENOMEM; INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func); cpuplug_wq = alloc_workqueue( "cpu-tegra3-plug", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!cpuplug_wq) return -ENOMEM; INIT_WORK(&cpuplug_work, tegra_auto_cpuplug_work_func); cpu_clk = clk_get_sys(NULL, "cpu"); cpu_g_clk = clk_get_sys(NULL, "cpu_g"); cpu_lp_clk = clk_get_sys(NULL, "cpu_lp"); if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk)) return -ENOENT; idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000; idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000; up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS); up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS); down_delay = msecs_to_jiffies(DOWN_DELAY_MS); is_plugging = false; tegra3_cpu_lock = cpu_lock; hp_state = INITIAL_STATE; mp_state = TEGRA_HP_IDLE; hp_init_stats(); pr_info(CPU_HOTPLUG_TAG"Tegra auto-hotplug initialized: %s\n", (hp_state == TEGRA_HP_DISABLED) ? 
"disabled" : "enabled"); for (i = 0; i <= CONFIG_NR_CPUS; i++) { cpu_hp_active_time_stats[i].this_active_Time = 0; cpu_hp_active_time_stats[i].total_active_Time = 0; } pm_debug_cpu_hotplug = printCPUTotalActiveTime; if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier)) pr_err("%s: Failed to register min cpus PM QoS notifier\n", __func__); return 0; } #ifdef CONFIG_DEBUG_FS static struct dentry *hp_debugfs_root; struct pm_qos_request_list min_cpu_req; struct pm_qos_request_list max_cpu_req; static int hp_stats_show(struct seq_file *s, void *data) { int i; u64 cur_jiffies = get_jiffies_64(); mutex_lock(tegra3_cpu_lock); if (hp_state != TEGRA_HP_DISABLED) { for (i = 0; i <= CONFIG_NR_CPUS; i++) { bool was_up = (hp_stats[i].up_down_count & 0x1); hp_stats_update(i, was_up); } } mutex_unlock(tegra3_cpu_lock); seq_printf(s, "%-15s ", "cpu:"); for (i = 0; i < CONFIG_NR_CPUS; i++) { seq_printf(s, "G%-9d ", i); } seq_printf(s, "LP\n"); seq_printf(s, "%-15s ", "transitions:"); for (i = 0; i <= CONFIG_NR_CPUS; i++) { seq_printf(s, "%-10u ", hp_stats[i].up_down_count); } seq_printf(s, "\n"); seq_printf(s, "%-15s ", "time plugged:"); for (i = 0; i <= CONFIG_NR_CPUS; i++) { seq_printf(s, "%-10llu ", cputime64_to_clock_t(hp_stats[i].time_up_total)); } seq_printf(s, "\n"); seq_printf(s, "%-15s %llu\n", "time-stamp:", cputime64_to_clock_t(cur_jiffies)); return 0; } static int hp_stats_open(struct inode *inode, struct file *file) { return single_open(file, hp_stats_show, inode->i_private); } static const struct file_operations hp_stats_fops = { .open = hp_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int min_cpus_get(void *data, u64 *val) { *val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS); return 0; } static int min_cpus_set(void *data, u64 val) { pm_qos_update_request(&min_cpu_req, (s32)val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n"); static int max_cpus_get(void *data, u64 *val) { 
*val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS); return 0; } static int max_cpus_set(void *data, u64 val) { pm_qos_update_request(&max_cpu_req, (s32)val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n"); static int __init tegra_auto_hotplug_debug_init(void) { if (!tegra3_cpu_lock) return -ENOENT; hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL); if (!hp_debugfs_root) return -ENOMEM; pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS, PM_QOS_DEFAULT_VALUE); pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS, PM_QOS_DEFAULT_VALUE); if (!debugfs_create_file( "min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops)) goto err_out; if (!debugfs_create_file( "max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops)) goto err_out; if (!debugfs_create_file( "stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops)) goto err_out; return 0; err_out: debugfs_remove_recursive(hp_debugfs_root); pm_qos_remove_request(&min_cpu_req); pm_qos_remove_request(&max_cpu_req); return -ENOMEM; } late_initcall(tegra_auto_hotplug_debug_init); #endif void tegra_auto_hotplug_exit(void) { destroy_workqueue(hotplug_wq); destroy_workqueue(cpuplug_wq); #ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(hp_debugfs_root); pm_qos_remove_request(&min_cpu_req); pm_qos_remove_request(&max_cpu_req); #endif }
gpl-2.0
google-code/aten
src/gui/messages_funcs.cpp
1
1803
/* *** Messages Dock Widget *** src/gui/messages_funcs.cpp Copyright T. Youngs 2007-2015 This file is part of Aten. Aten is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Aten is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Aten. If not, see <http://www.gnu.org/licenses/>. */ #include <QtGui/QCloseEvent> #include "gui/mainwindow.h" #include "gui/messages.h" // Constructor MessagesWidget::MessagesWidget(AtenWindow& parent, Qt::WindowFlags flags) : QDockWidget(&parent, flags), parent_(parent) { ui.setupUi(this); } void MessagesWidget::showWidget() { show(); refresh(); } void MessagesWidget::refresh() { if (!this->isVisible()) return; } // A link in the text browser has been clicked void MessagesWidget::on_MessagesBrowser_anchorClicked(const QUrl &link) { // Attempt to construct a program based on the supplied target URL Program program; if (!program.generateFromString(qPrintable(link.toString()), "MessageLinkCommand", "Message Link Commane")) { Messenger::print("Unable to construct commands from context link."); return; } // Execute the commands ReturnValue rv; program.execute(rv); // Re-focus to the main window canvas // gui.mainCanvas()->setFocus(); ATEN2 TODO parent_.updateWidgets(AtenWindow::AllTarget-AtenWindow::ModelsTarget); } void MessagesWidget::closeEvent(QCloseEvent *event) { event->accept(); }
gpl-2.0
jeffegg/beaglebone
fs/stack.c
1
2659
#include <linux/module.h> #include <linux/fs.h> #include <linux/fs_stack.h> /* does _NOT_ require i_mutex to be held. * * This function cannot be inlined since i_size_{read,write} is rather * heavy-weight on 32-bit systems */ void fsstack_copy_inode_size(struct inode *dst, struct inode *src) { loff_t i_size; blkcnt_t i_blocks; /* * i_size_read() includes its own seqlocking and protection from * preemption (see include/linux/fs.h): we need nothing extra for * that here, and prefer to avoid nesting locks than attempt to keep * i_size and i_blocks in sync together. */ i_size = i_size_read(src); /* * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to * keep the two halves of i_blocks in sync despite SMP or PREEMPT - * though stat's generic_fillattr() doesn't bother, and we won't be * applying quotas (where i_blocks does become important) at the * upper level. * * We don't actually know what locking is used at the lower level; * but if it's a filesystem that supports quotas, it will be using * i_lock as in inode_add_bytes(). */ if (sizeof(i_blocks) > sizeof(long)) spin_lock(&src->i_lock); i_blocks = src->i_blocks; if (sizeof(i_blocks) > sizeof(long)) spin_unlock(&src->i_lock); /* * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for * fsstack_copy_inode_size() to hold some lock around * i_size_write(), otherwise i_size_read() may spin forever (see * include/linux/fs.h). We don't necessarily hold i_mutex when this * is called, so take i_lock for that case. * * And if CONFIG_LBADF (on 32-bit), continue our effort to keep the * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock * for that case too, and do both at once by combining the tests. * * There is none of this locking overhead in the 64-bit case. 
*/ if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long)) spin_lock(&dst->i_lock); i_size_write(dst, i_size); dst->i_blocks = i_blocks; if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long)) spin_unlock(&dst->i_lock); } EXPORT_SYMBOL_GPL(fsstack_copy_inode_size); /* copy all attributes */ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src) { dest->i_mode = src->i_mode; dest->i_uid = src->i_uid; dest->i_gid = src->i_gid; dest->i_rdev = src->i_rdev; dest->i_atime = src->i_atime; dest->i_mtime = src->i_mtime; dest->i_ctime = src->i_ctime; dest->i_blkbits = src->i_blkbits; dest->i_flags = src->i_flags; dest->i_nlink = src->i_nlink; } EXPORT_SYMBOL_GPL(fsstack_copy_attr_all);
gpl-2.0
camelguo/linux-2.6-trimedia
drivers/crypto/sec2test/testSG.c
1
14055
/**************************************************************************** * testSG.c - Simulates scatter-gather buffer test for SEC2 device driver **************************************************************************** * Copyright (c) 2004-2005 Freescale Semiconductor * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. ***************************************************************************/ /* Revision History: * 1.1.0 Dec 05,2004 sec - prep for linux-compatible driver release * 1.2 02-Feb-2005 sec - fix types, allow for contig buffers in Linux * 1.3 Aug 11,2005 sec - fix up buffer locations */ /* * Note that for Linux targets (or any virtual-memory target for that * matter), this test cannot be run in user-mode, it must be run from * kernel mode only */ #include "sec2drvTest.h" #include "Sec2.h" static const unsigned char iv_in[] = "abcedf01"; static const unsigned char cipherKey[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 }; /* Raw input string */ /* Basic string is 33 characters, repeated 48 times for 1584 characters */ /* Broken up into 7 chunks of varying sizes, out of natural order */ /* segment 7 - 364 bytes */ static const unsigned char inp7[] = { "yallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboy" }; /* segment 4 - 64 bytes */ static const unsigned char inp4[] = { 
"andnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallwo" }; /* segment 2 - 256 bytes */ static const unsigned char inp2[] = { "akesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkan" }; /* segment 1 - 512 bytes */ static const unsigned char inp1[] = { "allworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaym" }; /* segment 3 - 64 bytes */ static const unsigned char inp3[] = { "dnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallwork" }; /* segment 6 - 196 bytes */ static const unsigned char inp6[] = { "llworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullbo" }; /* segment 5 - 128 bytes */ static const unsigned char inp5[] = { "rkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboyallworkandnoplaymakesJackadullboya" }; /* scatter lists */ EXT_SCATTER_ELEMENT inpList[7], outList[8], cmpList[2]; int testSG(int fd) { DES_CBC_CRYPT_REQ encReq; DES_CBC_CRYPT_REQ decReq; int opLen, status, result, cmpfail; unsigned char ctx[8]; /* cleartext input buffer */ unsigned char *in7, *in4, *in2, *in1, *in3, *in6, *in5; /* ciphertext output buffer */ unsigned char *out8, *out1, *out3, *out5, *out2, *out6, *out4, *out7; /* compare buffer */ 
unsigned char *cmp2, *cmp1; #ifdef _LINUX_USERMODE_ printf("testSG() can't be used from user mode\n"); result = -1; #else printf("\n*** Test SG ***\n"); opLen = 1584; /* overall length */ /* Clear out the request blocks, each direction */ memset(&encReq, 0, sizeof(encReq)); memset(&decReq, 0, sizeof(decReq)); /* Alloc storage space in contiguous (possibly) kernel mem */ in7 = (unsigned char *)malloc(364); in4 = (unsigned char *)malloc(64); in2 = (unsigned char *)malloc(256); in1 = (unsigned char *)malloc(512); in3 = (unsigned char *)malloc(64); in6 = (unsigned char *)malloc(196); in5 = (unsigned char *)malloc(128); out8 = (unsigned char *)malloc(48); out1 = (unsigned char *)malloc(128); out3 = (unsigned char *)malloc(256); out5 = (unsigned char *)malloc(256); out2 = (unsigned char *)malloc(128); out6 = (unsigned char *)malloc(256); out4 = (unsigned char *)malloc(256); out7 = (unsigned char *)malloc(256); cmp2 = (unsigned char *)malloc(688); cmp1 = (unsigned char *)malloc(896); if ((in7 == NULL) || (in4 == NULL) || (in2 == NULL) || (in1 == NULL) || (in3 == NULL) || (in6 == NULL) || (in5 == NULL) || (out8 == NULL) || (out1 == NULL) || (out3 == NULL) || (out5 == NULL) || (out2 == NULL) || (out6 == NULL) || (out4 == NULL) || (out7 == NULL) || (cmp2 == NULL) || (cmp1 == NULL)) { free(in7); free(in4); free(in2); free(in1); free(in3); free(in6); free(in5); free(out8); free(out1); free(out3); free(out5); free(out2); free(out6); free(out4); free(out7); free(cmp2); free(cmp1); } /* copy input blocks to buffer */ memcpy(in7, inp7, 364); memcpy(in4, inp4, 64); memcpy(in2, inp2, 256); memcpy(in1, inp1, 512); memcpy(in3, inp3, 64); memcpy(in6, inp6, 196); memcpy(in5, inp5, 128); /* cleartext input */ inpList[0].size = 512; inpList[0].fragment = (unsigned char *)in1; inpList[0].next = &inpList[1]; inpList[1].size = 256; inpList[1].fragment = (unsigned char *)in2; inpList[1].next = &inpList[2]; inpList[2].size = 64; inpList[2].fragment = (unsigned char *)in3; inpList[2].next = 
&inpList[3]; inpList[3].size = 64; inpList[3].fragment = (unsigned char *)in4; inpList[3].next = &inpList[4]; inpList[4].size = 128; inpList[4].fragment = (unsigned char *)in5; inpList[4].next = &inpList[5]; inpList[5].size = 196; inpList[5].fragment = (unsigned char *)in6; inpList[5].next = &inpList[6]; inpList[6].size = 364; inpList[6].fragment = (unsigned char *)in7; inpList[6].next = NULL; /* cipher output list (output first pass, input second pass) */ outList[0].size = 128; outList[0].fragment = out1; outList[0].next = &outList[1]; outList[1].size = 128; outList[1].fragment = out2; outList[1].next = &outList[2]; outList[2].size = 256; outList[2].fragment = out3; outList[2].next = &outList[3]; outList[3].size = 256; outList[3].fragment = out4; outList[3].next = &outList[4]; outList[4].size = 256; outList[4].fragment = out5; outList[4].next = &outList[5]; outList[5].size = 256; outList[5].fragment = out6; outList[5].next = &outList[6]; outList[6].size = 256; outList[6].fragment = out7; outList[6].next = &outList[7]; outList[7].size = 48; outList[7].fragment = out8; outList[7].next = NULL; /* cleartext comparison, two chunks */ cmpList[0].size = 896; cmpList[0].fragment = cmp1; cmpList[0].next = &cmpList[1]; cmpList[1].size = 688; cmpList[1].fragment = cmp2; cmpList[1].next = NULL; /* Scatter lists are built for both input and output */ /* Set up the encryption request */ encReq.opId = DPD_TDES_CBC_CTX_ENCRYPT; encReq.inIvBytes = 8; encReq.keyBytes = 24; encReq.inBytes = opLen; encReq.inIvData = (unsigned char *)iv_in; encReq.keyData = (unsigned char *)cipherKey; encReq.inData = (unsigned char *)inpList; encReq.outIvBytes = 8; encReq.outIvData = ctx; encReq.outData = (unsigned char *)outList; MarkScatterBuffer(&encReq, &encReq.inData); MarkScatterBuffer(&encReq, &encReq.outData); status = putKmem(fd, (void *)iv_in, (void **)&encReq.inIvData, encReq.inIvBytes); if (status) return status; status = putKmem(fd, (void *)cipherKey, (void **)&encReq.keyData, 
encReq.keyBytes); if (status) { freeKmem(fd, (void **)&encReq.inIvData); return status; } status = putKmem(fd, NULL, (void **)&encReq.outIvData, encReq.outIvBytes); if (status) { freeKmem(fd, (void **)&encReq.inIvData); freeKmem(fd, (void **)&encReq.keyData); return status; } armCompletion(&encReq); status = ioctl(fd, IOCTL_PROC_REQ, (int)&encReq); if ((status = waitCompletion("testSG(): DES encryption with scattered input", status, &encReq))) { free(in7); free(in4); free(in2); free(in1); free(in3); free(in6); free(in5); free(out8); free(out1); free(out3); free(out5); free(out2); free(out6); free(out4); free(out7); free(cmp2); free(cmp1); freeKmem(fd, (void **)&encReq.inIvData); freeKmem(fd, (void **)&encReq.keyData); freeKmem(fd, (void **)&encReq.outIvData); return status; } freeKmem(fd, (void **)&encReq.inIvData); freeKmem(fd, (void **)&encReq.keyData); freeKmem(fd, (void **)&encReq.outIvData); /* Now reverse the operation */ /* Set up the decryption request */ decReq.opId = DPD_TDES_CBC_CTX_DECRYPT; decReq.inIvBytes = 8; decReq.keyBytes = 24; decReq.inBytes = opLen; decReq.inIvData = (unsigned char *)iv_in; decReq.keyData = (unsigned char *)cipherKey; decReq.inData = (unsigned char *)outList; decReq.outIvBytes = 8; decReq.outIvData = ctx; decReq.outData = (unsigned char *)cmpList; MarkScatterBuffer(&decReq, &decReq.inData); MarkScatterBuffer(&decReq, &decReq.outData); status = putKmem(fd, (void *)iv_in, (void **)&decReq.inIvData, decReq.inIvBytes); if (status) return status; status = putKmem(fd, (void *)cipherKey, (void **)&decReq.keyData, decReq.keyBytes); if (status) { freeKmem(fd, (void **)&decReq.inIvData); return status; } status = putKmem(fd, NULL, (void **)&decReq.outIvData, decReq.outIvBytes); if (status) { freeKmem(fd, (void **)&decReq.inIvData); freeKmem(fd, (void **)&decReq.keyData); return status; } armCompletion(&decReq); status = ioctl(fd, IOCTL_PROC_REQ, (int)&decReq); if ((status = waitCompletion("testSG(): DES decryption with scattered output", 
status, &decReq))) { free(in7); free(in4); free(in2); free(in1); free(in3); free(in6); free(in5); free(out8); free(out1); free(out3); free(out5); free(out2); free(out6); free(out4); free(out7); free(cmp2); free(cmp1); freeKmem(fd, (void **)&decReq.inIvData); freeKmem(fd, (void **)&decReq.keyData); freeKmem(fd, (void **)&decReq.outIvData); return status; } freeKmem(fd, (void **)&decReq.inIvData); freeKmem(fd, (void **)&decReq.keyData); freeKmem(fd, (void **)&decReq.outIvData); /* Compare the results */ /* Note that so far, we only encrypt to a buffer, then decrypt to another */ /* buffer with the same key, so the end result should match the input */ /* There is no test of the DES encrypted data, that's done in a separate test */ cmpfail = 0; if (memcmp(inp1, &cmp1[0], 512)) { cmpfail++; printf("compare fragment 1\n"); printf("expected:\n"); dumpm((unsigned char *)inp1, 512); printf("actual:\n"); dumpm(&cmp1[0], 512); } if (memcmp(inp2, &cmp1[512], 256)) { cmpfail++; printf("compare fragment 2\n"); printf("expected:\n"); dumpm((unsigned char *)inp2, 256); printf("actual:\n"); dumpm(&cmp1[512], 256); } if (memcmp(inp3, &cmp1[768], 64)) { cmpfail++; printf("compare fragment 3\n"); printf("expected:\n"); dumpm((unsigned char *)inp3, 64); printf("actual:\n"); dumpm(&cmp1[768], 64); } if (memcmp(inp4, &cmp1[832], 64)) { cmpfail++; printf("compare fragment 4\n"); printf("expected:\n"); dumpm((unsigned char *)inp4, 64); printf("actual:\n"); dumpm(&cmp1[832], 64); } if (memcmp(inp5, &cmp2[0], 128)) { cmpfail++; printf("compare fragment 5\n"); printf("expected:\n"); dumpm((unsigned char *)inp4, 128); printf("actual:\n"); dumpm(&cmp2[0], 128); } if (memcmp(inp6, &cmp2[128], 196)) { cmpfail++; printf("compare fragment 6\n"); printf("expected:\n"); dumpm((unsigned char *)inp6, 196); printf("actual:\n"); dumpm(&cmp2[128], 196); } if (memcmp(inp7, &cmp2[324], 364)) { cmpfail++; printf("compare fragment 7\n"); printf("expected:\n"); dumpm((unsigned char *)inp7, 364); 
printf("actual:\n"); dumpm(&cmp2[324], 364); } free(in7); free(in4); free(in2); free(in1); free(in3); free(in6); free(in5); free(out8); free(out1); free(out3); free(out5); free(out2); free(out6); free(out4); free(out7); free(cmp2); free(cmp1); if (cmpfail) { printf("*** Test SG: data compare error in %d fragments, test failure ***\n", cmpfail); result = -1; } else { printf("*** Test SG passed ***\n"); result = 0; } #endif /* not usermode */ return(result); }
gpl-2.0
drazenzadravec/nequeo
Tools/Math/MathGL/mathgl-2.2.2.1/mgllab/mathgl.cpp
1
4683
/* mathgl.cpp is part of UDAV * Copyright (C) 2007-2014 Alexey Balakin <mathgl.abalakin@gmail.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mgl2/mgl.h" #include "udav.h" //----------------------------------------------------------------------------- #include "xpm/alpha.xpm" #include "xpm/light.xpm" #include "xpm/alpha_on.xpm" #include "xpm/light_on.xpm" #include "xpm/zoom-fit-best.xpm" #include "xpm/zoom-fit-best-r.xpm" #include "xpm/film-r.xpm" #include "xpm/film-b.xpm" #include "xpm/media-seek-forward.xpm" #include "xpm/media-seek-backward.xpm" #include "xpm/go-previous.xpm" #include "xpm/go-next.xpm" #include "xpm/go-down.xpm" #include "xpm/zoom-out.xpm" #include "xpm/zoom-in.xpm" #include "xpm/go-up.xpm" #include "xpm/zoom-original.xpm" #include "xpm/view-refresh.xpm" #include "xpm/rotate.xpm" #include "xpm/rotate_on.xpm" #include "xpm/document-properties.xpm" //#include "xpm/preferences-system.xpm" #include "xpm/wire.xpm" //----------------------------------------------------------------------------- extern int internal_font; mglParse *Parse=0; //----------------------------------------------------------------------------- void udav_error(const char *Message, void *v) { ((Fl_MGL*)v)->status->label(Message); } mreal udav_delay(void *v) { return ((Fl_MGL*)v)->AnimDelay; } void udav_reload(void *v) { Parse->RestoreOnce(); ((Fl_MGL*)v)->update(); } 
//----------------------------------------------------------------------------- void udav_next(void *v) { ((Fl_MGL*)v)->next_frame(); } void Fl_MGL::next_frame() { if(NArgs==0) { animate_cb(this,this); if(NArgs==0) return; } ArgCur = (ArgCur+1) % NArgs; Parse->AddParam(0,Args[ArgCur]); update(); } //----------------------------------------------------------------------------- void udav_prev(void *v) { ((Fl_MGL*)v)->prev_frame(); } void Fl_MGL::prev_frame() { if(NArgs==0) { animate_cb(this,this); if(NArgs==0) return; } ArgCur = ArgCur>0 ? ArgCur-1 : NArgs-1; Parse->AddParam(0,Args[ArgCur]); update(); } //----------------------------------------------------------------------------- Fl_MGL::Fl_MGL(int x, int y, int w, int h, const char *label) : Fl_MGLView(x,y,w,h,label) { if(!Parse) Parse = new mglParse; Parse->AllowSetSize(true); ArgBuf = 0; NArgs = ArgCur = 0; script = script_pre = 0; par = this; next = udav_next; delay = udav_delay; prev = udav_prev; reload = udav_reload; /*#ifdef WIN32 // setlocale(LC_TYPE,"russian_Russia.CP1251"); char *path; get_doc_dir(path); if(!FMGL->GetFont()->Load("STIX",path && path[0] ? 
path : ".")) FMGL->GetFont()->Restore(); free(path); #endif*/ } //----------------------------------------------------------------------------- Fl_MGL::~Fl_MGL() { clear_scripts(); if(ArgBuf) delete []ArgBuf; } //----------------------------------------------------------------------------- void Fl_MGL::clear_scripts() { if(script) free(script); if(script_pre) free(script_pre); } //----------------------------------------------------------------------------- void Fl_MGL::scripts(char *scr, char *pre) { clear_scripts(); script=scr; script_pre=pre; } //----------------------------------------------------------------------------- int Fl_MGL::Draw(mglGraph *gr) { Parse->Execute(gr,script_pre); Parse->Execute(gr,script); status->label(gr->Message()); return 0; } //----------------------------------------------------------------------------- void Fl_MGL::update() { // NOTE: hint for old style View(). May be I should remove it! if(!script || !strstr(script,"rotate")) mgl_rotate(FMGL->get_graph(),0,0,0); Fl_MGLView::update(); mglVar *v = Parse->FindVar(""); while(v) { if(v->o) ((TableWindow *)v->o)->update(v); v = v->next; } } //----------------------------------------------------------------------------- void add_suffix(char *fname, const char *ext) { long n=strlen(fname); if(n>4 && fname[n-4]=='.') { fname[n-3]=ext[0]; fname[n-2]=ext[1]; fname[n-1]=ext[2]; } else { strcat(fname,"."); strcat(fname,ext); } } //-----------------------------------------------------------------------------
gpl-2.0
ultrasystem/kernel
drivers/media/video/samsung/mfc5x/mfc_dec.c
1
63846
/* * linux/drivers/media/video/samsung/mfc5x/mfc_dec.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Decoder interface for Samsung MFC (Multi Function Codec - FIMV) driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <asm/cacheflush.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/sysfs.h> #ifdef CONFIG_BUSFREQ_OPP #include <plat/cpu.h> #include <mach/busfreq_exynos4.h> #define HD_MOVIE_SIZE_MULTIPLY_WIDTH_HEIGHT (1281*721) #endif #if defined(CONFIG_BUSFREQ) || defined(CONFIG_EXYNOS4_CPUFREQ) #include <mach/cpufreq.h> #endif #include <mach/regs-mfc.h> #include "mfc_dec.h" #include "mfc_cmd.h" #include "mfc_log.h" #include "mfc_shm.h" #include "mfc_reg.h" #include "mfc_mem.h" #include "mfc_buf.h" #undef DUMP_STREAM #ifdef DUMP_STREAM #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/file.h> static void mfc_fw_debug(void); static void dump_stream(unsigned long address, unsigned int size); #endif static LIST_HEAD(mfc_decoders); #if 0 #define MPEG4_START_CODE_PREFIX_SIZE 3 #define MPEG4_START_CODE_PREFIX 0x000001 #define MPEG4_START_CODE_MASK 0x000000FF static int find_mpeg4_startcode(unsigned long addr, unsigned int size) { unsigned char *data; unsigned int i = 0; /* FIXME: optimize cache operation size */ mfc_mem_cache_inv((void *)addr, size); /* FIXME: optimize matching algorithm */ data = (unsigned char *)addr; for (i = 0; i < (size - MPEG4_START_CODE_PREFIX_SIZE); i++) { if ((data[i] == 0x00) && (data[i + 1] == 0x00) && (data[i + 2] == 0x01)) return i; } return -1; } static int check_vcl(unsigned long addr, unsigned int size) { return -1; } #endif #ifdef DUMP_STREAM static void mfc_fw_debug(void) { mfc_err("============= MFC FW Debug (Ver: 0x%08x) ================\n", read_reg(0x58)); mfc_err("== (0x64: 
0x%08x) (0x68: 0x%08x) (0xE4: 0x%08x) \ (0xE8: 0x%08x)\n", read_reg(0x64), read_reg(0x68), read_reg(0xe4), read_reg(0xe8)); mfc_err("== (0xF0: 0x%08x) (0xF4: 0x%08x) (0xF8: 0x%08x) \ (0xFC: 0x%08x)\n", read_reg(0xf0), read_reg(0xf4), read_reg(0xf8), read_reg(0xfc)); } static void dump_stream(unsigned long address, unsigned int size) { int i, j; struct file *file; loff_t pos = 0; int fd; unsigned long addr = (unsigned long) phys_to_virt(address); mm_segment_t old_fs; char filename[] = "/data/mfc_decinit_instream.raw"; printk(KERN_INFO "---- start stream dump ----\n"); printk(KERN_INFO "size: 0x%04x\n", size); for (i = 0; i < size; i += 16) { mfc_dbg("0x%04x: ", i); if ((size - i) >= 16) { for (j = 0; j < 16; j++) mfc_dbg("0x%02x ", (u8)(*(u8 *)(addr + i + j))); } else { for (j = 0; j < (size - i); j++) mfc_dbg("0x%02x ", (u8)(*(u8 *)(addr + i + j))); } mfc_dbg("\n"); } printk(KERN_INFO "---- end stream dump ----\n"); old_fs = get_fs(); set_fs(KERNEL_DS); fd = sys_open(filename, O_WRONLY|O_CREAT, 0644); if (fd >= 0) { sys_write(fd, (u8 *)addr, size); file = fget(fd); if (file) { vfs_write(file, (u8 *)addr, size, &pos); fput(file); } sys_close(fd); } else { mfc_err("........Open fail : %d\n", fd); } set_fs(old_fs); } #endif /* * [1] alloc_ctx_buf() implementations */ static int alloc_ctx_buf(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_CTX_SIZE, ALIGN_2KB, MBT_CTX | PORT_A); if (alloc == NULL) { mfc_err("failed alloc context buffer\n"); return -1; } ctx->ctxbufofs = mfc_mem_base_ofs(alloc->real) >> 11; ctx->ctxbufsize = alloc->size; memset((void *)alloc->addr, 0, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); return 0; } static int h264_alloc_ctx_buf(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_CTX_SIZE_L, ALIGN_2KB, MBT_CTX | PORT_A); if (alloc == NULL) { mfc_err("failed alloc context buffer\n"); return -1; } ctx->ctxbufofs = 
mfc_mem_base_ofs(alloc->real) >> 11; ctx->ctxbufsize = alloc->size; memset((void *)alloc->addr, 0, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); return 0; } /* * [2] alloc_desc_buf() implementations */ static int alloc_desc_buf(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; /* FIXME: size fixed? */ alloc = _mfc_alloc_buf(ctx, MFC_DESC_SIZE, ALIGN_2KB, MBT_DESC | PORT_A); if (alloc == NULL) { mfc_err("failed alloc descriptor buffer\n"); return -1; } ctx->descbufofs = mfc_mem_base_ofs(alloc->real) >> 11; /* FIXME: size fixed? */ ctx->descbufsize = MFC_DESC_SIZE; return 0; } /* * [3] pre_seq_start() implementations */ static int pre_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; unsigned reg; /* slice interface */ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL); if (dec_ctx->slice) reg |= (1 << 31); else reg &= ~(1 << 31); write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); return 0; } static int h264_pre_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; unsigned int reg; pre_seq_start(ctx); /* display delay */ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL); if (h264->dispdelay_en > 0) { /* enable */ reg |= (1 << 30); /* value */ reg &= ~(0x3FFF << 16); reg |= ((h264->dispdelay_val & 0x3FFF) << 16); } else { /* disable & value clear */ reg &= ~(0x7FFF << 16); } write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); write_shm(ctx, h264->sei_parse, SEI_ENABLE); return 0; } static int mpeg4_pre_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv; unsigned int reg; pre_seq_start(ctx); /* loop filter, this register can be used by both decoders & encoders */ reg = read_reg(MFC_ENC_LF_CTRL); if (mpeg4->postfilter) reg |= (1 << 0); else reg &= ~(1 << 0); write_reg(reg, 
MFC_ENC_LF_CTRL); return 0; } static int fimv1_pre_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv; pre_seq_start(ctx); /* set width, height for FIMV1 */ write_reg(fimv1->width, MFC_SI_FIMV1_HRESOL); write_reg(fimv1->height, MFC_SI_FIMV1_VRESOL); return 0; } /* * [4] post_seq_start() implementations */ static int post_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; unsigned int shm; /* CHKME: case of FIMV1 */ ctx->width = read_reg(MFC_SI_HRESOL); ctx->height = read_reg(MFC_SI_VRESOL); dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER); dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb; shm = read_shm(ctx, DISP_PIC_PROFILE); dec_ctx->level = (shm >> 8) & 0xFF; dec_ctx->profile = shm & 0x1F; return 0; } static int h264_post_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; unsigned int shm; /* post_seq_start(ctx); */ ctx->width = read_reg(MFC_SI_HRESOL); ctx->height = read_reg(MFC_SI_VRESOL); dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER); dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb; mfc_dbg("nummindpb: %d, numextradpb: %d\n", dec_ctx->nummindpb, dec_ctx->numextradpb); shm = read_shm(ctx, DISP_PIC_PROFILE); dec_ctx->level = (shm >> 8) & 0xFF; dec_ctx->profile = shm & 0x1F; /* FIXME: consider it */ /* h264->dispdelay_en > 0 if (dec_ctx->numtotaldpb < h264->dispdelay_val) dec_ctx->numtotaldpb = h264->dispdelay_val; */ h264->crop_r_ofs = (read_shm(ctx, CROP_INFO1) >> 16) & 0xFFFF; h264->crop_l_ofs = read_shm(ctx, CROP_INFO1) & 0xFFFF; h264->crop_b_ofs = (read_shm(ctx, CROP_INFO2) >> 16) & 0xFFFF; h264->crop_t_ofs = read_shm(ctx, CROP_INFO2) & 0xFFFF; return 0; } static int mpeg4_post_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx 
*dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv; unsigned int shm; /* post_seq_start(ctx); */ ctx->width = read_reg(MFC_SI_HRESOL); ctx->height = read_reg(MFC_SI_VRESOL); dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER); dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb; shm = read_shm(ctx, DISP_PIC_PROFILE); dec_ctx->level = (shm >> 8) & 0xFF; dec_ctx->profile = shm & 0x1F; mpeg4->aspect_ratio = read_shm(ctx, ASPECT_RATIO_INFO) & 0xF; if (mpeg4->aspect_ratio == 0xF) { shm = read_shm(ctx, EXTENDED_PAR); mpeg4->ext_par_width = (shm >> 16) & 0xFFFF; mpeg4->ext_par_height = shm & 0xFFFF; } else { mpeg4->ext_par_width = 0; mpeg4->ext_par_height = 0; } return 0; } static int vc1_post_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; unsigned int shm; /* post_seq_start(ctx); */ ctx->width = read_reg(MFC_SI_HRESOL); ctx->height = read_reg(MFC_SI_VRESOL); dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER); dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb; shm = read_shm(ctx, DISP_PIC_PROFILE); dec_ctx->level = (shm >> 8) & 0xFF; dec_ctx->profile = shm & 0x1F; return 0; } static int fimv1_post_seq_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv; unsigned int shm; /* post_seq_start(ctx); */ ctx->width = read_reg(MFC_SI_HRESOL); ctx->height = read_reg(MFC_SI_VRESOL); dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER); dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb; shm = read_shm(ctx, DISP_PIC_PROFILE); dec_ctx->level = (shm >> 8) & 0xFF; dec_ctx->profile = shm & 0x1F; fimv1->aspect_ratio = read_shm(ctx, ASPECT_RATIO_INFO) & 0xF; if (fimv1->aspect_ratio == 0xF) { shm = read_shm(ctx, EXTENDED_PAR); fimv1->ext_par_width = (shm >> 16) & 0xFFFF; fimv1->ext_par_height = shm & 0xFFFF; } else { 
fimv1->ext_par_width = 0; fimv1->ext_par_height = 0; } return 0; } /* * [5] set_init_arg() implementations */ static int set_init_arg(struct mfc_inst_ctx *ctx, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg; dec_init_arg->out_frm_width = ctx->width; dec_init_arg->out_frm_height = ctx->height; dec_init_arg->out_buf_width = ALIGN(ctx->width, ALIGN_W); dec_init_arg->out_buf_height = ALIGN(ctx->height, ALIGN_H); dec_init_arg->out_dpb_cnt = dec_ctx->numtotaldpb; return 0; } static int h264_set_init_arg(struct mfc_inst_ctx *ctx, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg; set_init_arg(ctx, arg); dec_init_arg->out_crop_right_offset = h264->crop_r_ofs; dec_init_arg->out_crop_left_offset = h264->crop_l_ofs; dec_init_arg->out_crop_bottom_offset = h264->crop_b_ofs; dec_init_arg->out_crop_top_offset = h264->crop_t_ofs; return 0; } static int mpeg4_set_init_arg(struct mfc_inst_ctx *ctx, void *arg) { /* struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv; struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg; */ set_init_arg(ctx, arg); /* dec_init_arg->out_aspect_ratio = mpeg4->aspect_ratio; dec_init_arg->out_ext_par_width = mpeg4->ext_par_width; dec_init_arg->out_ext_par_height = mpeg4->ext_par_height; */ return 0; } /* * [6] set_codec_bufs() implementations */ static int set_codec_bufs(struct mfc_inst_ctx *ctx) { return 0; } static int h264_set_codec_bufs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 
11, MFC_VERT_NB_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBIP_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_VERT_NB_IP_ADR); return 0; } static int vc1_set_codec_bufs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE3_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE2_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc 
== NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE1_ADR); return 0; } static int mpeg4_set_codec_bufs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_SYNPAR_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SP_ADR); return 0; } static int h263_set_codec_bufs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); 
mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR); alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_CODEC); mfc_err("failed alloc codec buffer\n"); return -1; } write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR); return 0; } /* * [7] set_dpbs() implementations */ static int set_dpbs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; int i; unsigned int reg; struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; /* width: 128B align, height: 32B align, size: 8KB align */ /* add some guard buffers to luma & chroma */ dec_ctx->lumasize = ALIGN(ctx->width + 24, ALIGN_W) * ALIGN(ctx->height + 16, ALIGN_H); dec_ctx->lumasize = ALIGN(dec_ctx->lumasize, ALIGN_8KB); dec_ctx->chromasize = ALIGN(ctx->width + 16, ALIGN_W) * ALIGN((ctx->height >> 1) + 4, ALIGN_H); dec_ctx->chromasize = ALIGN(dec_ctx->chromasize, ALIGN_8KB); for (i = 0; i < dec_ctx->numtotaldpb; i++) { /* * allocate chroma buffer */ #ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, \ ALIGN_4KB, MBT_DPB | PORT_A); #else alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, ALIGN_2KB, MBT_DPB | PORT_A); #endif if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_DPB); mfc_err("failed alloc chroma buffer\n"); return -1; } /* clear first DPB chroma buffer, referrence buffer for vectors starting with p-frame */ #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if ((i == 0) && (!ctx->drm_flag)) { #else if (i == 0) { #endif memset((void *)alloc->addr, 0x80, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); } /* * set chroma buffer 
address */ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_CHROMA_ADR + (4 * i)); /* * allocate luma buffer */ #ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, \ ALIGN_4KB, MBT_DPB | PORT_B); #else alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_B); #endif if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_DPB); mfc_err("failed alloc luma buffer\n"); return -1; } /* clear first DPB luma buffer, referrence buffer for vectors starting with p-frame */ #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if ((i == 0) && (!ctx->drm_flag)) { #else if (i == 0) { #endif memset((void *)alloc->addr, 0x0, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); } /* * set luma buffer address */ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_LUMA_ADR + (4 * i)); } write_shm(ctx, dec_ctx->lumasize, ALLOCATED_LUMA_DPB_SIZE); write_shm(ctx, dec_ctx->chromasize, ALLOCATED_CHROMA_DPB_SIZE); write_shm(ctx, 0, ALLOCATED_MV_SIZE); /* set DPB number */ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL); reg &= ~(0x3FFF); reg |= dec_ctx->numtotaldpb; write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); return 0; } static int h264_set_dpbs(struct mfc_inst_ctx *ctx) { struct mfc_alloc_buffer *alloc; int i; unsigned int reg; struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; /* width: 128B align, height: 32B align, size: 8KB align */ dec_ctx->lumasize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height, ALIGN_H); dec_ctx->lumasize = ALIGN(dec_ctx->lumasize, ALIGN_8KB); dec_ctx->chromasize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height >> 1, ALIGN_H); dec_ctx->chromasize = ALIGN(dec_ctx->chromasize, ALIGN_8KB); h264->mvsize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height >> 2, ALIGN_H); h264->mvsize = ALIGN(h264->mvsize, ALIGN_8KB); for (i = 0; i < dec_ctx->numtotaldpb; i++) { /* * allocate chroma buffer */ #ifdef 
CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, \ ALIGN_4KB, MBT_DPB | PORT_A); #else alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, ALIGN_2KB, MBT_DPB | PORT_A); #endif if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_DPB); mfc_err("failed alloc chroma buffer\n"); return -1; } /* clear last DPB chroma buffer, referrence buffer for vectors starting with p-frame */ #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if ((i == (dec_ctx->numtotaldpb - 1)) && (!ctx->drm_flag)) { #else if (i == (dec_ctx->numtotaldpb - 1)) { #endif memset((void *)alloc->addr, 0x80, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); } /* * set chroma buffer address */ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_CHROMA_ADR + (4 * i)); /* * allocate luma buffer */ #ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, \ ALIGN_4KB, MBT_DPB | PORT_B); #else alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_B); #endif if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_DPB); mfc_err("failed alloc luma buffer\n"); return -1; } /* clear last DPB luma buffer, referrence buffer for vectors starting with p-frame */ #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if ((i == (dec_ctx->numtotaldpb - 1)) && (!ctx->drm_flag)) { #else if (i == (dec_ctx->numtotaldpb - 1)) { #endif memset((void *)alloc->addr, 0x0, alloc->size); mfc_mem_cache_clean((void *)alloc->addr, alloc->size); } /* * set luma buffer address */ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_LUMA_ADR + (4 * i)); /* * allocate mv buffer */ alloc = _mfc_alloc_buf(ctx, h264->mvsize, ALIGN_2KB, MBT_DPB | PORT_B); if (alloc == NULL) { mfc_free_buf_type(ctx->id, MBT_DPB); mfc_err("failed alloc mv buffer\n"); return -1; } /* * set mv buffer address */ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_MV_ADR + (4 * i)); } write_shm(ctx, dec_ctx->lumasize, ALLOCATED_LUMA_DPB_SIZE); write_shm(ctx, 
dec_ctx->chromasize, ALLOCATED_CHROMA_DPB_SIZE); write_shm(ctx, h264->mvsize, ALLOCATED_MV_SIZE); /* set DPB number */ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL); reg &= ~(0x3FFF); reg |= dec_ctx->numtotaldpb; write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); return 0; } /* * [8] pre_frame_start() implementations */ static int pre_frame_start(struct mfc_inst_ctx *ctx) { return 0; } /* * [9] post_frame_start() implementations */ static int post_frame_start(struct mfc_inst_ctx *ctx) { return 0; } static int h264_post_frame_start(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; unsigned int shm; /* h264->sei_parse */ h264->fp.available = read_shm(ctx, FRAME_PACK_SEI_AVAIL) & 0x1; if (h264->fp.available) { h264->fp.arrangement_id = read_shm(ctx, FRAME_PACK_ARRGMENT_ID); shm = read_shm(ctx, FRAME_PACK_DEC_INFO); h264->fp.arrangement_cancel_flag = (shm >> 0) & 0x1; h264->fp.arrangement_type = (shm >> 1) & 0x7F; h264->fp.quincunx_sampling_flag = (shm >> 8) & 0x1; h264->fp.content_interpretation_type = (shm >> 9) & 0x3F; h264->fp.spatial_flipping_flag = (shm >> 15) & 0x1; h264->fp.frame0_flipped_flag = (shm >> 16) & 0x1; h264->fp.field_views_flag = (shm >> 17) & 0x1; h264->fp.current_frame_is_frame0_flag = (shm >> 18) & 0x1; shm = read_shm(ctx, FRAME_PACK_GRID_POS); h264->fp.frame0_grid_pos_x = (shm >> 0) & 0xF; h264->fp.frame0_grid_pos_y = (shm >> 4) & 0xF; h264->fp.frame1_grid_pos_x = (shm >> 8) & 0xF; h264->fp.frame1_grid_pos_y = (shm >> 12) & 0xF; } else { memset((void *)&h264->fp, 0x00, sizeof(struct mfc_frame_packing)); } return 0; } /* * [10] multi_frame_start() implementations */ static int multi_data_frame(struct mfc_inst_ctx *ctx) { return 0; } static int mpeg4_multi_data_frame(struct mfc_inst_ctx *ctx) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv; if (!mpeg4->packedpb) 
return 0; /* FIXME: I_FRAME is valid? */ if ((dec_ctx->decframetype == DEC_FRM_I) || (dec_ctx->decframetype == DEC_FRM_P)) { } return 0; } /* * [11] set_exe_arg() implementations */ static int set_exe_arg(struct mfc_inst_ctx *ctx, void *arg) { /* struct mfc_dec_exe_arg *dec_exe_arg = (struct mfc_dec_exe_arg *)arg; */ return 0; } /* * [12] get_codec_cfg() implementations */ static int get_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { /*struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;*/ union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg; int ret = 0; mfc_dbg("type: 0x%08x", type); /* MFC_DEC_GETCONF_CRC_DATA = DEC_GET, MFC_DEC_GETCONF_BUF_WIDTH_HEIGHT MFC_DEC_GETCONF_FRAME_TAG, MFC_DEC_GETCONF_PIC_TIME, MFC_DEC_GETCONF_ASPECT_RATIO: MFC_DEC_GETCONF_EXTEND_PAR: */ switch (type) { case MFC_DEC_GETCONF_CRC_DATA: usercfg->basic.values[0] = 0x12; usercfg->basic.values[1] = 0x34; usercfg->basic.values[2] = 0x56; usercfg->basic.values[3] = 0x78; break; default: mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type); ret = 1; break; } return ret; } static int h264_get_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg; int ret = 0; mfc_dbg("type: 0x%08x", type); ret = get_codec_cfg(ctx, type, arg); if (ret <= 0) return ret; switch (type) { case MFC_DEC_GETCONF_FRAME_PACKING: if (ctx->state < INST_STATE_EXE) { mfc_dbg("invalid instance state: 0x%08x\n", type); return MFC_STATE_INVALID; } memcpy(&usercfg->frame_packing, &h264->fp, sizeof(struct mfc_frame_packing)); break; default: mfc_err("invalid get config type: 0x%08x\n", type); ret = -2; break; } return ret; } /* * [13] set_codec_cfg() implementations */ static int set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct 
mfc_dec_ctx *)ctx->c_priv; union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg; int ret = 0; mfc_dbg("type: 0x%08x", type); /* MFC_DEC_SETCONF_FRAME_TAG, ... */ switch (type) { /* case MFC_DEC_SETCONF_EXTRA_BUFFER_NUM: if (ctx->state >= INST_STATE_INIT) return MFC_STATE_INVALID; if ((usercfg->basic.values[0] >= 0) && (usercfg->basic.values[0] <= MFC_MAX_EXTRA_DPB)) { dec_ctx->numextradpb = usercfg->basic.values[0]; } else { dec_ctx->numextradpb = MFC_MAX_EXTRA_DPB; mfc_warn("invalid extra dpb buffer number: %d", usercfg->basic.values[0]); mfc_warn("set %d by default", MFC_MAX_EXTRA_DPB); } break; */ case MFC_DEC_SETCONF_IS_LAST_FRAME: mfc_dbg("ctx->state: 0x%08x", ctx->state); if (ctx->state < INST_STATE_EXE) { mfc_dbg("invalid instance state: 0x%08x\n", type); return MFC_STATE_INVALID; } if (usercfg->basic.values[0] > 0) dec_ctx->lastframe = 1; else dec_ctx->lastframe = 0; break; /* case MFC_DEC_SETCONF_SLICE_ENABLE: if (ctx->state >= INST_STATE_INIT) return MFC_STATE_INVALID; if (usercfg->basic.values[0] > 0) dec_ctx->slice = 1; else dec_ctx->slice = 0; break; */ /* case MFC_DEC_SETCONF_CRC_ENABLE: if (ctx->state >= INST_STATE_INIT) return MFC_STATE_INVALID; if (usercfg->basic.values[0] > 0) dec_ctx->crc = 1; else dec_ctx->crc = 0; break; */ case MFC_DEC_SETCONF_DPB_FLUSH: if (ctx->state < INST_STATE_EXE) { mfc_dbg("invalid instance state: 0x%08x\n", type); return MFC_STATE_INVALID; } if (usercfg->basic.values[0] > 0) { dec_ctx->dpbflush = 1; } break; default: mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type); ret = 1; break; } return ret; } static int h264_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv; union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg; int ret; mfc_dbg("type: 0x%08x", type); ret = set_codec_cfg(ctx, type, arg); if (ret <= 0) return ret; ret = 0; switch (type) { 
case MFC_DEC_SETCONF_DISPLAY_DELAY: if (ctx->state >= INST_STATE_INIT) { mfc_dbg("invalid instance state: 0x%08x\n", type); return MFC_STATE_INVALID; } h264->dispdelay_en = 1; if ((usercfg->basic.values[0] >= 0) && (usercfg->basic.values[0] <= MFC_MAX_DISP_DELAY)) { h264->dispdelay_val = usercfg->basic.values[0]; } else { h264->dispdelay_val = MFC_MAX_DISP_DELAY; mfc_warn("invalid diplay delay count: %d", usercfg->basic.values[0]); mfc_warn("set %d by default", MFC_MAX_DISP_DELAY); } break; case MFC_DEC_SETCONF_SEI_PARSE: mfc_dbg("ctx->state: 0x%08x", ctx->state); if (ctx->state >= INST_STATE_INIT) { mfc_dbg("invalid instance state: 0x%08x\n", type); return MFC_STATE_INVALID; } if (usercfg->basic.values[0] > 0) h264->sei_parse = 1; else h264->sei_parse = 0; break; default: mfc_err("invalid set cfg type: 0x%08x\n", type); ret = -2; break; } return ret; } static int mpeg4_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv; union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg; int ret; mfc_dbg("type: 0x%08x", type); ret = set_codec_cfg(ctx, type, arg); if (ret <= 0) return ret; ret = 0; switch (type) { case MFC_DEC_SETCONF_POST_ENABLE: if (ctx->state >= INST_STATE_INIT) return MFC_STATE_INVALID; if (usercfg->basic.values[0] > 0) mpeg4->postfilter = 1; else mpeg4->postfilter = 0; break; /* JYSHIN case MFC_DEC_SETCONF_PACKEDPB: if (ctx->state < INST_STATE_OPEN) return -1; if (usercfg->basic.values[0] > 0) mpeg4->packedpb = 1; else mpeg4->packedpb = 1; break; */ default: mfc_err("invalid set cfg type: 0x%08x\n", type); ret = -2; break; } return ret; } static int fimv1_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg) { struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv; union _mfc_config_arg *usercfg = (union _mfc_config_arg 
*)arg; int ret; mfc_dbg("type: 0x%08x", type); ret = set_codec_cfg(ctx, type, arg); if (ret <= 0) return ret; ret = 0; switch (type) { case MFC_DEC_SETCONF_FIMV1_WIDTH_HEIGHT: if (ctx->state >= INST_STATE_INIT) return MFC_STATE_INVALID; fimv1->width = usercfg->basic.values[0]; fimv1->height = usercfg->basic.values[1]; break; /* JYSHIN case MFC_DEC_SETCONF_PACKEDPB: if (ctx->state < INST_STATE_OPEN) return -1; if (usercfg->basic.[0] > 0) fimv1->packedpb = 1; else fimv1->packedpb = 1; break; */ default: mfc_err("invalid set cfg type: 0x%08x\n", type); ret = -2; break; } return ret; } static struct mfc_dec_info unknown_dec = { .name = "UNKNOWN", .codectype = UNKNOWN_TYPE, .codecid = -1, .d_priv_size = 0, /* * The unknown codec operations will be not call, * unused default operations raise build warning. */ .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = pre_frame_start, .post_frame_start = post_frame_start, .multi_data_frame = multi_data_frame, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info h264_dec = { .name = "H264", .codectype = H264_DEC, .codecid = 0, .d_priv_size = sizeof(struct mfc_dec_h264), .c_ops = { .alloc_ctx_buf = h264_alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = h264_pre_seq_start, .post_seq_start = h264_post_seq_start, .set_init_arg = h264_set_init_arg, .set_codec_bufs = h264_set_codec_bufs, .set_dpbs = h264_set_dpbs, .pre_frame_start = NULL, .post_frame_start = h264_post_frame_start, .multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = h264_get_codec_cfg, .set_codec_cfg = h264_set_codec_cfg, }, }; static struct mfc_dec_info vc1_dec = { .name = "VC1", .codectype = VC1_DEC, .codecid = 1, .d_priv_size = 0, .c_ops = { .alloc_ctx_buf = 
alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = vc1_post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = vc1_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info mpeg4_dec = { .name = "MPEG4", .codectype = MPEG4_DEC, .codecid = 2, .d_priv_size = sizeof(struct mfc_dec_mpeg4), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = mpeg4_pre_seq_start, .post_seq_start = mpeg4_post_seq_start, .set_init_arg = mpeg4_set_init_arg, .set_codec_bufs = mpeg4_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, /* FIXME: mpeg4_multi_data_frame */ .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = mpeg4_set_codec_cfg, }, }; static struct mfc_dec_info xvid_dec = { .name = "XVID", .codectype = XVID_DEC, .codecid = 2, .d_priv_size = sizeof(struct mfc_dec_mpeg4), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = mpeg4_pre_seq_start, .post_seq_start = mpeg4_post_seq_start, .set_init_arg = mpeg4_set_init_arg, .set_codec_bufs = mpeg4_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, /* FIXME: mpeg4_multi_data_frame */ .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = mpeg4_set_codec_cfg, }, }; static struct mfc_dec_info mpeg1_dec = { .name = "MPEG1", .codectype = MPEG1_DEC, .codecid = 3, .d_priv_size = 0, .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = NULL, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, 
.multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info mpeg2_dec = { .name = "MPEG2", .codectype = MPEG2_DEC, .codecid = 3, .d_priv_size = 0, .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = NULL, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info h263_dec = { .name = "H263", .codectype = H263_DEC, .codecid = 4, .d_priv_size = 0, .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = h263_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info vc1rcv_dec = { .name = "VC1RCV", .codectype = VC1RCV_DEC, .codecid = 5, .d_priv_size = 0, .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = pre_seq_start, .post_seq_start = vc1_post_seq_start, .set_init_arg = set_init_arg, .set_codec_bufs = vc1_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = NULL, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = set_codec_cfg, }, }; static struct mfc_dec_info fimv1_dec = { .name = "FIMV1", .codectype = FIMV1_DEC, .codecid = 6, .d_priv_size = sizeof(struct mfc_dec_fimv1), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = fimv1_pre_seq_start, .post_seq_start = fimv1_post_seq_start, .set_init_arg = 
set_init_arg, /* FIMXE */ .set_codec_bufs = mpeg4_set_codec_bufs, /* FIXME */ .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = mpeg4_multi_data_frame, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = fimv1_set_codec_cfg, }, }; static struct mfc_dec_info fimv2_dec = { .name = "FIMV2", .codectype = FIMV2_DEC, .codecid = 7, .d_priv_size = sizeof(struct mfc_dec_mpeg4), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = mpeg4_pre_seq_start, .post_seq_start = mpeg4_post_seq_start, .set_init_arg = mpeg4_set_init_arg, .set_codec_bufs = mpeg4_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = mpeg4_multi_data_frame, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = mpeg4_set_codec_cfg, }, }; static struct mfc_dec_info fimv3_dec = { .name = "FIMV3", .codectype = FIMV3_DEC, .codecid = 8, .d_priv_size = sizeof(struct mfc_dec_mpeg4), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = mpeg4_pre_seq_start, .post_seq_start = mpeg4_post_seq_start, .set_init_arg = mpeg4_set_init_arg, .set_codec_bufs = mpeg4_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = mpeg4_multi_data_frame, .set_exe_arg = set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = mpeg4_set_codec_cfg, }, }; static struct mfc_dec_info fimv4_dec = { .name = "FIMV4", .codectype = FIMV4_DEC, .codecid = 9, .d_priv_size = sizeof(struct mfc_dec_mpeg4), .c_ops = { .alloc_ctx_buf = alloc_ctx_buf, .alloc_desc_buf = alloc_desc_buf, .pre_seq_start = mpeg4_pre_seq_start, .post_seq_start = mpeg4_post_seq_start, .set_init_arg = mpeg4_set_init_arg, .set_codec_bufs = mpeg4_set_codec_bufs, .set_dpbs = set_dpbs, .pre_frame_start = NULL, .post_frame_start = NULL, .multi_data_frame = mpeg4_multi_data_frame, .set_exe_arg = 
set_exe_arg, .get_codec_cfg = get_codec_cfg, .set_codec_cfg = mpeg4_set_codec_cfg, }, }; static int CheckMPEG4StartCode(unsigned char *src_mem, unsigned int remainSize) { unsigned int index = 0; for (index = 0; index < remainSize-3; index++) { if ((src_mem[index] == 0x00) && (src_mem[index+1] == 0x00) && (src_mem[index+2] == 0x01)) return index; } return -1; } static int CheckDecStartCode(unsigned char *src_mem, unsigned int nstreamSize, SSBSIP_MFC_CODEC_TYPE nCodecType) { unsigned int index = 0; /* Check Start Code within "isearchSize" bytes */ unsigned int isearchSize = 20; unsigned int nShift = 0; unsigned char nFlag = 0xFF; if (nCodecType == H263_DEC) { nFlag = 0x08; nShift = 4; } else if (nCodecType == MPEG4_DEC) { nFlag = 0x01; nShift = 0; } else if (nCodecType == H264_DEC) { nFlag = 0x01; nShift = 0; } else nFlag = 0xFF; /* Last frame detection from user */ if (nstreamSize == 0) nFlag = 0xFF; if (nFlag == 0xFF) return 0; if (nstreamSize > 3) { if (nstreamSize > isearchSize) { for (index = 0; index < isearchSize-3; index++) { if ((src_mem[index] == 0x00) && (src_mem[index+1] == 0x00) && ((src_mem[index+2] >> nShift) == nFlag)) return index; } } else { for (index = 0; index < nstreamSize - 3; index++) { if ((src_mem[index] == 0x00) && (src_mem[index+1] == 0x00) && ((src_mem[index+2] >> nShift) == nFlag)) return index; } } } return -1; } void mfc_init_decoders(void) { list_add_tail(&unknown_dec.list, &mfc_decoders); list_add_tail(&h264_dec.list, &mfc_decoders); list_add_tail(&vc1_dec.list, &mfc_decoders); list_add_tail(&mpeg4_dec.list, &mfc_decoders); list_add_tail(&xvid_dec.list, &mfc_decoders); list_add_tail(&mpeg1_dec.list, &mfc_decoders); list_add_tail(&mpeg2_dec.list, &mfc_decoders); list_add_tail(&h263_dec.list, &mfc_decoders); list_add_tail(&vc1rcv_dec.list, &mfc_decoders); list_add_tail(&fimv1_dec.list, &mfc_decoders); list_add_tail(&fimv2_dec.list, &mfc_decoders); list_add_tail(&fimv3_dec.list, &mfc_decoders); list_add_tail(&fimv4_dec.list, 
&mfc_decoders); /* FIXME: 19, 20 */ } static int mfc_set_decoder(struct mfc_inst_ctx *ctx, SSBSIP_MFC_CODEC_TYPE codectype) { struct list_head *pos; struct mfc_dec_info *decoder; struct mfc_dec_ctx *dec_ctx; ctx->codecid = -1; /* find and set codec private */ list_for_each(pos, &mfc_decoders) { decoder = list_entry(pos, struct mfc_dec_info, list); if (decoder->codectype == codectype) { if (decoder->codecid < 0) break; /* Allocate Decoder context memory */ dec_ctx = kzalloc(sizeof(struct mfc_dec_ctx), GFP_KERNEL); if (!dec_ctx) { mfc_err("failed to allocate codec private\n"); return -ENOMEM; } ctx->c_priv = dec_ctx; /* Allocate Decoder context private memory */ dec_ctx->d_priv = kzalloc(decoder->d_priv_size, GFP_KERNEL); if (!dec_ctx->d_priv) { mfc_err("failed to allocate decoder private\n"); kfree(dec_ctx); ctx->c_priv = NULL; return -ENOMEM; } ctx->codecid = decoder->codecid; ctx->type = DECODER; ctx->c_ops = (struct codec_operations *)&decoder->c_ops; break; } } if (ctx->codecid < 0) mfc_err("couldn't find proper decoder codec type: %d\n", codectype); return ctx->codecid; } static void mfc_set_stream_info( struct mfc_inst_ctx *ctx, unsigned int addr, unsigned int size, unsigned int ofs) { if (ctx->buf_cache_type == CACHE) { flush_all_cpu_caches(); outer_flush_all(); } write_reg(addr, MFC_SI_CH1_ES_ADR); write_reg(size, MFC_SI_CH1_ES_SIZE); /* FIXME: IOCTL_MFC_GET_IN_BUF size */ write_reg(MFC_CPB_SIZE, MFC_SI_CH1_CPB_SIZE); write_reg(ctx->descbufofs, MFC_SI_CH1_DESC_ADR); write_reg(ctx->descbufsize, MFC_SI_CH1_DESC_SIZE); /* FIXME: right position */ write_shm(ctx, ofs, START_BYTE_NUM); } int mfc_init_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args) { struct mfc_dec_init_arg *init_arg = (struct mfc_dec_init_arg *)args; struct mfc_dec_ctx *dec_ctx = NULL; struct mfc_pre_cfg *precfg; struct list_head *pos, *nxt; int ret; long mem_ofs; ret = mfc_set_decoder(ctx, init_arg->in_codec_type); if (ret < 0) { mfc_err("failed to setup decoder codec\n"); ret = 
MFC_DEC_INIT_FAIL; goto err_codec_setup; } dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; dec_ctx->streamaddr = init_arg->in_strm_buf; dec_ctx->streamsize = init_arg->in_strm_size; mfc_dbg("stream size: %d", init_arg->in_strm_size); dec_ctx->crc = init_arg->in_crc; dec_ctx->pixelcache = init_arg->in_pixelcache; dec_ctx->slice = 0; mfc_warn("Slice Mode disabled forcefully\n"); dec_ctx->numextradpb = init_arg->in_numextradpb; dec_ctx->dpbflush = 0; dec_ctx->ispackedpb = init_arg->in_packed_PB; /* * assign pre configuration values to instance context */ list_for_each_safe(pos, nxt, &ctx->presetcfgs) { precfg = list_entry(pos, struct mfc_pre_cfg, list); if (ctx->c_ops->set_codec_cfg) { ret = ctx->c_ops->set_codec_cfg(ctx, precfg->type, precfg->values); if (ret < 0) mfc_warn("cannot set preset config type: 0x%08x: %d", precfg->type, ret); } } mfc_set_inst_state(ctx, INST_STATE_SETUP); /* * allocate context buffer */ #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if ((!ctx->drm_flag) && (ctx->c_ops->alloc_ctx_buf)) { #else if (ctx->c_ops->alloc_ctx_buf) { #endif if (ctx->c_ops->alloc_ctx_buf(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_ctx_buf; } } /* [crc, pixelcache] */ ret = mfc_cmd_inst_open(ctx); if (ret < 0) goto err_inst_open; mfc_set_inst_state(ctx, INST_STATE_OPEN); if (init_shm(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_shm_init; } /* * allocate descriptor buffer */ if (ctx->c_ops->alloc_desc_buf) { if (ctx->c_ops->alloc_desc_buf(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_desc_buf; } } /* * execute pre sequence start operation * [slice] */ if (ctx->c_ops->pre_seq_start) { if (ctx->c_ops->pre_seq_start(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_pre_seq; } } /* FIXME: move to pre_seq_start */ mem_ofs = mfc_mem_ext_ofs(dec_ctx->streamaddr, dec_ctx->streamsize, PORT_A); if (mem_ofs < 0) { ret = MFC_DEC_INIT_FAIL; goto err_mem_ofs; } else { mfc_set_stream_info(ctx, mem_ofs >> 11, dec_ctx->streamsize, 0); } ret = mfc_cmd_seq_start(ctx); if (ret < 0) goto 
err_seq_start; /* [numextradpb] */ if (ctx->c_ops->post_seq_start) { if (ctx->c_ops->post_seq_start(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_post_seq; } } if (ctx->height > MAX_VER_SIZE) { if (ctx->height > MAX_HOR_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); goto err_chk_res; } if (ctx->width > MAX_VER_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); goto err_chk_res; } } else { if (ctx->width > MAX_HOR_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); goto err_chk_res; } } if (ctx->c_ops->set_init_arg) { if (ctx->c_ops->set_init_arg(ctx, (void *)init_arg) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_set_arg; } } if (dec_ctx->numtotaldpb < 7) dec_ctx->numtotaldpb = 7; mfc_dbg("H: %d, W: %d, DPB_Count: %d", ctx->width, ctx->height, dec_ctx->numtotaldpb); #if defined(CONFIG_BUSFREQ) /* Lock MFC & Bus FREQ for high resolution */ if (ctx->width >= MAX_HOR_RES || ctx->height >= MAX_VER_RES) { if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) { exynos4_busfreq_lock(DVFS_LOCK_ID_MFC, BUS_L0); mfc_dbg("Bus FREQ locked to L0\n"); } atomic_inc(&ctx->dev->busfreq_lock_cnt); ctx->busfreq_flag = true; } else { #if defined(CONFIG_CPU_EXYNOS4210) /* Fix MFC & Bus Frequency for better performance */ if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) { exynos4_busfreq_lock(DVFS_LOCK_ID_MFC, BUS_L1); mfc_dbg("Bus FREQ locked to L1\n"); } atomic_inc(&ctx->dev->busfreq_lock_cnt); ctx->busfreq_flag = true; #endif } #endif #if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ) if ((ctx->width >= 1280 && ctx->height >= 720) || (ctx->width >= 720 && ctx->height >= 1280)) { if (atomic_read(&ctx->dev->cpufreq_lock_cnt) == 0) { if (0 == ctx->dev->cpufreq_level) /* 500MHz */ exynos_cpufreq_get_level(500000, &ctx->dev->cpufreq_level); exynos_cpufreq_lock(DVFS_LOCK_ID_MFC, ctx->dev->cpufreq_level); mfc_dbg("[%s] CPU Freq Locked 500MHz!\n", __func__); } 
atomic_inc(&ctx->dev->cpufreq_lock_cnt); ctx->cpufreq_flag = true; } #endif #ifdef CONFIG_BUSFREQ_OPP if (HD_MOVIE_SIZE_MULTIPLY_WIDTH_HEIGHT > (ctx->width * ctx->height)) { if (atomic_read(&ctx->dev->dmcthreshold_lock_cnt) == 0) { mfc_info("Implement set dmc_max_threshold\n"); if (soc_is_exynos4212()) { dmc_max_threshold = EXYNOS4212_DMC_MAX_THRESHOLD + 5; } else if (soc_is_exynos4412()) { if (samsung_rev() >= EXYNOS4412_REV_2_0) dmc_max_threshold = PRIME_DMC_MAX_THRESHOLD + 5; else dmc_max_threshold = EXYNOS4412_DMC_MAX_THRESHOLD + 5; } else { pr_err("Unsupported model.\n"); return -EINVAL; } } atomic_inc(&ctx->dev->dmcthreshold_lock_cnt); ctx->dmcthreshold_flag = true; } #endif /* * allocate & set codec buffers */ if (ctx->c_ops->set_codec_bufs) { if (ctx->c_ops->set_codec_bufs(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_codec_bufs; } } /* * allocate & set DPBs */ if (ctx->c_ops->set_dpbs) { if (ctx->c_ops->set_dpbs(ctx) < 0) { ret = MFC_DEC_INIT_FAIL; goto err_dpbs_set; } } ret = mfc_cmd_init_buffers(ctx); if (ret < 0) goto err_buf_init; mfc_set_inst_state(ctx, INST_STATE_INIT); while (!list_empty(&ctx->presetcfgs)) { precfg = list_entry((&ctx->presetcfgs)->next, struct mfc_pre_cfg, list); mfc_dbg("remove used preset config [0x%08x]\n", precfg->type); list_del(&precfg->list); kfree(precfg); } INIT_LIST_HEAD(&ctx->presetcfgs); mfc_print_buf(); return MFC_OK; err_buf_init: mfc_free_buf_type(ctx->id, MBT_DPB); err_dpbs_set: mfc_free_buf_type(ctx->id, MBT_CODEC); err_codec_bufs: #if defined(CONFIG_BUSFREQ) /* Release MFC & Bus Frequency lock for High resolution */ if (ctx->busfreq_flag == true) { atomic_dec(&ctx->dev->busfreq_lock_cnt); ctx->busfreq_flag = false; if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) { exynos4_busfreq_lock_free(DVFS_LOCK_ID_MFC); mfc_dbg("Bus FREQ released\n"); } } #endif err_set_arg: err_chk_res: err_post_seq: err_seq_start: #ifdef DUMP_STREAM mfc_fw_debug(); dump_stream(dec_ctx->streamaddr, dec_ctx->streamsize); #endif err_mem_ofs: 
err_pre_seq: mfc_free_buf_type(ctx->id, MBT_DESC); err_desc_buf: mfc_free_buf_type(ctx->id, MBT_SHM); ctx->shm = NULL; ctx->shmofs = 0; err_shm_init: mfc_cmd_inst_close(ctx); ctx->state = INST_STATE_SETUP; err_inst_open: mfc_free_buf_type(ctx->id, MBT_CTX); err_ctx_buf: if (dec_ctx->d_priv) kfree(dec_ctx->d_priv); kfree(dec_ctx); ctx->c_priv = NULL; ctx->codecid = -1; ctx->type = 0; ctx->c_ops = NULL; ctx->state = INST_STATE_CREATE; err_codec_setup: return ret; } int mfc_change_resolution(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg) { int ret; mfc_free_buf_type(ctx->id, MBT_DPB); ret = mfc_cmd_seq_start(ctx); if (ret < 0) return ret; /* [numextradpb] */ if (ctx->c_ops->post_seq_start) { if (ctx->c_ops->post_seq_start(ctx) < 0) return MFC_DEC_INIT_FAIL; } if (ctx->height > MAX_VER_SIZE) { if (ctx->height > MAX_HOR_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); return MFC_DEC_INIT_FAIL; } if (ctx->width > MAX_VER_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); return MFC_DEC_INIT_FAIL; } } else { if (ctx->width > MAX_HOR_SIZE) { mfc_err("Not support resolution: %dx%d\n", ctx->width, ctx->height); return MFC_DEC_INIT_FAIL; } } exe_arg->out_img_width = ctx->width; exe_arg->out_img_height = ctx->height; exe_arg->out_buf_width = ALIGN(ctx->width, ALIGN_W); exe_arg->out_buf_height = ALIGN(ctx->height, ALIGN_H); /* * allocate & set DPBs */ if (ctx->c_ops->set_dpbs) { if (ctx->c_ops->set_dpbs(ctx) < 0) return MFC_DEC_INIT_FAIL; } ret = mfc_cmd_init_buffers(ctx); #ifdef CONFIG_SLP if (ctx->codecid == H264_DEC) { exe_arg->out_crop_right_offset = (read_shm(ctx, CROP_INFO1) >> 16) & 0xFFFF; exe_arg->out_crop_left_offset = read_shm(ctx, CROP_INFO1) & 0xFFFF; exe_arg->out_crop_bottom_offset = (read_shm(ctx, CROP_INFO2) >> 16) & 0xFFFF; exe_arg->out_crop_top_offset = read_shm(ctx, CROP_INFO2) & 0xFFFF; mfc_dbg("mfc_change_resolution: crop info t: %d, r: %d, b: %d, l: %d\n", exe_arg->out_crop_top_offset, 
exe_arg->out_crop_right_offset, exe_arg->out_crop_bottom_offset, exe_arg->out_crop_left_offset); } #endif if (ret < 0) return ret; return MFC_OK; } int mfc_check_resolution_change(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg) { int resol_status; if (exe_arg->out_display_status != DISP_S_DECODING) return 0; resol_status = (read_reg(MFC_SI_DISPLAY_STATUS) >> DISP_RC_SHIFT) & DISP_RC_MASK; if (resol_status == DISP_RC_INC || resol_status == DISP_RC_DEC) { ctx->resolution_status = RES_SET_CHANGE; mfc_dbg("Change Resolution status: %d\n", resol_status); } return 0; } static int mfc_decoding_frame(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg, int *consumed) { int start_ofs = *consumed; int display_luma_addr; int display_chroma_addr; int display_frame_type; int display_frame_tag; unsigned char *stream_vir; int ret; struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv; long mem_ofs; #ifdef CONFIG_VIDEO_MFC_VCM_UMP void *ump_handle; #endif #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if (!ctx->drm_flag) { #endif /* Check Frame Start code */ stream_vir = phys_to_virt(exe_arg->in_strm_buf + start_ofs); ret = CheckDecStartCode(stream_vir, exe_arg->in_strm_size, exe_arg->in_codec_type); if (ret < 0) { mfc_err("Frame Check start Code Failed\n"); /* FIXME: Need to define proper error */ return MFC_FRM_BUF_SIZE_FAIL; } #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION } #endif /* Set Frame Tag */ write_shm(ctx, dec_ctx->frametag, SET_FRAME_TAG); /* FIXME: */ write_reg(0xFFFFFFFF, MFC_SI_CH1_RELEASE_BUF); if (dec_ctx->dpbflush) { unsigned int reg; reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL); reg &= ~(1<<14); reg |= (1<<14); write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); /* for DPB Flush*/ /*clear dbp flush in context*/ dec_ctx->dpbflush = 0; } mem_ofs = mfc_mem_ext_ofs(exe_arg->in_strm_buf, exe_arg->in_strm_size, PORT_A); if (mem_ofs < 0) return MFC_DEC_EXE_ERR; else mfc_set_stream_info(ctx, mem_ofs >> 11, exe_arg->in_strm_size, start_ofs); /* lastframe: 
mfc_dec_cfg */ ret = mfc_cmd_frame_start(ctx); if (ret < 0) return ret; if (ctx->c_ops->post_frame_start) { if (ctx->c_ops->post_frame_start(ctx) < 0) return MFC_DEC_EXE_ERR; } /* update display status information */ dec_ctx->dispstatus = read_reg(MFC_SI_DISPLAY_STATUS) & DISP_S_MASK; /* get decode status, frame type */ dec_ctx->decstatus = read_reg(MFC_SI_DECODE_STATUS) & DEC_S_MASK; dec_ctx->decframetype = read_reg(MFC_SI_FRAME_TYPE) & DEC_FRM_MASK; if (dec_ctx->dispstatus == DISP_S_DECODING) { display_luma_addr = 0; display_chroma_addr = 0; display_frame_type = DISP_FRM_X; display_frame_tag = read_shm(ctx, GET_FRAME_TAG_TOP); } else { display_luma_addr = read_reg(MFC_SI_DISPLAY_Y_ADR); display_chroma_addr = read_reg(MFC_SI_DISPLAY_C_ADR); display_frame_type = get_disp_frame_type(); display_frame_tag = read_shm(ctx, GET_FRAME_TAG_TOP); if (dec_ctx->ispackedpb) { if ((dec_ctx->decframetype == DEC_FRM_P) || (dec_ctx->decframetype == DEC_FRM_I)) { if (display_frame_type == DISP_FRM_N) display_frame_type = dec_ctx->predispframetype; } else { if (dec_ctx->predisplumaaddr != 0) { display_luma_addr = dec_ctx->predisplumaaddr; display_chroma_addr = dec_ctx->predispchromaaddr; display_frame_type = dec_ctx->predispframetype; /* over write frame tag */ display_frame_tag = dec_ctx->predispframetag; } } /* save the display addr */ dec_ctx->predisplumaaddr = read_reg(MFC_SI_DISPLAY_Y_ADR); dec_ctx->predispchromaaddr = read_reg(MFC_SI_DISPLAY_C_ADR); /* save the display frame type */ if (get_disp_frame_type() != DISP_FRM_N) { dec_ctx->predispframetype = get_disp_frame_type(); /* Set Frame Tag */ dec_ctx->predispframetag = read_shm(ctx, GET_FRAME_TAG_TOP); } mfc_dbg("pre_luma_addr: 0x%08x, pre_chroma_addr:" "0x%08x, pre_disp_frame_type: %d\n", (dec_ctx->predisplumaaddr << 11), (dec_ctx->predispchromaaddr << 11), dec_ctx->predispframetype); } } /* handle ImmeidatelyDisplay for Seek, I frame only */ if (dec_ctx->immediatelydisplay) { mfc_dbg("Immediately display\n"); 
dec_ctx->dispstatus = dec_ctx->decstatus; /* update frame tag information with current ID */ exe_arg->out_frametag_top = dec_ctx->frametag; /* FIXME : need to check this */ exe_arg->out_frametag_bottom = 0; if (dec_ctx->decstatus == DEC_S_DD) { mfc_dbg("Immediately display status: DEC_S_DD\n"); display_luma_addr = read_reg(MFC_SI_DECODE_Y_ADR); display_chroma_addr = read_reg(MFC_SI_DECODE_C_ADR); } display_frame_type = dec_ctx->decframetype; /* clear Immediately Display in decode context */ dec_ctx->immediatelydisplay = 0; } else { /* Get Frame Tag top and bottom */ exe_arg->out_frametag_top = display_frame_tag; exe_arg->out_frametag_bottom = read_shm(ctx, GET_FRAME_TAG_BOT); } mfc_dbg("decode y: 0x%08x, c: 0x%08x\n", read_reg(MFC_SI_DECODE_Y_ADR) << 11, read_reg(MFC_SI_DECODE_C_ADR) << 11); exe_arg->out_display_status = dec_ctx->dispstatus; #ifdef CONFIG_SLP_DMABUF if (exe_arg->memory_type == MEMORY_DMABUF) { exe_arg->out_display_Y_addr = mfc_get_buf_dmabuf(display_luma_addr << 11); if (exe_arg->out_display_Y_addr < 0) { mfc_err("mfc_get_buf_dmabuf : Get Y fd error %d\n", exe_arg->out_display_Y_addr); return MFC_DEC_EXE_ERR; } exe_arg->out_display_C_addr = mfc_get_buf_dmabuf(display_chroma_addr << 11); if (exe_arg->out_display_C_addr < 0) { mfc_err("mfc_get_buf_dmabuf : Get C fd error %d\n", exe_arg->out_display_C_addr); return MFC_DEC_EXE_ERR; } } else { #endif exe_arg->out_display_Y_addr = (display_luma_addr << 11); exe_arg->out_display_C_addr = (display_chroma_addr << 11); #ifdef CONFIG_SLP_DMABUF } #endif exe_arg->out_disp_pic_frame_type = display_frame_type; exe_arg->out_y_offset = mfc_mem_data_ofs(display_luma_addr << 11, 1); exe_arg->out_c_offset = mfc_mem_data_ofs(display_chroma_addr << 11, 1); #if defined(CONFIG_VIDEO_MFC_VCM_UMP) exe_arg->out_y_secure_id = 0; exe_arg->out_c_secure_id = 0; ump_handle = mfc_get_buf_ump_handle(out_display_Y_addr << 11); if (ump_handle != NULL) exe_arg->out_y_secure_id = mfc_ump_get_id(ump_handle); ump_handle = 
mfc_get_buf_ump_handle(out_display_C_addr << 11); if (ump_handle != NULL) exe_arg->out_c_secure_id = mfc_ump_get_id(ump_handle); mfc_dbg("secure IDs Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_secure_id, exe_arg->out_c_secure_id); #elif defined(CONFIG_S5P_VMEM) exe_arg->out_y_cookie = s5p_getcookie((void *)(out_display_Y_addr << 11)); exe_arg->out_c_cookie = s5p_getcookie((void *)(out_display_C_addr << 11)); mfc_dbg("cookie Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_cookie, exe_arg->out_c_cookie); #endif exe_arg->out_pic_time_top = read_shm(ctx, PIC_TIME_TOP); exe_arg->out_pic_time_bottom = read_shm(ctx, PIC_TIME_BOT); exe_arg->out_consumed_byte = read_reg(MFC_SI_FRM_COUNT); if (ctx->codecid == H264_DEC) { exe_arg->out_crop_right_offset = (read_shm(ctx, CROP_INFO1) >> 16) & 0xFFFF; exe_arg->out_crop_left_offset = read_shm(ctx, CROP_INFO1) & 0xFFFF; exe_arg->out_crop_bottom_offset = (read_shm(ctx, CROP_INFO2) >> 16) & 0xFFFF; exe_arg->out_crop_top_offset = read_shm(ctx, CROP_INFO2) & 0xFFFF; mfc_dbg("crop info t: %d, r: %d, b: %d, l: %d\n", exe_arg->out_crop_top_offset, exe_arg->out_crop_right_offset, exe_arg->out_crop_bottom_offset, exe_arg->out_crop_left_offset); } /* mfc_dbg("decode frame type: %d\n", dec_ctx->decframetype); mfc_dbg("display frame type: %d\n", exe_arg->out_disp_pic_frame_type); mfc_dbg("display y: 0x%08x, c: 0x%08x\n", exe_arg->out_display_Y_addr, exe_arg->out_display_C_addr); */ mfc_dbg("decode frame type: %d\n", dec_ctx->decframetype); mfc_dbg("display frame type: %d,%d\n", exe_arg->out_disp_pic_frame_type, exe_arg->out_frametag_top); mfc_dbg("display y: 0x%08x, c: 0x%08x\n", exe_arg->out_display_Y_addr, exe_arg->out_display_C_addr); *consumed = read_reg(MFC_SI_FRM_COUNT); mfc_dbg("stream size: %d, consumed: %d\n", exe_arg->in_strm_size, *consumed); return MFC_OK; } int mfc_exec_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args) { struct mfc_dec_exe_arg *exe_arg; int ret; int consumed = 0; struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx 
*)ctx->c_priv; int sec_try_tag; /* tag store for second try */ exe_arg = (struct mfc_dec_exe_arg *)args; /* set pre-decoding informations */ dec_ctx->streamaddr = exe_arg->in_strm_buf; dec_ctx->streamsize = exe_arg->in_strm_size; dec_ctx->frametag = exe_arg->in_frametag; dec_ctx->immediatelydisplay = exe_arg->in_immediately_disp; mfc_set_inst_state(ctx, INST_STATE_EXE); ret = mfc_decoding_frame(ctx, exe_arg, &consumed); sec_try_tag = exe_arg->out_frametag_top; mfc_set_inst_state(ctx, INST_STATE_EXE_DONE); if (ret == MFC_OK) { mfc_check_resolution_change(ctx, exe_arg); if (ctx->resolution_status == RES_SET_CHANGE) { ret = mfc_decoding_frame(ctx, exe_arg, &consumed); #ifndef CONFIG_SLP } else if ((ctx->resolution_status == RES_WAIT_FRAME_DONE) && (exe_arg->out_display_status == DISP_S_FINISH)) { exe_arg->out_display_status = DISP_S_RES_CHANGE; ret = mfc_change_resolution(ctx, exe_arg); if (ret != MFC_OK) return ret; ctx->resolution_status = RES_NO_CHANGE; #else } else if (ctx->resolution_status == RES_WAIT_FRAME_DONE) { if (exe_arg->out_display_status == DISP_S_FINISH) { exe_arg->out_display_status = DISP_S_RES_CHANGE_DONE; ret = mfc_change_resolution(ctx, exe_arg); if (ret != MFC_OK) return ret; ctx->resolution_status = RES_NO_CHANGE; } else exe_arg->out_display_status = DISP_S_RES_CHANGING; #endif } if ((dec_ctx->ispackedpb) && (dec_ctx->decframetype == DEC_FRM_P) && (exe_arg->in_strm_size - consumed > 4)) { unsigned char *stream_vir; int offset = 0; mfc_dbg("[%s] strmsize : %d consumed : %d\n", __func__, exe_arg->in_strm_size, consumed); stream_vir = phys_to_virt(exe_arg->in_strm_buf); #ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION if (!ctx->drm_flag) #endif mfc_mem_cache_inv((void *)stream_vir, exe_arg->in_strm_size); offset = CheckMPEG4StartCode(stream_vir+consumed, dec_ctx->streamsize - consumed); if (offset == -1) { mfc_warn("No start code in remained bitstream: %d\n", offset); return ret; } if (offset > 4) consumed += offset; exe_arg->in_strm_size -= consumed; 
dec_ctx->frametag = exe_arg->in_frametag; dec_ctx->immediatelydisplay = exe_arg->in_immediately_disp; mfc_set_inst_state(ctx, INST_STATE_EXE); ret = mfc_decoding_frame(ctx, exe_arg, &consumed); exe_arg->out_frametag_top = sec_try_tag; mfc_set_inst_state(ctx, INST_STATE_EXE_DONE); } } /* if (ctx->c_ops->set_dpbs) { if (ctx->c_ops->set_dpbs(ctx) < 0) return MFC_DEC_INIT_FAIL; } */ return ret; }
gpl-2.0
kanpol/bibo
miscutils/adjtimex.c
1
3637
/* vi: set sw=4 ts=4: */ /* * adjtimex.c - read, and possibly modify, the Linux kernel `timex' variables. * * Originally written: October 1997 * Last hack: March 2001 * Copyright 1997, 2000, 2001 Larry Doolittle <LRDoolittle@lbl.gov> * * busyboxed 20 March 2001, Larry Doolittle <ldoolitt@recycle.lbl.gov> * * Licensed under GPLv2 or later, see file LICENSE in this source tree. */ //usage:#define adjtimex_trivial_usage //usage: "[-q] [-o OFF] [-f FREQ] [-p TCONST] [-t TICK]" //usage:#define adjtimex_full_usage "\n\n" //usage: "Read or set kernel time variables. See adjtimex(2)\n" //usage: "\n -q Quiet" //usage: "\n -o OFF Time offset, microseconds" //usage: "\n -f FREQ Frequency adjust, integer kernel units (65536 is 1ppm)" //usage: "\n -t TICK Microseconds per tick, usually 10000" //usage: "\n (positive -t or -f values make clock run faster)" //usage: "\n -p TCONST" #include "libbb.h" #ifndef __BIONIC__ # include <sys/timex.h> #endif static const uint16_t statlist_bit[] = { STA_PLL, STA_PPSFREQ, STA_PPSTIME, STA_FLL, STA_INS, STA_DEL, STA_UNSYNC, STA_FREQHOLD, STA_PPSSIGNAL, STA_PPSJITTER, STA_PPSWANDER, STA_PPSERROR, STA_CLOCKERR, 0 }; static const char statlist_name[] = "PLL" "\0" "PPSFREQ" "\0" "PPSTIME" "\0" "FFL" "\0" "INS" "\0" "DEL" "\0" "UNSYNC" "\0" "FREQHOLD" "\0" "PPSSIGNAL" "\0" "PPSJITTER" "\0" "PPSWANDER" "\0" "PPSERROR" "\0" "CLOCKERR" ; static const char ret_code_descript[] = "clock synchronized" "\0" "insert leap second" "\0" "delete leap second" "\0" "leap second in progress" "\0" "leap second has occurred" "\0" "clock not synchronized" ; int adjtimex_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int adjtimex_main(int argc UNUSED_PARAM, char **argv) { enum { OPT_quiet = 0x1 }; unsigned opt; char *opt_o, *opt_f, *opt_p, *opt_t; struct timex txc; int i, ret; const char *descript; opt_complementary = "=0"; /* no valid non-option parameters */ opt = getopt32(argv, "qo:f:p:t:", &opt_o, &opt_f, &opt_p, &opt_t); txc.modes = 0; //if (opt & 0x1) // 
-q if (opt & 0x2) { // -o txc.offset = xatol(opt_o); txc.modes |= ADJ_OFFSET_SINGLESHOT; } if (opt & 0x4) { // -f txc.freq = xatol(opt_f); txc.modes |= ADJ_FREQUENCY; } if (opt & 0x8) { // -p txc.constant = xatol(opt_p); txc.modes |= ADJ_TIMECONST; } if (opt & 0x10) { // -t txc.tick = xatol(opt_t); txc.modes |= ADJ_TICK; } ret = adjtimex(&txc); if (ret < 0) { bb_perror_nomsg_and_die(); } if (!(opt & OPT_quiet)) { const char *sep; const char *name; printf( " mode: %d\n" "-o offset: %ld us\n" "-f freq.adjust: %ld (65536 = 1ppm)\n" " maxerror: %ld\n" " esterror: %ld\n" " status: %d (", txc.modes, txc.offset, txc.freq, txc.maxerror, txc.esterror, txc.status); /* representative output of next code fragment: * "PLL | PPSTIME" */ name = statlist_name; sep = ""; for (i = 0; statlist_bit[i]; i++) { if (txc.status & statlist_bit[i]) { printf("%s%s", sep, name); sep = " | "; } name += strlen(name) + 1; } descript = "error"; if (ret <= 5) descript = nth_string(ret_code_descript, ret); printf(")\n" "-p timeconstant: %ld\n" " precision: %ld us\n" " tolerance: %ld\n" "-t tick: %ld us\n" " time.tv_sec: %ld\n" " time.tv_usec: %ld\n" " return value: %d (%s)\n", txc.constant, txc.precision, txc.tolerance, txc.tick, (long)txc.time.tv_sec, (long)txc.time.tv_usec, ret, descript); } return 0; }
gpl-2.0
kraj/gcc
gcc/testsuite/gcc.target/i386/avx512f-broadcast-pr87767-1.c
1
1321
/* PR target/87767 */ /* { dg-do compile } */ /* { dg-options "-O2 -mavx512f -mavx512dq" } */ /* { dg-additional-options "-fno-PIE" { target ia32 } } */ /* { dg-additional-options "-mdynamic-no-pic" { target { *-*-darwin* && ia32 } } } /* { dg-final { scan-assembler-times "\[^\n\]*\\\{1to8\\\}" 2 { target { ! ia32 } } } } */ /* { dg-final { scan-assembler-times "\[^\n\]*\\\{1to8\\\}" 5 { target ia32 } } } */ /* { dg-final { scan-assembler-times "\[^\n\]*\\\{1to16\\\}" 2 } } */ /* { dg-final { scan-assembler-times "vpbroadcastd\[\\t \]+%(?:r|e)\[^\n\]*, %zmm\[0-9\]+" 3 } } */ /* { dg-final { scan-assembler-times "vpbroadcastq\[\\t \]+%r\[^\n\]*, %zmm\[0-9\]+" 3 { target { ! ia32 } } } } */ typedef int v16si __attribute__ ((vector_size (64))); typedef long long v8di __attribute__ ((vector_size (64))); typedef float v16sf __attribute__ ((vector_size (64))); typedef double v8df __attribute__ ((vector_size (64))); #define CONSTANT 101; #define FOO(VTYPE, OP_NAME, OP) \ VTYPE \ __attribute__ ((noipa)) \ foo_##OP_NAME##_##VTYPE (VTYPE a) \ { \ return a OP CONSTANT; \ } \ FOO (v16si, add, +); FOO (v8di, add, +); FOO (v16sf, add, +); FOO (v8df, add, +); FOO (v16si, sub, -); FOO (v8di, sub, -); FOO (v16si, mul, *); FOO (v8di, mul, *); FOO (v16sf, mul, *); FOO (v8df, mul, *);
gpl-2.0
YOlodfssdf/evolution3d
Tools/Medusa/Plugin/DemoPlugin/DemoPlugin.cpp
1
3780
// DemoPlugin.cpp : Defines the initialization routines for the DLL. // #include "stdafx.h" #include "DemoPlugin.h" #include "Resource.h" #include <BaseLib/xEvol3DBaseInc.h> #include <Application/xPluginMgr.h> #include <BaseLib/xStringHash.h> #include "xMedusaEditor.h" using namespace nsMedusaEditor; #pragma warning (disable:4996) #ifdef _DEBUG #define new DEBUG_NEW #endif // //TODO: If this DLL is dynamically linked against the MFC DLLs, // any functions exported from this DLL which call into // MFC must have the AFX_MANAGE_STATE macro added at the // very beginning of the function. // // For example: // // extern "C" BOOL PASCAL EXPORT ExportedFunction() // { // AFX_MANAGE_STATE(AfxGetStaticModuleState()); // // normal function body here // } // // It is very important that this macro appear in each // function, prior to any calls into MFC. This means that // it must appear as the first statement within the // function, even before any object variable declarations // as their constructors may generate calls into the MFC // DLL. // // Please see MFC Technical Notes 33 and 58 for additional // details. 
// // CDemoPluginApp BEGIN_MESSAGE_MAP(CDemoPluginApp, CWinApp) END_MESSAGE_MAP() CMEdUiToolBarInfo g_TestTollbar; // CDemoPluginApp construction CDemoPluginApp::CDemoPluginApp() { // TODO: add construction code here, // Place all significant initialization in InitInstance } // The one and only CDemoPluginApp object CDemoPluginApp theApp; // CDemoPluginApp initialization BOOL CDemoPluginApp::InitInstance() { CWinApp::InitInstance(); g_TestTollbar.m_hDll = AfxGetResourceHandle(); wcscpy(g_TestTollbar.m_name , L"Toolbar.Plugin"); wcscpy(g_TestTollbar.m_title , L"ûÃû×ÖµÄToolbar"); g_TestTollbar.m_ResID = IDR_TOOLBAR1; return TRUE; } using namespace XEvol3D; class xTestResourcePlugin : public IPluginObject { class CMyCallback : public CMEdUiToolBarInfo::MEdUIToolbarCallback { public: virtual CMEdUiToolBarInfo::CommandUIStatus OnUpdateCommandUI(int ctrlID , int ctrlIdx) { return CMEdUiToolBarInfo::CUS_ENABLE; } virtual bool OnCommand(int ctrlID , int ctrlIdx) { switch(ctrlID) { case ID_BUTTON32771: MessageBox(GetActiveWindow() , TEXT("ÄãÖªµÀÎÒÊÇ˭ô£¿ÎÒÊǵÚÒ»¸ö") , TEXT("ºÙºÙ") , MB_OK); break; case ID_BUTTON32772: MessageBox(GetActiveWindow() , TEXT("ÄãÖªµÀÎÒÊÇ˭ô£¿ÎÒÊǵڶþ¸ö") , TEXT("ºÙºÙ") , MB_OK); break; case ID_BUTTON32773: MessageBox(GetActiveWindow() , TEXT("ÄãÖªµÀÎÒÊÇ˭ô£¿ÎÒÊǵÚÈý¸ö") , TEXT("ºÙºÙ") , MB_OK); break; } return true; } }; CMyCallback g_Cb; bool start(const wchar_t* pluginName , const wchar_t* pluginPath) { g_TestTollbar.m_funcCallback = &g_Cb; g_TestTollbar.ProcessToolbarID(-1); GetMedusaEditor()->GetUI()->RegisteToolbar(&g_TestTollbar); return true; } void info(xPluginInfo* info) { wcsncpy(info->m_Description , L"ResourcePlugin.Test name=[Test]" , 256); info->m_ID = xStringHash(L"ResourcePlugin.Test"); wcsncpy(info->m_Name , L"ResourcePlugin.Test Plugin" , 32); info->m_Type =ePT_Platform; } bool stop() { return true; } unsigned int nObject() { return 0; } void* createObject(const wchar_t* objName, const void * arg) { return NULL; } const wchar_t** 
objectList() { static wchar_t* objectList[1] = {NULL}; return (const wchar_t**)objectList; } }; #ifndef _XEVOL_BUILD_STATIC_PLUGIN_ extern "C" _declspec(dllexport) IPluginObject* PLUGIN_ENTRYPOINT() { static xTestResourcePlugin gPluginObject; return &gPluginObject; } #else #endif
gpl-2.0
YOlodfssdf/evolution3d
xEvol3D/mem/MemPool.cpp
1
8618
/* XEvol3D Rendering Engine . (http://gforge.osdn.net.cn/projects/xevol3d/) . Stanly.Lee 2006 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "../xStdPch.h" #include "MemPool.h" #include <cassert> #include <algorithm> #ifdef _WIN32 #include <Windows.h> #endif #include <iostream> using namespace std; /****** ÄÚ´æ³Ø·ÖÅäÆ÷£® Ò²¾ÍÊÇÒ»¸ö¹Ì¶¨´óСµÄÄÚ´æ·ÖÅäÆ÷. ·ÖÅäË㷨Ϊ×Ô´´²¿·Ö£¬Íâ¼ÓLokiµÄÆô·¢£¬ËùÒÔÏÖÔÚËã·¨»ù±¾ÎªLokiÖеÄFixedAlloctor ******/ BEGIN_XEVOL3D_MEM /* m_pData---> [X X X X X ...] µÚÒ»¸ö¿ÉÓÿé --> [3 | _____ ...] [X X X X X ...] [ | _____ ...] [X X X X X ...] 
*/ bool MemPool::MemChunck::init(size_t blockSize, unsigned short blocks) { assert(blockSize > 0); assert(blocks > 0); // Overflow check assert((blockSize * blocks) / blockSize == blocks); m_pData = (unsigned char*) malloc(blockSize * blocks); if(m_pData == NULL) return false; reset(blockSize, blocks); return true; } void* MemPool::MemChunck::alloc(size_t blockSize) { if (!m_blocksAvailable) return 0; //¼ì²éÄÚ´æÊDz»ÊÇÒÀÈ»¶ÔÆëµÄ assert((m_firstAvailableBlock * blockSize) / blockSize == m_firstAvailableBlock); //°ÑµÚÒ»¸ö¿ÉÓ÷µ»Ø¸øÓµ»¤ unsigned char* pResult = m_pData + (m_firstAvailableBlock * blockSize); m_firstAvailableBlock = *((unsigned short*)pResult); -- m_blocksAvailable; return pResult; } void MemPool::MemChunck::dealloc(void* p, size_t blockSize) { assert(p >= m_pData); unsigned char* toRelease = static_cast<unsigned char*>(p); //ÄÚ´æ¶ÔÆë¼ì²é assert((toRelease - m_pData) % blockSize == 0); //°ÑÊͷŵôµÄ¿é¼ÓÈëµ½±íÍ·Àн¨Ò»¸ö±íÍ·,±íÍ·ÏÂÒ»¸ö¿éÖ¸ÏòÔ­À´µÄµÚÒ»¸ö¿ÉÓÿé * ((unsigned short*)toRelease) = m_firstAvailableBlock; //µÚÒ»¸ö¿ÉÓÿéÖ¸Ïò±íÍ· m_firstAvailableBlock = static_cast<unsigned short>( (toRelease - m_pData) / blockSize ) ; //¿é¶ÔÆë¼ì²é assert(m_firstAvailableBlock == (toRelease - m_pData) / blockSize); ++ m_blocksAvailable; } void MemPool::MemChunck::reset(size_t blockSize, unsigned short blocks) { assert(blockSize > 0); assert(blocks > 0); //Òè³ö¼ì²é assert((blockSize * blocks) / blockSize == blocks); m_firstAvailableBlock = 0; m_blocksAvailable = blocks; //Ìî³äÄÚ´æ¿éµÄÁ´ unsigned short i = 0; unsigned char* p = m_pData; for (; i != blocks; p += blockSize) { unsigned short * pNext =(unsigned short*) p; *pNext = ++i; } } void MemPool::MemChunck::Release() { free((void*)m_pData); } bool MemPool::MemChunck::is_ptr(void* p,size_t blockSize, unsigned short blocks) { if( p < m_pData) return false; //ÄÚ´æ²»ÔÚÕâ¸öÀïÃæ¡£Ò²²»ÊÇËû·ÖÅäµÄ¡£ if( p > m_pData + blockSize * blocks) return false; //Ö¸ÕëûÔÚblockSize±ß½çÉ϶ÔÆ룮¿Ï¶¨²»ÊÇÓÉÕâ¸öMemChunck·ÖÅäµÄ if( ((unsigned char*)p 
- m_pData)%blockSize != 0) return false; return true; } //====================================================================================== //¡¡ÒÔÏÂΪMemPoolµÄÀ࣮ // º¯ÊýµÄʵÏÖ£® //====================================================================================== MemPool::MemPool(size_t block_size, size_t block_reserved,size_t chunck_size) { init(block_size,block_reserved,chunck_size); } bool MemPool::init(size_t block_size, size_t block_reserved,size_t chunck_size) { if(block_size < sizeof(unsigned short) ) block_size = sizeof(unsigned short) ; m_blockSize = block_size; assert(chunck_size > block_size); m_blocksPerChunk = chunck_size / block_size; m_avaliableBlocks = 0; size_t nChuncks = (block_reserved + 1 ) / m_blocksPerChunk; for(size_t i = 0 ; i < nChuncks ; i ++ ) { MemChunck chunck; if( chunck.init(m_blockSize,(unsigned short)m_blocksPerChunk) == false) return false; m_Chuncks.push_back(chunck); m_avaliableBlocks += m_blocksPerChunk; } m_lastChunk = 0; return true; } void MemPool::clear() { size_t nChuncks = m_Chuncks.size(); for(size_t i = 0 ; i < nChuncks ; i ++ ) { m_Chuncks[i].Release(); } m_avaliableBlocks = 0; } void MemPool::clear_unused() { MemChunks::iterator i = m_Chuncks.end(); size_t n = m_Chuncks.size(); while( (n--)!=0) { --i; if(i->m_blocksAvailable == m_blocksPerChunk) { i->Release(); m_Chuncks.erase(i); } } m_lastChunk = 0; } //---------------------------------------------------- //ΪÕâ¸öMemPool±£Áô¶àÉÙ¸ö×ֽڵĿռä //---------------------------------------------------- void MemPool::reserve(size_t block_reserved) { if(m_avaliableBlocks >= block_reserved) return ; size_t nChuncks = (block_reserved - m_avaliableBlocks + 1) / m_blocksPerChunk; for(size_t i = 0 ; i < nChuncks ; i ++ ) { MemChunck chunck; if( chunck.init(m_blockSize,(unsigned short)m_blocksPerChunk) == false) return ; m_Chuncks.push_back(chunck); m_avaliableBlocks += m_blocksPerChunk; } } //---------------------------------------------------- //ÊÍ·ÅûÓÐÓùýµÄ¿Õ¼ä£® 
//---------------------------------------------------- size_t MemPool::unused_block() { return m_avaliableBlocks; } //---------------------------------------------------- //²é¿´»¹ÓжàÉÙ¿Õ¼ä¿ÉÒÔÓà //---------------------------------------------------- size_t MemPool::capacity() { return m_Chuncks.size() * m_blocksPerChunk * m_blockSize; } //-------------------------------------------- -------- //·ÖÅäÒ»¸öÄÚ´æºÍÊÍ·ÅÒ»¸öÄÚ´æ //---------------------------------------------------- void* MemPool::alloc() { //ÏÈÓÃ×îºóÒ»´Î·ÖÅäµÄMemChunckÀ´·ÖÅ䣮 //Èç¹û×îºóÒ»¸ö¿é»¹ÓУ¬¾Í·µ»Ø£® void * ret = m_Chuncks[m_lastChunk].alloc(m_blockSize); if(ret) {m_avaliableBlocks --; return ret;} size_t nChuncks = m_Chuncks.size(); //ÕÒÒ»¸öÓпտéµÄMemChunck for(size_t i = 0 ; i < nChuncks ; i ++ ) { if(m_Chuncks[i].m_blocksAvailable > 0) { //°Ñ×îºóÒ»¸ö·ÖÅä¿éµÄÓαêÖ¸ÏòÓпÕÏеĿ飮 //¿ÉÓÿéµÄ¼ÆÊý¼õÉÙ£¬²¢·ÖÅäÒ»¸ö¿é³öÈ¥ m_lastChunk = (int)i; m_avaliableBlocks --; return m_Chuncks[m_lastChunk].alloc(m_blockSize); } } //½¨Á¢Ò»¸öеÄBlock£¬·Åµ½ÀïÃ森 m_Chuncks.push_back( MemChunck() ); m_lastChunk = (int)nChuncks; //Èç¹û¿é³õʼ»¯Ê§°Ü£¬Ôò±íʾûÓÐÄÚ´æÁË¡£·µ»Ø¿Õ£®±íʾʧ°Ü if( false == m_Chuncks[nChuncks].init(m_blockSize,(unsigned short)m_blocksPerChunk)) return NULL; //н¨Á¢ÁËÒ»¸ö¿é£®·Åµ½×îºó m_avaliableBlocks += (m_blocksPerChunk - 1); return m_Chuncks[m_lastChunk].alloc(m_blockSize); } void MemPool::dealloc(void* p) { if(m_Chuncks[m_lastChunk].is_ptr(p,m_blockSize,(unsigned short)m_blocksPerChunk) == true) { m_Chuncks[m_lastChunk].dealloc(p,m_blockSize); m_avaliableBlocks ++; return ; } size_t nChuncks = m_Chuncks.size(); for(size_t i = 0 ; i < nChuncks ; i ++ ) { if(m_Chuncks[i].is_ptr(p,m_blockSize,(unsigned short)m_blocksPerChunk) == true) { //µ±Ç°ÊÍ·ÅÁËÒ»¸öÄڴ森ÄÇôÕâ¸öChunck¿Ï¶¨ÊÇÓÐÄÚ´æ¿ÉÒÔÓõġ£ m_lastChunk =(int) i; m_Chuncks[m_lastChunk].dealloc(p,m_blockSize); m_avaliableBlocks ++; return ; } } } //---------------------------------------------------- //ÅжÏÒ»¸öÖ¸ÕëÊDz»ÊÇÓÉÕâ¸öPool·ÖÅäµÄ¡£ 
//---------------------------------------------------- bool MemPool::is_ptr(void* p) { size_t nChuncks = m_Chuncks.size(); for(size_t i = 0 ; i < nChuncks ; i ++ ) { if(m_Chuncks[i].is_ptr(p,m_blockSize,(unsigned short)m_blocksPerChunk) == true) { return true; } } return false; } void MemPool::diagnostic() { size_t nChunck = m_Chuncks.size(); size_t totalPoolSize = capacity(); size_t freeSize = 0; for(size_t i = 0 ; i < nChunck ; i ++) { freeSize += m_Chuncks[i].m_blocksAvailable * m_blockSize; } wchar_t buf[512]; swprintf(buf,L"Mempool:Capacity=%d(int8) unused=%d(int8) blockSize=%d nChunk = %d\n ",totalPoolSize,freeSize , m_blockSize, nChunck ); #ifdef _WIN32 OutputDebugStringW(buf); #else XEVOL_LOG(eXL_DEBUG_NORMAL,buf); #endif } END_XEVOL3D_MEM
gpl-2.0
tricky1997/boa-libevent
extras/scandir.c
1
2390
/* Copyright (c) 2000 Petter Reinholdtsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * scandir.c -- if scandir() is missing, make a replacement */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include "compat.h" /* * XXX This is a simple hack version which doesn't sort the data, and * just passes all unsorted. 
*/ int scandir(const char *dir, struct dirent ***namelist, int (*select) (const struct dirent *), int (*compar) (const struct dirent **, const struct dirent **)) { DIR *d = opendir(dir); struct dirent *current; struct dirent **names; int count = 0; int pos = 0; int result = -1; if (NULL == d) return -1; while (NULL != readdir(d)) count++; names = malloc(sizeof (struct dirent *) * count); closedir(d); d = opendir(dir); if (NULL == d) return -1; while (NULL != (current = readdir(d))) { if (NULL == select || select(current)) { struct dirent *copyentry = malloc(current->d_reclen); memcpy(copyentry, current, current->d_reclen); names[pos] = copyentry; pos++; } } result = closedir(d); if (pos != count) names = realloc(names, sizeof (struct dirent *) * pos); *namelist = names; return pos; }
gpl-2.0
AlexAltea/nucleus
nucleus/system/scei/cellos/lv2/sys_fs.cpp
1
3996
/** * (c) 2014-2016 Alexandro Sanchez Bach. All rights reserved. * Released under GPL v2 license. Read LICENSE for more details. */ #include "sys_fs.h" #include "nucleus/emulator.h" #include "nucleus/filesystem/filesystem_virtual.h" #include "nucleus/logger/logger.h" #include "../lv2.h" #include <cstring> namespace sys { // SysCalls HLE_FUNCTION(sys_fs_open, const S08* path, S32 flags, BE<S32>* fd, U64 mode, const void* arg, U64 size) { // Create file if (flags & CELL_FS_O_CREAT) { kernel.vfs.createFile(path); } // Access mode fs::OpenMode openMode; switch (flags & CELL_FS_O_ACCMODE) { case CELL_FS_O_RDONLY: openMode = fs::Read; break; case CELL_FS_O_WRONLY: if (flags & CELL_FS_O_APPEND) { openMode = fs::WriteAppend; } else if (flags & CELL_FS_O_EXCL) { openMode = fs::Write; } else if (flags & CELL_FS_O_TRUNC) { openMode = fs::Write; } else { openMode = fs::Write; } break; case CELL_FS_O_RDWR: if (flags & CELL_FS_O_TRUNC) { kernel.vfs.createFile(path); } openMode = fs::ReadWrite; } if (!kernel.vfs.existsFile(path)) { return CELL_ENOENT; } auto* file = new sys_fs_t(); file->type = CELL_FS_S_IFREG; file->path = path; file->file = kernel.vfs.openFile(path, openMode); *fd = kernel.objects.add(file, SYS_FS_FD_OBJECT); return CELL_OK; } HLE_FUNCTION(sys_fs_read, S32 fd, void* buf, U64 nbytes, BE<U64>* nread) { auto* descriptor = kernel.objects.get<sys_fs_t>(fd); auto* file = descriptor->file; *nread = file->read(buf, nbytes); return CELL_OK; } HLE_FUNCTION(sys_fs_write, S32 fd, const void* buf, U64 nbytes, BE<U64>* nwrite) { auto* descriptor = kernel.objects.get<sys_fs_t>(fd); auto* file = descriptor->file; *nwrite = file->write(buf, nbytes); return CELL_OK; } HLE_FUNCTION(sys_fs_close, S32 fd) { auto* descriptor = kernel.objects.get<sys_fs_t>(fd); delete descriptor->file; return CELL_OK; } HLE_FUNCTION(sys_fs_fstat, S32 fd, sys_fs_stat_t* sb) { // Check requisites if (sb == kernel.memory->ptr(0)) { return CELL_EFAULT; } auto* descriptor = 
kernel.objects.get<sys_fs_t>(fd); auto* file = descriptor->file; auto attributes = file->attributes(); sb->st_atime = attributes.timestamp_access; sb->st_ctime = attributes.timestamp_create; sb->st_mtime = attributes.timestamp_write; sb->st_size = attributes.size; sb->st_blksize = attributes.blocksize; sb->st_gid = 0; // Always zero sb->st_uid = 0; // Always zero sb->st_mode = 0; // TODO return CELL_OK; } HLE_FUNCTION(sys_fs_stat, const S08* path, sys_fs_stat_t* sb) { // Check requisites if (path == kernel.memory->ptr(0) || sb == kernel.memory->ptr(0)) { return CELL_EFAULT; } auto attributes = kernel.vfs.getFileAttributes(path); sb->st_atime = attributes.timestamp_access; sb->st_ctime = attributes.timestamp_create; sb->st_mtime = attributes.timestamp_write; sb->st_size = attributes.size; sb->st_blksize = attributes.blocksize; sb->st_gid = 0; // Always zero sb->st_uid = 0; // Always zero sb->st_mode = 0; // TODO return CELL_OK; } HLE_FUNCTION(sys_fs_fcntl, S32 fd, S32 cmd, void* argv, U32 argc) { logger.warning(LOG_HLE, "LV2 Syscall (0x331) called: sys_fs_fcntl"); return CELL_OK; } HLE_FUNCTION(sys_fs_lseek, S32 fd, S64 offset, S32 whence, BE<U64>* pos) { auto* descriptor = kernel.objects.get<sys_fs_t>(fd); auto* file = descriptor->file; switch (whence) { case SYS_FS_SEEK_SET: file->seek(offset, fs::SeekSet); break; case SYS_FS_SEEK_CUR: file->seek(offset, fs::SeekCur); break; case SYS_FS_SEEK_END: file->seek(offset, fs::SeekEnd); break; } *pos = file->tell(); return CELL_OK; } } // namespace sys
gpl-2.0
MS3FGX/libmackerel
example.c
1
1141
// Simple example program using libmackerel #include <stdio.h> #include "libmackerel.h" int main() { // Misc variables int i; char *addr_buffer = {0}; // How many MACs to create int runs = 5; // Get libmackerel ready mac_init(); // Working with a static MAC addr_buffer = "F0:DE:F1:B7:0E:F6"; printf("MAC: %s\n", addr_buffer); printf("----------------------------\n"); // See if it's a MAC if (mac_verify(mac_get_hex(addr_buffer)) == 0) printf("This is a valid MAC.\n"); else printf("This is not a valid MAC!\n"); // Print OUI printf("OUI: %s\n", mac_get_oui(addr_buffer)); // Encode it with CRC32 printf("CRC: %s\n", mac_encode(addr_buffer)); // Obfuscate printf("Anonymized: %s\n", mac_obfuscate(addr_buffer)); // Hex version printf("HEX notation: %s\n", mac_get_hex(addr_buffer)); // Manufacturer printf("Manufacturer: %s\n", mac_get_vendor(addr_buffer)); printf("\n"); printf("Random MACs:\n"); // Main loop for (i = 1; i <= runs; i++) { // Create random MAC, save to buffer addr_buffer = mac_rand(); // Print printf("%s\n", addr_buffer); } return(0); }
gpl-2.0
Megamouse/rpcs3
rpcs3/Emu/Cell/lv2/sys_memory.cpp
1
7150
#include "stdafx.h" #include "sys_memory.h" #include "Emu/Memory/vm_locking.h" #include "Emu/CPU/CPUThread.h" #include "Emu/Cell/ErrorCodes.h" #include "Emu/Cell/SPUThread.h" #include "Emu/IdManager.h" #include "util/vm.hpp" #include "util/asm.hpp" LOG_CHANNEL(sys_memory); // static shared_mutex s_memstats_mtx; struct sys_memory_address_table { atomic_t<lv2_memory_container*> addrs[65536]{}; }; // Todo: fix order of error checks error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32> alloc_addr) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr); if (!size) { return {CELL_EALIGN, size}; } // Check allocation size const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : flags == 0 ? 0x100000 : 0; if (!align) { return {CELL_EINVAL, flags}; } if (size % align) { return {CELL_EALIGN, size}; } // Get "default" memory container auto& dct = g_fxo->get<lv2_memory_container>(); // Try to get "physical memory" if (!dct.take(size)) { return CELL_ENOMEM; } if (const auto area = vm::reserve_map(align == 0x10000 ? 
vm::user64k : vm::user1m, 0, utils::align(size, 0x10000000), 0x401)) { if (const u32 addr = area->alloc(size, nullptr, align)) { ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(&dct)); if (alloc_addr) { vm::lock_sudo(addr, size); *alloc_addr = addr; return CELL_OK; } // Dealloc using the syscall sys_memory_free(cpu, addr); return CELL_EFAULT; } } dct.used -= size; return CELL_ENOMEM; } error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr); if (!size) { return {CELL_EALIGN, size}; } // Check allocation size const u32 align = flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : flags == 0 ? 0x100000 : 0; if (!align) { return {CELL_EINVAL, flags}; } if (size % align) { return {CELL_EALIGN, size}; } const auto ct = idm::get<lv2_memory_container>(cid, [&](lv2_memory_container& ct) -> CellError { // Try to get "physical memory" if (!ct.take(size)) { return CELL_ENOMEM; } return {}; }); if (!ct) { return CELL_ESRCH; } if (ct.ret) { return ct.ret; } if (const auto area = vm::reserve_map(align == 0x10000 ? vm::user64k : vm::user1m, 0, utils::align(size, 0x10000000), 0x401)) { if (const u32 addr = area->alloc(size)) { ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(ct.ptr.get())); if (alloc_addr) { vm::lock_sudo(addr, size); *alloc_addr = addr; return CELL_OK; } // Dealloc using the syscall sys_memory_free(cpu, addr); return CELL_EFAULT; } } ct->used -= size; return CELL_ENOMEM; } error_code sys_memory_free(cpu_thread& cpu, u32 addr) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_free(addr=0x%x)", addr); const auto ct = addr % 0x10000 ? 
nullptr : g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(nullptr); if (!ct) { return {CELL_EINVAL, addr}; } const auto size = (ensure(vm::dealloc(addr))); reader_lock{id_manager::g_mutex}, ct->used -= size; return CELL_OK; } error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_page_attr_t> attr) { cpu.state += cpu_flag::wait; sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr, attr); vm::reader_lock rlock; if (!vm::check_addr(addr) || addr >= SPU_FAKE_BASE_ADDR) { return CELL_EINVAL; } if (!vm::check_addr(attr.addr(), vm::page_readable, attr.size())) { return CELL_EFAULT; } attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE (TODO) attr->access_right = addr >> 28 == 0xdu ? SYS_MEMORY_ACCESS_RIGHT_PPU_THR : SYS_MEMORY_ACCESS_RIGHT_ANY;// (TODO) if (vm::check_addr(addr, vm::page_1m_size)) { attr->page_size = 0x100000; } else if (vm::check_addr(addr, vm::page_64k_size)) { attr->page_size = 0x10000; } else { attr->page_size = 4096; } attr->pad = 0; // Always write 0 return CELL_OK; } error_code sys_memory_get_user_memory_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)", mem_info); // Get "default" memory container auto& dct = g_fxo->get<lv2_memory_container>(); ::reader_lock lock(s_memstats_mtx); mem_info->total_user_memory = dct.size; mem_info->available_user_memory = dct.size - dct.used; // Scan other memory containers idm::select<lv2_memory_container>([&](u32, lv2_memory_container& ct) { mem_info->total_user_memory -= ct.size; }); return CELL_OK; } error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat) { cpu.state += cpu_flag::wait; sys_memory.todo("sys_memory_get_user_memory_stat(mem_stat=*0x%x)", mem_stat); return CELL_OK; } error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u32 size) { cpu.state += 
cpu_flag::wait; sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid, size); // Round down to 1 MB granularity size &= ~0xfffff; if (!size) { return CELL_ENOMEM; } auto& dct = g_fxo->get<lv2_memory_container>(); std::lock_guard lock(s_memstats_mtx); // Try to obtain "physical memory" from the default container if (!dct.take(size)) { return CELL_ENOMEM; } // Create the memory container if (const u32 id = idm::make<lv2_memory_container>(size)) { *cid = id; return CELL_OK; } dct.used -= size; return CELL_EAGAIN; } error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid); std::lock_guard lock(s_memstats_mtx); const auto ct = idm::withdraw<lv2_memory_container>(cid, [](lv2_memory_container& ct) -> CellError { // Check if some memory is not deallocated (the container cannot be destroyed in this case) if (!ct.used.compare_and_swap_test(0, ct.size)) { return CELL_EBUSY; } return {}; }); if (!ct) { return CELL_ESRCH; } if (ct.ret) { return ct.ret; } // Return "physical memory" to the default container g_fxo->get<lv2_memory_container>().used -= ct->size; return CELL_OK; } error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid) { cpu.state += cpu_flag::wait; sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid); const auto ct = idm::get<lv2_memory_container>(cid); if (!ct) { return CELL_ESRCH; } mem_info->total_user_memory = ct->size; // Total container memory mem_info->available_user_memory = ct->size - ct->used; // Available container memory return CELL_OK; }
gpl-2.0
ethan2014/calories-lib
src/testing.cpp
1
5799
#include <iostream> #include <limits> #include "ct.hpp" void prompt_user() { std::string name; int sex; int age; int height_ft; int height_in; int height_cm; int weight_lb; int weight_kg; int exercise_level; int weight_goal; float weight_gain_goal; float weight_lose_goal; int measurement_system; std::cout << "-- welcome to calorie tracker --" << std::endl; std::cout << "what is your name? "; std::getline(std::cin, name); std::cout << "sex? "; std::cin >> sex; std::cout << "age? "; std::cin >> age; std::cout << "measurement system? "; std::cin >> measurement_system; std::cout << "exercise level? "; std::cin >> exercise_level; std::cout << "weight goal? "; std::cin >> weight_goal; ct::user::set_name(name); ct::user::set_measurement_system(measurement_system); ct::user::set_age(age); ct::user::set_sex(sex); ct::user::set_exercise_level(exercise_level); ct::user::set_weight_goal(weight_goal); if (measurement_system == ct::user::imperial) { std::cout << "height in feet? "; std::cin >> height_ft; std::cout << "inches? "; std::cin >> height_in; std::cout << "weight in pounds? "; std::cin >> weight_lb; if (weight_goal == ct::user::gain_weight) { std::cout << "weight gain per week (lb)? "; std::cin >> weight_gain_goal; ct::user::set_weight_gain_goal(weight_gain_goal); } else { std::cout << "weight lose per week (lb)? "; std::cin >> weight_lose_goal; ct::user::set_weight_lose_goal(weight_lose_goal); } ct::user::set_height(height_ft, height_in); ct::user::set_weight(weight_lb); } else { std::cout << "height in cm? "; std::cin >> height_cm; std::cout << "weight in kg? "; std::cin >> weight_kg; if (weight_goal == ct::user::gain_weight) { std::cout << "weight gain per week (kg)? "; std::cin >> weight_gain_goal; ct::user::set_weight_gain_goal(weight_gain_goal); } else { std::cout << "weight lose per week (kg)? 
"; std::cin >> weight_lose_goal; ct::user::set_weight_lose_goal(weight_lose_goal); } ct::user::set_height(height_cm); ct::user::set_weight(weight_kg); } } void list_meals() { if (!ct::day::current_day.has_meals()) { std::cout << "no meals eaten yet today" << std::endl; return; } std::vector<ct::meal::Meal> meals = ct::day::current_day.meals; for (auto meal : meals) { std::cout << "--------------------" << std::endl; std::cout << "meal name: " << meal.name << std::endl; std::cout << "total calories: " << meal.calories() << std::endl; for (auto food : meal.foods) { std::cout << "food: " << food.name() << std::endl; std::cout << "servings: " << food.servings << std::endl; std::cout << "calories: " << food.calories() << std::endl; } } std::cout << "--------------------" << std::endl; std::cout << "calories for today: " << ct::day::current_day.calories() << " / " << ct::user::calculate_calories() << std::endl; } void new_food(std::string name) { ct::food::FoodInfo info; info.name = name; std::cout << "serving size? "; std::cin >> info[ct::food::serving_size]; std::cout << "calories per serving? "; std::cin >> info[ct::food::calories]; ct::food::save_food_info(info); } void add_meal() { int num_items = 0; std::string name; std::cout << "--------------------" << std::endl; std::cout << "how many food items? "; std::cin >> num_items; std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); std::cout << "what is this meal called? "; std::getline(std::cin, name); ct::meal::Meal m; m.name = name; for (int i = 0; i < num_items; i++) { ct::food::FoodItem food; ct::food::FoodInfo info; int amount; std::cout << "name of item? "; std::getline(std::cin, name); if (!ct::food::food_info_exists(name)) { std::cout << "I dont know what that is yet, lets add it" << std::endl; new_food(name); } std::cout << "how many servings you eat? 
"; std::cin >> amount; info = ct::food::known_foods[name]; food.info = info; food.servings = amount; m.add_food(food); } ct::day::current_day.add_meal(m); } void remove_meal() { std::string name; std::cout << "name of meal? "; std::getline(std::cin, name); } int main() { // this is the first thing that gets called for the calorie tracker (ct) // library, it will initialize everything it needs (see ct.cpp for the // implementation of this function) ct::init(); // the ct library will know if the user has never used to program before, // so if thats the case we need their personal info if (ct::user::needs_user_data()) { prompt_user(); ct::user::save(); } else { std::cout << "welcome back " << ct::user::name << std::endl; } // this will just tell the user how many calories they need to eat every day, it // can throw exception so it is wrapped in a try/catch (see user.cpp for the implementation // of calculate_calories()) try { std::cout << "to achieve your goal, you must consume " << ct::user::calculate_calories() << " calories every day" << std::endl; } catch (std::string &s) { std::cout << s << std::endl; } // this will just continue to ask the user what they want to do, its like // the main menu for the program int input; do { std::cout << "++++++++++++++++++++" << std::endl; std::cout << "what do now?" << std::endl; std::cout << "-1: quit" << std::endl; std::cout << "0: list todays meals" << std::endl; std::cout << "1: add new meal" << std::endl; std::cout << "2: remove meal" << std::endl; std::cout << "> "; std::cin >> input; std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); if (input == 0) { list_meals(); } else if (input == 1) { add_meal(); } else if (input == 2) { remove_meal(); } } while (input != -1); std::cout << "goodbye!" << std::endl; }
gpl-2.0
zhaoxianpeng/linux-mini2440
drivers/input/s3c2410_ts.c
1
5686
#include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/init.h> #include <linux/serio.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/plat-s3c/regs-adc.h> #include <mach/regs-gpio.h> /* For ts.dev.id.version */ #define S3C2410TSVERSION 0x0101 #define WAIT4INT(x) (((x)<<8) | \ S3C2410_ADCTSC_YM_SEN | S3C2410_ADCTSC_YP_SEN | S3C2410_ADCTSC_XP_SEN | \ S3C2410_ADCTSC_XY_PST(3)) #define AUTOPST (S3C2410_ADCTSC_YM_SEN | S3C2410_ADCTSC_YP_SEN | S3C2410_ADCTSC_XP_SEN | \ S3C2410_ADCTSC_AUTO_PST | S3C2410_ADCTSC_XY_PST(0)) static char *s3c2410ts_name = "s3c2410 TouchScreen"; static struct input_dev *dev; static long xp; static long yp; static int count; static void __iomem *base_addr; static inline void s3c2410_ts_connect(void) { s3c2410_gpio_cfgpin(S3C2410_GPG12, S3C2410_GPG12_XMON); s3c2410_gpio_cfgpin(S3C2410_GPG13, S3C2410_GPG13_nXPON); s3c2410_gpio_cfgpin(S3C2410_GPG14, S3C2410_GPG14_YMON); s3c2410_gpio_cfgpin(S3C2410_GPG15, S3C2410_GPG15_nYPON); } static void touch_timer_fire(unsigned long data) { unsigned long data0; unsigned long data1; int updown; data0 = ioread32(base_addr+S3C2410_ADCDAT0); data1 = ioread32(base_addr+S3C2410_ADCDAT1); updown = (!(data0 & S3C2410_ADCDAT0_UPDOWN)) && (!(data1 & S3C2410_ADCDAT0_UPDOWN)); if (updown) { if (count != 0) { long tmp; tmp = xp; xp = yp; yp = tmp; xp >>= 2; yp >>= 2; input_report_abs(dev, ABS_X, xp); input_report_abs(dev, ABS_Y, yp); input_report_key(dev, BTN_TOUCH, 1); input_report_abs(dev, ABS_PRESSURE, 1); input_sync(dev); } xp = 0; yp = 0; count = 0; iowrite32(S3C2410_ADCTSC_PULL_UP_DISABLE | AUTOPST, base_addr+S3C2410_ADCTSC); iowrite32(ioread32(base_addr+S3C2410_ADCCON) | S3C2410_ADCCON_ENABLE_START, base_addr+S3C2410_ADCCON); } else { count = 0; input_report_key(dev, BTN_TOUCH, 0); input_report_abs(dev, ABS_PRESSURE, 0); input_sync(dev); 
iowrite32(WAIT4INT(0), base_addr+S3C2410_ADCTSC); } } static struct timer_list touch_timer = TIMER_INITIALIZER(touch_timer_fire, 0, 0); static irqreturn_t stylus_updown(int irq, void *dev_id) { unsigned long data0; unsigned long data1; int updown; data0 = ioread32(base_addr+S3C2410_ADCDAT0); data1 = ioread32(base_addr+S3C2410_ADCDAT1); updown = (!(data0 & S3C2410_ADCDAT0_UPDOWN)) && (!(data1 & S3C2410_ADCDAT0_UPDOWN)); if (updown) touch_timer_fire(0); return IRQ_HANDLED; } static irqreturn_t stylus_action(int irq, void *dev_id) { unsigned long data0; unsigned long data1; data0 = ioread32(base_addr+S3C2410_ADCDAT0); data1 = ioread32(base_addr+S3C2410_ADCDAT1); xp += data0 & S3C2410_ADCDAT0_XPDATA_MASK; yp += data1 & S3C2410_ADCDAT1_YPDATA_MASK; count++; if (count < (1<<2)) { iowrite32(S3C2410_ADCTSC_PULL_UP_DISABLE | AUTOPST, base_addr+S3C2410_ADCTSC); iowrite32(ioread32(base_addr+S3C2410_ADCCON) | S3C2410_ADCCON_ENABLE_START, base_addr+S3C2410_ADCCON); } else { mod_timer(&touch_timer, jiffies+1); iowrite32(WAIT4INT(1), base_addr+S3C2410_ADCTSC); } return IRQ_HANDLED; } static struct clk *adc_clock; static int __init s3c2410ts_init(void) { struct input_dev *input_dev; adc_clock = clk_get(NULL, "adc"); if (!adc_clock) { printk(KERN_ERR "failed to get adc clock source\n"); return -ENOENT; } clk_enable(adc_clock); base_addr=ioremap(S3C2410_PA_ADC,0x20); if (base_addr == NULL) { printk(KERN_ERR "Failed to remap register block\n"); return -ENOMEM; } /* Configure GPIOs */ s3c2410_ts_connect(); iowrite32(S3C2410_ADCCON_PRSCEN | S3C2410_ADCCON_PRSCVL(0xFF),\ base_addr+S3C2410_ADCCON); iowrite32(0xffff, base_addr+S3C2410_ADCDLY); iowrite32(WAIT4INT(0), base_addr+S3C2410_ADCTSC); /* Initialise input stuff */ input_dev = input_allocate_device(); if (!input_dev) { printk(KERN_ERR "Unable to allocate the input device !!\n"); return -ENOMEM; } dev = input_dev; dev->evbit[0] = BIT(EV_SYN) | BIT(EV_KEY) | BIT(EV_ABS); dev->keybit[BITS_TO_LONGS(BTN_TOUCH)] = BIT(BTN_TOUCH); 
input_set_abs_params(dev, ABS_X, 0, 0x3FF, 0, 0); input_set_abs_params(dev, ABS_Y, 0, 0x3FF, 0, 0); input_set_abs_params(dev, ABS_PRESSURE, 0, 1, 0, 0); dev->name = s3c2410ts_name; dev->id.bustype = BUS_RS232; dev->id.vendor = 0xDEAD; dev->id.product = 0xBEEF; dev->id.version = S3C2410TSVERSION; /* Get irqs */ if (request_irq(IRQ_ADC, stylus_action, IRQF_SAMPLE_RANDOM, "s3c2410_action", dev)) { printk(KERN_ERR "s3c2410_ts.c: Could not allocate ts IRQ_ADC !\n"); iounmap(base_addr); return -EIO; } if (request_irq(IRQ_TC, stylus_updown, IRQF_SAMPLE_RANDOM, "s3c2410_action", dev)) { printk(KERN_ERR "s3c2410_ts.c: Could not allocate ts IRQ_TC !\n"); iounmap(base_addr); return -EIO; } printk(KERN_INFO "%s successfully loaded\n", s3c2410ts_name); /* All went ok, so register to the input system */ input_register_device(dev); return 0; } static void __exit s3c2410ts_exit(void) { disable_irq(IRQ_ADC); disable_irq(IRQ_TC); free_irq(IRQ_TC,dev); free_irq(IRQ_ADC,dev); if (adc_clock) { clk_disable(adc_clock); clk_put(adc_clock); adc_clock = NULL; } input_unregister_device(dev); iounmap(base_addr); } module_init(s3c2410ts_init); module_exit(s3c2410ts_exit);
gpl-2.0
kofemann/linux-redpatch
drivers/net/atlx/atlx.c
1
7241
/* atlx.c -- common functions for Attansic network drivers * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Including this file like a header is a temporary hack, I promise. 
-- CHS */ #ifndef ATLX_C #define ATLX_C #include <linux/device.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include "atlx.h" static struct atlx_spi_flash_dev flash_table[] = { /* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, }; static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atlx_mii_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } /* * atlx_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atlx_set_mac(struct net_device *netdev, void *p) { struct atlx_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); netdev->addr_assign_type = NET_ADDR_PERM; atlx_set_mac_addr(&adapter->hw); return 0; } static void atlx_check_for_link(struct atlx_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 phy_data = 0; spin_lock(&adapter->lock); adapter->phy_timer_pending = false; atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); spin_unlock(&adapter->lock); /* notify upper layer link down ASAP */ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ if (netif_carrier_ok(netdev)) { /* old link 
state: Up */
	dev_info(&adapter->pdev->dev, "%s link is down\n", netdev->name);
	adapter->link_speed = SPEED_0;
	netif_carrier_off(netdev);
	}
	}
	schedule_work(&adapter->link_chg_task);
}

/*
 * atlx_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atlx_set_multi(struct net_device *netdev)
{
	struct atlx_adapter *adapter = netdev_priv(netdev);
	struct atlx_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u32 rctl;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
	if (netdev->flags & IFF_PROMISC)
		rctl |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI) {
		/* all-multi: accept every multicast frame, but drop
		 * promiscuous so unicast filtering still applies */
		rctl |= MAC_CTRL_MC_ALL_EN;
		rctl &= ~MAC_CTRL_PROMIS_EN;
	} else
		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);

	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);

	/* clear the old settings from the multicast hash table */
	/* NOTE(review): the hash table occupies two consecutive 32-bit
	 * registers; the second write targets REG_RX_HASH_TABLE + 4. */
	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

	/* compute mc addresses' hash value ,and put it into hash table */
	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
		atlx_hash_set(hw, hash_value);
	}
}

/*
 * atlx_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void atlx_irq_enable(struct atlx_adapter *adapter)
{
	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
	/* read back -- presumably flushes the posted write to the NIC;
	 * TODO confirm against hardware documentation */
	ioread32(adapter->hw.hw_addr + REG_IMR);
}

/*
 * atlx_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void atlx_irq_disable(struct atlx_adapter *adapter)
{
	/* mask all interrupt sources, flush, then wait for any handler
	 * already running on another CPU to finish */
	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
	ioread32(adapter->hw.hw_addr + REG_IMR);
	synchronize_irq(adapter->pdev->irq);
}

/*
 * atlx_clear_phy_int - acknowledge a pending PHY interrupt
 * @adapter: board private structure
 *
 * Reads PHY register 19 under the adapter lock and discards the value.
 * NOTE(review): register 19 is presumably the PHY interrupt-status
 * register, which clears on read -- confirm against the PHY datasheet.
 */
static void atlx_clear_phy_int(struct atlx_adapter *adapter)
{
	u16 phy_data;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	atlx_read_phy_reg(&adapter->hw, 19, &phy_data);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * atlx_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atlx_tx_timeout(struct net_device *netdev)
{
	struct atlx_adapter *adapter = netdev_priv(netdev);
	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

/*
 * atlx_link_chg_task - deal with link change event Out of interrupt context
 */
static void atlx_link_chg_task(struct work_struct *work)
{
	struct atlx_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atlx_adapter, link_chg_task);

	spin_lock_irqsave(&adapter->lock, flags);
	atlx_check_link(adapter);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * atlx_vlan_rx_register - enable/disable hardware VLAN tag stripping
 * @netdev: network interface device structure
 * @grp: VLAN group being registered, or NULL on unregister
 *
 * Toggles MAC_CTRL_RMV_VLAN according to whether a vlan_group is
 * present, and caches the group for atlx_restore_vlan().
 */
static void atlx_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *grp)
{
	struct atlx_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&adapter->lock, flags);
	/* atlx_irq_disable(adapter);    FIXME: confirm/remove */
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
		ctrl |= MAC_CTRL_RMV_VLAN;
		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
		ctrl &= ~MAC_CTRL_RMV_VLAN;
		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
	}

	/* atlx_irq_enable(adapter); FIXME */
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/* Re-apply the cached VLAN group, e.g. after a device reset. */
static void atlx_restore_vlan(struct atlx_adapter *adapter)
{
	atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}

#endif /* ATLX_C */
gpl-2.0
skyinsky/cobaya
librcfproto/swig/RCFProto_Java_impl.cpp
1
266550
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 2.0.4 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ #define SWIGJAVA #define SWIG_DIRECTORS #ifdef __cplusplus /* SwigValueWrapper is described in swig.swg */ template<typename T> class SwigValueWrapper { struct SwigMovePointer { T *ptr; SwigMovePointer(T *p) : ptr(p) { } ~SwigMovePointer() { delete ptr; } SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; } } pointer; SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs); SwigValueWrapper(const SwigValueWrapper<T>& rhs); public: SwigValueWrapper() : pointer(0) { } SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; } operator T&() const { return *pointer.ptr; } T *operator&() { return pointer.ptr; } }; template <typename T> T SwigValueInit() { return T(); } #endif /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* exporting methods */ #if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else 
#   define SWIGEXPORT __declspec(dllexport)
#  endif
# else
#  if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
#   define SWIGEXPORT __attribute__ ((visibility("default")))
#  else
#   define SWIGEXPORT
#  endif
# endif
#endif

/* calling conventions for Windows */
#ifndef SWIGSTDCALL
# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#  define SWIGSTDCALL __stdcall
# else
#  define SWIGSTDCALL
# endif
#endif

/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
# define _CRT_SECURE_NO_DEPRECATE
#endif

/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
# define _SCL_SECURE_NO_DEPRECATE
#endif


/* Fix for jlong on some versions of gcc on Windows */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
  typedef long long __int64;
#endif

/* Fix for jlong on 64-bit x86 Solaris */
#if defined(__x86_64)
# ifdef _LP64
#  undef _LP64
# endif
#endif

#include <jni.h>
#include <stdlib.h>
#include <string.h>


/* Support for throwing Java exceptions */
/* SWIG-internal error codes mapped onto java.lang/java.io exception
 * classes thrown back across JNI. */
typedef enum {
  SWIG_JavaOutOfMemoryError = 1,
  SWIG_JavaIOException,
  SWIG_JavaRuntimeException,
  SWIG_JavaIndexOutOfBoundsException,
  SWIG_JavaArithmeticException,
  SWIG_JavaIllegalArgumentException,
  SWIG_JavaNullPointerException,
  SWIG_JavaDirectorPureVirtual,
  SWIG_JavaUnknownError
} SWIG_JavaExceptionCodes;

typedef struct {
  SWIG_JavaExceptionCodes code;
  const char *java_exception;
} SWIG_JavaExceptions_t;

/* Throw the Java exception matching `code`, carrying `msg`.
 * Any pending JNI exception is cleared first; unknown codes fall through
 * to java.lang.UnknownError via the zero-code sentinel entry. */
static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) {
  jclass excep;
  static const SWIG_JavaExceptions_t java_exceptions[] = {
    { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" },
    { SWIG_JavaIOException, "java/io/IOException" },
    { SWIG_JavaRuntimeException, "java/lang/RuntimeException" },
    { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" },
    { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" },
    { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" },
    { SWIG_JavaNullPointerException, "java/lang/NullPointerException" },
    { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" },
    { SWIG_JavaUnknownError, "java/lang/UnknownError" },
    { (SWIG_JavaExceptionCodes)0, "java/lang/UnknownError" }
  };
  const SWIG_JavaExceptions_t *except_ptr = java_exceptions;

  /* linear scan; terminated by the code==0 sentinel */
  while (except_ptr->code != code && except_ptr->code)
    except_ptr++;

  jenv->ExceptionClear();
  excep = jenv->FindClass(except_ptr->java_exception);
  if (excep)
    jenv->ThrowNew(excep, msg);
}


/* Contract support */

#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else

/* Errors in SWIG */
#define SWIG_UnknownError -1
#define SWIG_IOError -2
#define SWIG_RuntimeError -3
#define SWIG_IndexError -4
#define SWIG_TypeError -5
#define SWIG_DivisionByZero -6
#define SWIG_OverflowError -7
#define SWIG_SyntaxError -8
#define SWIG_ValueError -9
#define SWIG_SystemError -10
#define SWIG_AttributeError -11
#define SWIG_MemoryError -12
#define SWIG_NullReferenceError -13


/* -----------------------------------------------------------------------------
 * director.swg
 *
 * This file contains support for director classes that proxy
 * method calls from C++ to Java extensions.
 * ----------------------------------------------------------------------------- */

#ifdef __cplusplus

#if defined(DEBUG_DIRECTOR_OWNED)
#include <iostream>
#endif

namespace Swig {
  /* Java object wrapper */
  /* Owns the JNI reference to the Java half of a director pair.  The
   * reference is weak-global while Java owns the C++ object (so the Java
   * proxy can still be collected) and strong-global while C++ owns it
   * (so the proxy is pinned alive). */
  class JObjectWrapper {
  public:
    JObjectWrapper() : jthis_(NULL), weak_global_(true) {
    }

    ~JObjectWrapper() {
      jthis_ = NULL;
      weak_global_ = true;
    }

    /* Store `jobj` exactly once; later calls are no-ops returning false. */
    bool set(JNIEnv *jenv, jobject jobj, bool mem_own, bool weak_global) {
      if (!jthis_) {
        weak_global_ = weak_global;
        if (jobj)
          jthis_ = ((weak_global_ || !mem_own) ? jenv->NewWeakGlobalRef(jobj) : jenv->NewGlobalRef(jobj));
#if defined(DEBUG_DIRECTOR_OWNED)
        std::cout << "JObjectWrapper::set(" << jobj << ", " << (weak_global ? "weak_global" : "global_ref") << ") -> " << jthis_ << std::endl;
#endif
        return true;
      } else {
#if defined(DEBUG_DIRECTOR_OWNED)
        std::cout << "JObjectWrapper::set(" << jobj << ", " << (weak_global ? "weak_global" : "global_ref") << ") -> already set" << std::endl;
#endif
        return false;
      }
    }

    /* Returns a NEW local ref (caller must delete it), or NULL if unset. */
    jobject get(JNIEnv *jenv) const {
#if defined(DEBUG_DIRECTOR_OWNED)
      std::cout << "JObjectWrapper::get(";
      if (jthis_)
        std::cout << jthis_;
      else
        std::cout << "null";
      std::cout << ") -> return new local ref" << std::endl;
#endif
      return (jthis_ ? jenv->NewLocalRef(jthis_) : jthis_);
    }

    /* Drop whichever (weak-)global ref is held and reset to empty. */
    void release(JNIEnv *jenv) {
#if defined(DEBUG_DIRECTOR_OWNED)
      std::cout << "JObjectWrapper::release(" << jthis_ << "): " << (weak_global_ ? "weak global ref" : "global ref") << std::endl;
#endif
      if (jthis_) {
        if (weak_global_) {
          /* only delete if the weak ref has not already been collected */
          if (jenv->IsSameObject(jthis_, NULL) == JNI_FALSE)
            jenv->DeleteWeakGlobalRef((jweak)jthis_);
        } else
          jenv->DeleteGlobalRef(jthis_);
      }
      jthis_ = NULL;
      weak_global_ = true;
    }

    /* Raw stored reference; no new local ref is created. */
    jobject peek() {
      return jthis_;
    }

    /* Java proxy releases ownership of C++ object, C++ object is now
       responsible for destruction (creates NewGlobalRef to pin Java proxy) */
    void java_change_ownership(JNIEnv *jenv, jobject jself, bool take_or_release) {
      if (take_or_release) {                /* Java takes ownership of C++ object's lifetime. */
        if (!weak_global_) {
          /* swap strong ref for weak so the proxy may be collected */
          jenv->DeleteGlobalRef(jthis_);
          jthis_ = jenv->NewWeakGlobalRef(jself);
          weak_global_ = true;
        }
      } else { /* Java releases ownership of C++ object's lifetime */
        if (weak_global_) {
          /* swap weak ref for strong so the proxy stays reachable */
          jenv->DeleteWeakGlobalRef((jweak)jthis_);
          jthis_ = jenv->NewGlobalRef(jself);
          weak_global_ = false;
        }
      }
    }

  private:
    /* pointer to Java object */
    jobject jthis_;
    /* Local or global reference flag */
    bool weak_global_;
  };

  /* director base class */
  /* Base of the generated SwigDirector_* classes: caches the JavaVM so
   * upcalls from arbitrary native threads can attach and get a JNIEnv. */
  class Director {
    /* pointer to Java virtual machine */
    JavaVM *swig_jvm_;

  protected:
#if defined (_MSC_VER) && (_MSC_VER<1300)
    class JNIEnvWrapper;
    friend class JNIEnvWrapper;
#endif
    /* Utility class for managing the JNI environment */
    /* RAII: attaches the current thread to the JVM on construction and
     * (unless disabled) detaches it again on destruction. */
    class JNIEnvWrapper {
      const Director *director_;
      JNIEnv *jenv_;
    public:
      JNIEnvWrapper(const Director *director) : director_(director), jenv_(0) {
#if defined(SWIG_JAVA_ATTACH_CURRENT_THREAD_AS_DAEMON)
        // Attach a daemon thread to the JVM. Useful when the JVM should not wait for
        // the thread to exit upon shutdown. Only for jdk-1.4 and later.
        director_->swig_jvm_->AttachCurrentThreadAsDaemon((void **) &jenv_, NULL);
#else
        director_->swig_jvm_->AttachCurrentThread((void **) &jenv_, NULL);
#endif
      }
      ~JNIEnvWrapper() {
#if !defined(SWIG_JAVA_NO_DETACH_CURRENT_THREAD)
        // Some JVMs, eg jdk-1.4.2 and lower on Solaris have a bug and crash with the DetachCurrentThread call.
        // However, without this call, the JVM hangs on exit when the thread was not created by the JVM and creates a memory leak.
        director_->swig_jvm_->DetachCurrentThread();
#endif
      }
      JNIEnv *getJNIEnv() const {
        return jenv_;
      }
    };

    /* Java object wrapper */
    JObjectWrapper swig_self_;

    /* Disconnect director from Java object */
    /* Invokes the Java proxy's ()V `disconn_method` (if the weak ref is
     * still live and the method exists) so the proxy stops delegating
     * to this destroyed C++ object. */
    void swig_disconnect_director_self(const char *disconn_method) {
      JNIEnvWrapper jnienv(this) ;
      JNIEnv *jenv = jnienv.getJNIEnv() ;
      jobject jobj = swig_self_.peek();
#if defined(DEBUG_DIRECTOR_OWNED)
      std::cout << "Swig::Director::disconnect_director_self(" << jobj << ")" << std::endl;
#endif
      if (jobj && jenv->IsSameObject(jobj, NULL) == JNI_FALSE) {
        jmethodID disconn_meth = jenv->GetMethodID(jenv->GetObjectClass(jobj), disconn_method, "()V");
        if (disconn_meth) {
#if defined(DEBUG_DIRECTOR_OWNED)
          std::cout << "Swig::Director::disconnect_director_self upcall to " << disconn_method << std::endl;
#endif
          jenv->CallVoidMethod(jobj, disconn_meth);
        }
      }
    }

  public:
    Director(JNIEnv *jenv) : swig_jvm_((JavaVM *) NULL), swig_self_() {
      /* Acquire the Java VM pointer */
      jenv->GetJavaVM(&swig_jvm_);
    }

    virtual ~Director() {
      JNIEnvWrapper jnienv(this) ;
      JNIEnv *jenv = jnienv.getJNIEnv() ;
      swig_self_.release(jenv);
    }

    bool swig_set_self(JNIEnv *jenv, jobject jself, bool mem_own, bool weak_global) {
      return swig_self_.set(jenv, jself, mem_own, weak_global);
    }

    jobject swig_get_self(JNIEnv *jenv) const {
      return swig_self_.get(jenv);
    }

    // Change C++ object's ownership, relative to Java
    void swig_java_change_ownership(JNIEnv *jenv, jobject jself, bool take_or_release) {
      swig_self_.java_change_ownership(jenv, jself, take_or_release);
    }
  };
}

#endif /* __cplusplus */


namespace Swig {
  namespace {
    /* populated by the JNI registration code; used for director upcalls
     * into the generated RCFProtoJNI Java class */
    jclass jclass_RCFProtoJNI = NULL;
    jmethodID director_methids[3];
  }
}

#if defined(SWIG_NOINCLUDE) || defined(SWIG_NOARRAYS)


int SWIG_JavaArrayInBool (JNIEnv *jenv, jboolean **jarr, bool **carr, jbooleanArray input);
void SWIG_JavaArrayArgoutBool (JNIEnv *jenv, jboolean *jarr, bool *carr, jbooleanArray input);
jbooleanArray SWIG_JavaArrayOutBool (JNIEnv *jenv, bool *result, jsize sz);


int SWIG_JavaArrayInSchar
(JNIEnv *jenv, jbyte **jarr, signed char **carr, jbyteArray input);
void SWIG_JavaArrayArgoutSchar (JNIEnv *jenv, jbyte *jarr, signed char *carr, jbyteArray input);
jbyteArray SWIG_JavaArrayOutSchar (JNIEnv *jenv, signed char *result, jsize sz);


int SWIG_JavaArrayInUchar (JNIEnv *jenv, jshort **jarr, unsigned char **carr, jshortArray input);
void SWIG_JavaArrayArgoutUchar (JNIEnv *jenv, jshort *jarr, unsigned char *carr, jshortArray input);
jshortArray SWIG_JavaArrayOutUchar (JNIEnv *jenv, unsigned char *result, jsize sz);


int SWIG_JavaArrayInShort (JNIEnv *jenv, jshort **jarr, short **carr, jshortArray input);
void SWIG_JavaArrayArgoutShort (JNIEnv *jenv, jshort *jarr, short *carr, jshortArray input);
jshortArray SWIG_JavaArrayOutShort (JNIEnv *jenv, short *result, jsize sz);


int SWIG_JavaArrayInUshort (JNIEnv *jenv, jint **jarr, unsigned short **carr, jintArray input);
void SWIG_JavaArrayArgoutUshort (JNIEnv *jenv, jint *jarr, unsigned short *carr, jintArray input);
jintArray SWIG_JavaArrayOutUshort (JNIEnv *jenv, unsigned short *result, jsize sz);


int SWIG_JavaArrayInInt (JNIEnv *jenv, jint **jarr, int **carr, jintArray input);
void SWIG_JavaArrayArgoutInt (JNIEnv *jenv, jint *jarr, int *carr, jintArray input);
jintArray SWIG_JavaArrayOutInt (JNIEnv *jenv, int *result, jsize sz);


int SWIG_JavaArrayInUint (JNIEnv *jenv, jlong **jarr, unsigned int **carr, jlongArray input);
void SWIG_JavaArrayArgoutUint (JNIEnv *jenv, jlong *jarr, unsigned int *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutUint (JNIEnv *jenv, unsigned int *result, jsize sz);


int SWIG_JavaArrayInLong (JNIEnv *jenv, jint **jarr, long **carr, jintArray input);
void SWIG_JavaArrayArgoutLong (JNIEnv *jenv, jint *jarr, long *carr, jintArray input);
jintArray SWIG_JavaArrayOutLong (JNIEnv *jenv, long *result, jsize sz);


int SWIG_JavaArrayInUlong (JNIEnv *jenv, jlong **jarr, unsigned long **carr, jlongArray input);
void SWIG_JavaArrayArgoutUlong (JNIEnv *jenv, jlong *jarr, unsigned long *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutUlong (JNIEnv *jenv, unsigned long *result, jsize sz);


int SWIG_JavaArrayInLonglong (JNIEnv *jenv, jlong **jarr, jlong **carr, jlongArray input);
void SWIG_JavaArrayArgoutLonglong (JNIEnv *jenv, jlong *jarr, jlong *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutLonglong (JNIEnv *jenv, jlong *result, jsize sz);


int SWIG_JavaArrayInFloat (JNIEnv *jenv, jfloat **jarr, float **carr, jfloatArray input);
void SWIG_JavaArrayArgoutFloat (JNIEnv *jenv, jfloat *jarr, float *carr, jfloatArray input);
jfloatArray SWIG_JavaArrayOutFloat (JNIEnv *jenv, float *result, jsize sz);


int SWIG_JavaArrayInDouble (JNIEnv *jenv, jdouble **jarr, double **carr, jdoubleArray input);
void SWIG_JavaArrayArgoutDouble (JNIEnv *jenv, jdouble *jarr, double *carr, jdoubleArray input);
jdoubleArray SWIG_JavaArrayOutDouble (JNIEnv *jenv, double *result, jsize sz);


#else


/* bool[] support */
/* Each primitive type gets the same In/Argout/Out trio:
 *   In     -- pin the Java array elements and copy them into a freshly
 *             new[]-allocated C array (*carr); returns 0 and throws a
 *             Java exception on null input or allocation failure.
 *   Argout -- copy the (possibly modified) C values back into the pinned
 *             Java elements and release them (commit mode 0).
 *   Out    -- allocate a new Java array and fill it from a C array.
 * The caller owns *carr (delete[]) after a successful In. */
int SWIG_JavaArrayInBool (JNIEnv *jenv, jboolean **jarr, bool **carr, jbooleanArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetBooleanArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new bool[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = ((*jarr)[i] != 0);
  return 1;
}

void SWIG_JavaArrayArgoutBool (JNIEnv *jenv, jboolean *jarr, bool *carr, jbooleanArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jboolean)carr[i];
  jenv->ReleaseBooleanArrayElements(input, jarr, 0);
}

jbooleanArray SWIG_JavaArrayOutBool (JNIEnv *jenv, bool *result, jsize sz) {
  jboolean *arr;
  int i;
  jbooleanArray jresult = jenv->NewBooleanArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetBooleanArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jboolean)result[i];
  jenv->ReleaseBooleanArrayElements(jresult, arr, 0);
  return jresult;
}


/* signed char[] support */
/* same In/Argout/Out pattern as bool[]; Java byte[] <-> C signed char */
int SWIG_JavaArrayInSchar (JNIEnv *jenv, jbyte **jarr, signed char **carr, jbyteArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetByteArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new signed char[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (signed char)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutSchar (JNIEnv *jenv, jbyte *jarr, signed char *carr, jbyteArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jbyte)carr[i];
  jenv->ReleaseByteArrayElements(input, jarr, 0);
}

jbyteArray SWIG_JavaArrayOutSchar (JNIEnv *jenv, signed char *result, jsize sz) {
  jbyte *arr;
  int i;
  jbyteArray jresult = jenv->NewByteArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetByteArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jbyte)result[i];
  jenv->ReleaseByteArrayElements(jresult, arr, 0);
  return jresult;
}


/* unsigned char[] support */
/* unsigned char is widened to Java short[] (byte is signed in Java) */
int SWIG_JavaArrayInUchar (JNIEnv *jenv, jshort **jarr, unsigned char **carr, jshortArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetShortArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new unsigned char[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned char)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUchar (JNIEnv *jenv, jshort *jarr, unsigned char *carr, jshortArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jshort)carr[i];
  jenv->ReleaseShortArrayElements(input, jarr, 0);
}

jshortArray SWIG_JavaArrayOutUchar (JNIEnv *jenv, unsigned char *result, jsize sz) {
  jshort *arr;
  int i;
  jshortArray jresult = jenv->NewShortArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetShortArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jshort)result[i];
  jenv->ReleaseShortArrayElements(jresult, arr, 0);
  return jresult;
}


/* short[] support */
int SWIG_JavaArrayInShort (JNIEnv *jenv, jshort **jarr, short **carr, jshortArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetShortArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new short[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (short)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutShort (JNIEnv *jenv, jshort *jarr, short *carr, jshortArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jshort)carr[i];
  jenv->ReleaseShortArrayElements(input, jarr, 0);
}

jshortArray SWIG_JavaArrayOutShort (JNIEnv *jenv, short *result, jsize sz) {
  jshort *arr;
  int i;
  jshortArray jresult = jenv->NewShortArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetShortArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jshort)result[i];
  jenv->ReleaseShortArrayElements(jresult, arr, 0);
  return jresult;
}


/* unsigned short[] support */
/* unsigned short is widened to Java int[] */
int SWIG_JavaArrayInUshort (JNIEnv *jenv, jint **jarr, unsigned short **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetIntArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new unsigned short[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv,
SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned short)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUshort (JNIEnv *jenv, jint *jarr, unsigned short *carr, jintArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  jenv->ReleaseIntArrayElements(input, jarr, 0);
}

jintArray SWIG_JavaArrayOutUshort (JNIEnv *jenv, unsigned short *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = jenv->NewIntArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetIntArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  jenv->ReleaseIntArrayElements(jresult, arr, 0);
  return jresult;
}


/* int[] support */
int SWIG_JavaArrayInInt (JNIEnv *jenv, jint **jarr, int **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetIntArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new int[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (int)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutInt (JNIEnv *jenv, jint *jarr, int *carr, jintArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  jenv->ReleaseIntArrayElements(input, jarr, 0);
}

jintArray SWIG_JavaArrayOutInt (JNIEnv *jenv, int *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = jenv->NewIntArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetIntArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  jenv->ReleaseIntArrayElements(jresult, arr, 0);
  return jresult;
}


/* unsigned int[] support */
/* unsigned int is widened to Java long[] */
int SWIG_JavaArrayInUint (JNIEnv *jenv, jlong **jarr, unsigned int **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetLongArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new unsigned int[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned int)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUint (JNIEnv *jenv, jlong *jarr, unsigned int *carr, jlongArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jlong)carr[i];
  jenv->ReleaseLongArrayElements(input, jarr, 0);
}

jlongArray SWIG_JavaArrayOutUint (JNIEnv *jenv, unsigned int *result, jsize sz) {
  jlong *arr;
  int i;
  jlongArray jresult = jenv->NewLongArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetLongArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jlong)result[i];
  jenv->ReleaseLongArrayElements(jresult, arr, 0);
  return jresult;
}


/* long[] support */
/* NOTE(review): C `long` is marshalled via Java int[] (jint) -- this is
 * SWIG's default mapping and truncates on LP64 platforms where long is
 * 64-bit; presumably intentional for the generator's target ABI. */
int SWIG_JavaArrayInLong (JNIEnv *jenv, jint **jarr, long **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetIntArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new long[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (long)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutLong (JNIEnv *jenv, jint *jarr, long *carr, jintArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  jenv->ReleaseIntArrayElements(input, jarr, 0);
}

jintArray SWIG_JavaArrayOutLong (JNIEnv *jenv, long *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = jenv->NewIntArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetIntArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  jenv->ReleaseIntArrayElements(jresult, arr, 0);
  return jresult;
}


/* unsigned long[] support */
/* unsigned long is widened to Java long[] */
int SWIG_JavaArrayInUlong (JNIEnv *jenv, jlong **jarr, unsigned long **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetLongArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new unsigned long[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned long)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUlong (JNIEnv *jenv, jlong *jarr, unsigned long *carr, jlongArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jlong)carr[i];
  jenv->ReleaseLongArrayElements(input, jarr, 0);
}

jlongArray SWIG_JavaArrayOutUlong (JNIEnv *jenv, unsigned long *result, jsize sz) {
  jlong *arr;
  int i;
  jlongArray jresult = jenv->NewLongArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetLongArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jlong)result[i];
  jenv->ReleaseLongArrayElements(jresult, arr, 0);
  return jresult;
}


/* jlong[] support */
int SWIG_JavaArrayInLonglong (JNIEnv *jenv, jlong **jarr, jlong **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetLongArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new jlong[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (jlong)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutLonglong (JNIEnv *jenv, jlong *jarr, jlong *carr, jlongArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jlong)carr[i];
  jenv->ReleaseLongArrayElements(input, jarr, 0);
}

jlongArray SWIG_JavaArrayOutLonglong (JNIEnv *jenv, jlong *result, jsize sz) {
  jlong *arr;
  int i;
  jlongArray jresult = jenv->NewLongArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetLongArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jlong)result[i];
  jenv->ReleaseLongArrayElements(jresult, arr, 0);
  return jresult;
}


/* float[] support */
int SWIG_JavaArrayInFloat (JNIEnv *jenv, jfloat **jarr, float **carr, jfloatArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetFloatArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new float[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (float)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutFloat (JNIEnv *jenv, jfloat *jarr, float *carr, jfloatArray input) {
  int i;
  jsize sz = jenv->GetArrayLength(input);
  for (i=0; i<sz; i++)
    jarr[i] = (jfloat)carr[i];
  jenv->ReleaseFloatArrayElements(input, jarr, 0);
}

jfloatArray SWIG_JavaArrayOutFloat (JNIEnv *jenv, float *result, jsize sz) {
  jfloat *arr;
  int i;
  jfloatArray jresult = jenv->NewFloatArray(sz);
  if (!jresult)
    return NULL;
  arr = jenv->GetFloatArrayElements(jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jfloat)result[i];
  jenv->ReleaseFloatArrayElements(jresult, arr, 0);
  return jresult;
}


/* double[] support */
int SWIG_JavaArrayInDouble (JNIEnv *jenv, jdouble **jarr, double **carr, jdoubleArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = jenv->GetArrayLength(input);
  *jarr = jenv->GetDoubleArrayElements(input, 0);
  if (!*jarr)
    return 0;
  *carr = new double[sz];
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError,
"array memory allocation failed"); return 0; } for (i=0; i<sz; i++) (*carr)[i] = (double)(*jarr)[i]; return 1; } void SWIG_JavaArrayArgoutDouble (JNIEnv *jenv, jdouble *jarr, double *carr, jdoubleArray input) { int i; jsize sz = jenv->GetArrayLength(input); for (i=0; i<sz; i++) jarr[i] = (jdouble)carr[i]; jenv->ReleaseDoubleArrayElements(input, jarr, 0); } jdoubleArray SWIG_JavaArrayOutDouble (JNIEnv *jenv, double *result, jsize sz) { jdouble *arr; int i; jdoubleArray jresult = jenv->NewDoubleArray(sz); if (!jresult) return NULL; arr = jenv->GetDoubleArrayElements(jresult, 0); if (!arr) return NULL; for (i=0; i<sz; i++) arr[i] = (jdouble)result[i]; jenv->ReleaseDoubleArrayElements(jresult, arr, 0); return jresult; } #endif #include <stdint.h> // Use the C99 official header SWIGINTERN void SWIG_JavaException(JNIEnv *jenv, int code, const char *msg) { SWIG_JavaExceptionCodes exception_code = SWIG_JavaUnknownError; switch(code) { case SWIG_MemoryError: exception_code = SWIG_JavaOutOfMemoryError; break; case SWIG_IOError: exception_code = SWIG_JavaIOException; break; case SWIG_SystemError: case SWIG_RuntimeError: exception_code = SWIG_JavaRuntimeException; break; case SWIG_OverflowError: case SWIG_IndexError: exception_code = SWIG_JavaIndexOutOfBoundsException; break; case SWIG_DivisionByZero: exception_code = SWIG_JavaArithmeticException; break; case SWIG_SyntaxError: case SWIG_ValueError: case SWIG_TypeError: exception_code = SWIG_JavaIllegalArgumentException; break; case SWIG_UnknownError: default: exception_code = SWIG_JavaUnknownError; break; } SWIG_JavaThrowException(jenv, exception_code, msg); } #include <stdexcept> #include <string> #include <stdexcept> #include <vector> #include <stdexcept> #include "RCFProto.hpp" SWIGINTERN std::vector< enum RCF::TransportProtocol >::const_reference std_vector_Sl_RCF_TransportProtocol_Sg__get(std::vector< RCF::TransportProtocol > *self,int i){ int size = int(self->size()); if (i>=0 && i<size) return (*self)[i]; else throw 
std::out_of_range("vector index out of range"); } SWIGINTERN void std_vector_Sl_RCF_TransportProtocol_Sg__set(std::vector< RCF::TransportProtocol > *self,int i,std::vector< enum RCF::TransportProtocol >::value_type const &val){ int size = int(self->size()); if (i>=0 && i<size) (*self)[i] = val; else throw std::out_of_range("vector index out of range"); } struct SWIG_null_deleter { void operator() (void const *) const { } }; #define SWIG_NO_NULL_DELETER_0 , SWIG_null_deleter() #define SWIG_NO_NULL_DELETER_1 #define SWIG_NO_NULL_DELETER_SWIG_POINTER_NEW #define SWIG_NO_NULL_DELETER_SWIG_POINTER_OWN /* --------------------------------------------------- * C++ director class methods * --------------------------------------------------- */ #include "RCFProto_Java_impl.h" SwigDirector__SwigCallback::SwigDirector__SwigCallback(JNIEnv *jenv) : RCF::_SwigCallback(), Swig::Director(jenv) { } SwigDirector__SwigCallback::~SwigDirector__SwigCallback() { swig_disconnect_director_self("swigDirectorDisconnect"); } void SwigDirector__SwigCallback::Run() { JNIEnvWrapper swigjnienv(this) ; JNIEnv * jenv = swigjnienv.getJNIEnv() ; jobject swigjobj = (jobject) NULL ; if (!swig_override[0]) { RCF::_SwigCallback::Run(); return; } swigjobj = swig_get_self(jenv); if (swigjobj && jenv->IsSameObject(swigjobj, NULL) == JNI_FALSE) { jenv->CallStaticVoidMethod(Swig::jclass_RCFProtoJNI, Swig::director_methids[0], swigjobj); if (jenv->ExceptionCheck() == JNI_TRUE) return ; } else { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null upcall object"); } if (swigjobj) jenv->DeleteLocalRef(swigjobj); } void SwigDirector__SwigCallback::ProtoRpcBegin(RCF::_SwigCallbackArgs *args, RCF::RcfProtoServer *server, RCF::RcfProtoSession *context, std::string const &serviceName, int methodId) { JNIEnvWrapper swigjnienv(this) ; JNIEnv * jenv = swigjnienv.getJNIEnv() ; jobject swigjobj = (jobject) NULL ; jlong jargs = 0 ; jlong jserver = 0 ; jlong jcontext = 0 ; jstring jserviceName = 0 ; jint 
jmethodId ; if (!swig_override[1]) { RCF::_SwigCallback::ProtoRpcBegin(args,server,context,serviceName,methodId); return; } swigjobj = swig_get_self(jenv); if (swigjobj && jenv->IsSameObject(swigjobj, NULL) == JNI_FALSE) { *((RCF::_SwigCallbackArgs **)&jargs) = (RCF::_SwigCallbackArgs *) args; *((RCF::RcfProtoServer **)&jserver) = (RCF::RcfProtoServer *) server; *((RCF::RcfProtoSession **)&jcontext) = (RCF::RcfProtoSession *) context; jserviceName = jenv->NewStringUTF((&serviceName)->c_str()); jmethodId = (jint) methodId; jenv->CallStaticVoidMethod(Swig::jclass_RCFProtoJNI, Swig::director_methids[1], swigjobj, jargs, jserver, jcontext, jserviceName, jmethodId); if (jenv->ExceptionCheck() == JNI_TRUE) return ; } else { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null upcall object"); } if (swigjobj) jenv->DeleteLocalRef(swigjobj); } bool SwigDirector__SwigCallback::ValidateCertificate(RCF::_SwigCallbackArgs *args) { bool c_result = SwigValueInit< bool >() ; jboolean jresult = 0 ; JNIEnvWrapper swigjnienv(this) ; JNIEnv * jenv = swigjnienv.getJNIEnv() ; jobject swigjobj = (jobject) NULL ; jlong jargs = 0 ; if (!swig_override[2]) { return RCF::_SwigCallback::ValidateCertificate(args); } swigjobj = swig_get_self(jenv); if (swigjobj && jenv->IsSameObject(swigjobj, NULL) == JNI_FALSE) { *((RCF::_SwigCallbackArgs **)&jargs) = (RCF::_SwigCallbackArgs *) args; jresult = (jboolean) jenv->CallStaticBooleanMethod(Swig::jclass_RCFProtoJNI, Swig::director_methids[2], swigjobj, jargs); if (jenv->ExceptionCheck() == JNI_TRUE) return c_result; c_result = jresult ? 
true : false; } else { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null upcall object"); } if (swigjobj) jenv->DeleteLocalRef(swigjobj); return c_result; } void SwigDirector__SwigCallback::swig_connect_director(JNIEnv *jenv, jobject jself, jclass jcls, bool swig_mem_own, bool weak_global) { static struct { const char *mname; const char *mdesc; jmethodID base_methid; } methods[] = { { "Run", "()V", NULL }, { "ProtoRpcBegin", "(Lcom/deltavsoft/rcfproto/_SwigCallbackArgs;Lcom/deltavsoft/rcfproto/RcfProtoServer;Lcom/deltavsoft/rcfproto/RcfProtoSession;Ljava/lang/String;I)V", NULL }, { "ValidateCertificate", "(Lcom/deltavsoft/rcfproto/_SwigCallbackArgs;)Z", NULL } }; static jclass baseclass = 0 ; if (swig_set_self(jenv, jself, swig_mem_own, weak_global)) { if (!baseclass) { baseclass = jenv->FindClass("com/deltavsoft/rcfproto/_SwigCallback"); if (!baseclass) return; baseclass = (jclass) jenv->NewGlobalRef(baseclass); } bool derived = (jenv->IsSameObject(baseclass, jcls) ? false : true); for (int i = 0; i < 3; ++i) { if (!methods[i].base_methid) { methods[i].base_methid = jenv->GetMethodID(baseclass, methods[i].mname, methods[i].mdesc); if (!methods[i].base_methid) return; } swig_override[i] = false; if (derived) { jmethodID methid = jenv->GetMethodID(jcls, methods[i].mname, methods[i].mdesc); swig_override[i] = (methid != methods[i].base_methid); jenv->ExceptionClear(); } } } } #ifdef __cplusplus extern "C" { #endif SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1TransportProtocolList_1_1SWIG_10(JNIEnv *jenv, jclass jcls) { jlong jresult = 0 ; std::vector< RCF::TransportProtocol > *result = 0 ; (void)jenv; (void)jcls; { try { result = (std::vector< RCF::TransportProtocol > *)new std::vector< RCF::TransportProtocol >(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { 
std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(std::vector< RCF::TransportProtocol > **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1TransportProtocolList_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; std::vector< enum RCF::TransportProtocol >::size_type arg1 ; std::vector< RCF::TransportProtocol > *result = 0 ; (void)jenv; (void)jcls; arg1 = (std::vector< enum RCF::TransportProtocol >::size_type)jarg1; { try { result = (std::vector< RCF::TransportProtocol > *)new std::vector< RCF::TransportProtocol >(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(std::vector< RCF::TransportProtocol > **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1size(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; std::vector< enum RCF::TransportProtocol >::size_type result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; { try { result = ((std::vector< RCF::TransportProtocol > const *)arg1)->size(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; 
return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1capacity(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; std::vector< enum RCF::TransportProtocol >::size_type result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; { try { result = ((std::vector< RCF::TransportProtocol > const *)arg1)->capacity(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1reserve(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; std::vector< enum RCF::TransportProtocol >::size_type arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; arg2 = (std::vector< enum RCF::TransportProtocol >::size_type)jarg2; { try { (arg1)->reserve(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1isEmpty(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; std::vector< RCF::TransportProtocol > 
*arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; { try { result = (bool)((std::vector< RCF::TransportProtocol > const *)arg1)->empty(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1clear(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; { try { (arg1)->clear(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1add(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; std::vector< enum RCF::TransportProtocol >::value_type *arg2 = 0 ; std::vector< enum RCF::TransportProtocol >::value_type temp2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; temp2 = (std::vector< enum RCF::TransportProtocol >::value_type)jarg2; arg2 = &temp2; { try { (arg1)->push_back((std::vector< enum RCF::TransportProtocol 
>::value_type const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { jint jresult = 0 ; std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; int arg2 ; std::vector< enum RCF::TransportProtocol >::value_type *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; arg2 = (int)jarg2; { try { try { result = (std::vector< enum RCF::TransportProtocol >::value_type *) &std_vector_Sl_RCF_TransportProtocol_Sg__get(arg1,arg2); } catch(std::out_of_range &_e) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, (&_e)->what()); return 0; } } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)*result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TransportProtocolList_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) { std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; int arg2 ; std::vector< enum RCF::TransportProtocol >::value_type *arg3 = 0 ; std::vector< enum RCF::TransportProtocol >::value_type temp3 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(std::vector< RCF::TransportProtocol > 
**)&jarg1; arg2 = (int)jarg2; temp3 = (std::vector< enum RCF::TransportProtocol >::value_type)jarg3; arg3 = &temp3; { try { try { std_vector_Sl_RCF_TransportProtocol_Sg__set(arg1,arg2,(enum RCF::TransportProtocol const &)*arg3); } catch(std::out_of_range &_e) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, (&_e)->what()); return ; } } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1TransportProtocolList(JNIEnv *jenv, jclass jcls, jlong jarg1) { std::vector< RCF::TransportProtocol > *arg1 = (std::vector< RCF::TransportProtocol > *) 0 ; (void)jenv; (void)jcls; arg1 = *(std::vector< RCF::TransportProtocol > **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_init(JNIEnv *jenv, jclass jcls) { (void)jenv; (void)jcls; { try { RCF::init(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_deinit(JNIEnv *jenv, jclass jcls) { (void)jenv; (void)jcls; { 
try { RCF::deinit(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_isProBuild(JNIEnv *jenv, jclass jcls) { jboolean jresult = 0 ; bool result; (void)jenv; (void)jcls; { try { result = (bool)RCF::isProBuild(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1LogTarget(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::LogTarget *arg1 = (RCF::LogTarget *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::LogTarget **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1LogToStdout(JNIEnv *jenv, jclass jcls) { jlong jresult = 0 ; RCF::LogToStdout *result = 0 ; (void)jenv; (void)jcls; { try { result = (RCF::LogToStdout *)new RCF::LogToStdout(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } 
catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::LogToStdout **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1LogToStdout(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::LogToStdout *arg1 = (RCF::LogToStdout *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::LogToStdout **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1LogToDebugWindow(JNIEnv *jenv, jclass jcls) { jlong jresult = 0 ; RCF::LogToDebugWindow *result = 0 ; (void)jenv; (void)jcls; { try { result = (RCF::LogToDebugWindow *)new RCF::LogToDebugWindow(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::LogToDebugWindow **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1LogToDebugWindow(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::LogToDebugWindow *arg1 = (RCF::LogToDebugWindow *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::LogToDebugWindow **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { 
std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1LogToEventLog(JNIEnv *jenv, jclass jcls, jstring jarg1, jint jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; int arg2 ; RCF::LogToEventLog *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = (int)jarg2; { try { result = (RCF::LogToEventLog *)new RCF::LogToEventLog((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::LogToEventLog **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1LogToEventLog(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::LogToEventLog *arg1 = (RCF::LogToEventLog *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::LogToEventLog **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1LogToFile(JNIEnv *jenv, jclass jcls, jstring jarg1, jboolean jarg2) { jlong jresult = 0 ; 
std::string *arg1 = 0 ; bool arg2 ; RCF::LogToFile *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = jarg2 ? true : false; { try { result = (RCF::LogToFile *)new RCF::LogToFile((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::LogToFile **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1LogToFile(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::LogToFile *arg1 = (RCF::LogToFile *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::LogToFile **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_enableLogging(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jstring jarg3) { RCF::LogTarget *arg1 = 0 ; int arg2 ; std::string *arg3 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::LogTarget **)&jarg1; if (!arg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "RCF::LogTarget const & reference is null"); return ; } arg2 = (int)jarg2; if(!jarg3) { 
SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg3_pstr = (const char *)jenv->GetStringUTFChars(jarg3, 0); if (!arg3_pstr) return ; std::string arg3_str(arg3_pstr); arg3 = &arg3_str; jenv->ReleaseStringUTFChars(jarg3, arg3_pstr); { try { RCF::enableLogging((RCF::LogTarget const &)*arg1,arg2,(std::string const &)*arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_disableLogging(JNIEnv *jenv, jclass jcls) { (void)jenv; (void)jcls; { try { RCF::disableLogging(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Certificate_1_1getType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::Certificate *arg1 = (RCF::Certificate *) 0 ; boost::shared_ptr< RCF::Certificate > *smartarg1 = 0 ; RCF::CertificateImplementationType result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Certificate > **)&jarg1; arg1 = (RCF::Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { result = (RCF::CertificateImplementationType)(arg1)->_getType(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Certificate_1_1downcastToWin32Certificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jlong jresult = 0 ; RCF::Certificate *arg1 = (RCF::Certificate *) 0 ; RCF::CertificatePtr arg2 ; boost::shared_ptr< RCF::Certificate > *smartarg1 = 0 ; RCF::CertificatePtr *argp2 ; RCF::Win32CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; smartarg1 = *(boost::shared_ptr< RCF::Certificate > **)&jarg1; arg1 = (RCF::Certificate *)(smartarg1 ? smartarg1->get() : 0); argp2 = *(RCF::CertificatePtr **)&jarg2; if (argp2) arg2 = *argp2; { try { result = (arg1)->_downcastToWin32Certificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::Win32CertificatePtr **)&jresult = result ? 
new RCF::Win32CertificatePtr(result) : 0; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Certificate_1_1downcastToX509Certificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jlong jresult = 0 ; RCF::Certificate *arg1 = (RCF::Certificate *) 0 ; RCF::CertificatePtr arg2 ; boost::shared_ptr< RCF::Certificate > *smartarg1 = 0 ; RCF::CertificatePtr *argp2 ; RCF::X509CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; smartarg1 = *(boost::shared_ptr< RCF::Certificate > **)&jarg1; arg1 = (RCF::Certificate *)(smartarg1 ? smartarg1->get() : 0); argp2 = *(RCF::CertificatePtr **)&jarg2; if (argp2) arg2 = *argp2; { try { result = (arg1)->_downcastToX509Certificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::X509CertificatePtr **)&jresult = result ? new RCF::X509CertificatePtr(result) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1Certificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::Certificate *arg1 = (RCF::Certificate *) 0 ; boost::shared_ptr< RCF::Certificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::Certificate > **)&jarg1; arg1 = (RCF::Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1PemCertificate_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; std::string *arg2 = 0 ; RCF::PemCertificate *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return 0; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); { try { result = (RCF::PemCertificate *)new RCF::PemCertificate((std::string const &)*arg1,(std::string const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::PemCertificate > **)&jresult = result ? 
new boost::shared_ptr< RCF::PemCertificate >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1PemCertificate_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jstring jarg1) { jlong jresult = 0 ; std::string *arg1 = 0 ; RCF::PemCertificate *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); { try { result = (RCF::PemCertificate *)new RCF::PemCertificate((std::string const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::PemCertificate > **)&jresult = result ? new boost::shared_ptr< RCF::PemCertificate >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1PemCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::PemCertificate *arg1 = (RCF::PemCertificate *) 0 ; boost::shared_ptr< RCF::PemCertificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::PemCertificate > **)&jarg1; arg1 = (RCF::PemCertificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_X509Certificate_1_1getType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::X509Certificate *arg1 = (RCF::X509Certificate *) 0 ; boost::shared_ptr< RCF::X509Certificate > *smartarg1 = 0 ; RCF::CertificateImplementationType result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::X509Certificate > **)&jarg1; arg1 = (RCF::X509Certificate *)(smartarg1 ? smartarg1->get() : 0); { try { result = (RCF::CertificateImplementationType)(arg1)->_getType(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_X509Certificate_1getCertificateName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::X509Certificate *arg1 = (RCF::X509Certificate *) 0 ; boost::shared_ptr< RCF::X509Certificate > *smartarg1 = 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::X509Certificate > **)&jarg1; arg1 = (RCF::X509Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { result = (arg1)->getCertificateName(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_X509Certificate_1getIssuerName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::X509Certificate *arg1 = (RCF::X509Certificate *) 0 ; boost::shared_ptr< RCF::X509Certificate > *smartarg1 = 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::X509Certificate > **)&jarg1; arg1 = (RCF::X509Certificate *)(smartarg1 ? smartarg1->get() : 0); { try { result = (arg1)->getIssuerName(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1X509Certificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::X509Certificate *arg1 = (RCF::X509Certificate *) 0 ; boost::shared_ptr< RCF::X509Certificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::X509Certificate > **)&jarg1; arg1 = (RCF::X509Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1_1getType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; RCF::CertificateImplementationType result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? smartarg1->get() : 0); { try { result = (RCF::CertificateImplementationType)(arg1)->_getType(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1getCertificateName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; RCF::tstring result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { result = (arg1)->getCertificateName(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jsize result_len = (&result)->length(); jchar *conv_buf = new jchar[result_len]; for (jsize i = 0; i < result_len; ++i) { conv_buf[i] = (jchar)result[i]; } jresult = jenv->NewString(conv_buf, result_len); delete [] conv_buf; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1getIssuerName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; RCF::tstring result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { result = (arg1)->getIssuerName(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jsize result_len = (&result)->length(); jchar *conv_buf = new jchar[result_len]; for (jsize i = 0; i < result_len; ++i) { conv_buf[i] = (jchar)result[i]; } jresult = jenv->NewString(conv_buf, result_len); delete [] conv_buf; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1exportToPfx(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; std::string *arg2 = 0 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? 
smartarg1->get() : 0); if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); { try { (arg1)->exportToPfx((std::string const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1findRootCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) { jlong jresult = 0 ; RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; RCF::Win32CertificateLocation arg2 ; RCF::Win32CertificateStore arg3 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; RCF::Win32CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? smartarg1->get() : 0); arg2 = (RCF::Win32CertificateLocation)jarg2; arg3 = (RCF::Win32CertificateStore)jarg3; { try { result = (arg1)->findRootCertificate(arg2,arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::Win32CertificatePtr **)&jresult = result ? 
new RCF::Win32CertificatePtr(result) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1Win32Certificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::Win32Certificate *arg1 = (RCF::Win32Certificate *) 0 ; boost::shared_ptr< RCF::Win32Certificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; arg1 = (RCF::Win32Certificate *)(smartarg1 ? smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1PfxCertificate(JNIEnv *jenv, jclass jcls, jstring jarg1, jstring jarg2, jstring jarg3) { jlong jresult = 0 ; std::string *arg1 = 0 ; RCF::tstring *arg2 = 0 ; RCF::tstring *arg3 = 0 ; RCF::PfxCertificate *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring"); return 0; } const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0); if (!arg2_pstr) return 0; jsize arg2_len = jenv->GetStringLength(jarg2); std::wstring arg2_str; if (arg2_len) { arg2_str.reserve(arg2_len); for (jsize i = 0; i < arg2_len; ++i) { arg2_str.push_back((wchar_t)arg2_pstr[i]); } } arg2 = &arg2_str; jenv->ReleaseStringChars(jarg2, arg2_pstr); if(!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, 
"null std::wstring"); return 0; } const jchar *arg3_pstr = jenv->GetStringChars(jarg3, 0); if (!arg3_pstr) return 0; jsize arg3_len = jenv->GetStringLength(jarg3); std::wstring arg3_str; if (arg3_len) { arg3_str.reserve(arg3_len); for (jsize i = 0; i < arg3_len; ++i) { arg3_str.push_back((wchar_t)arg3_pstr[i]); } } arg3 = &arg3_str; jenv->ReleaseStringChars(jarg3, arg3_pstr); { try { result = (RCF::PfxCertificate *)new RCF::PfxCertificate((std::string const &)*arg1,(RCF::tstring const &)*arg2,(RCF::tstring const &)*arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::PfxCertificate > **)&jresult = result ? new boost::shared_ptr< RCF::PfxCertificate >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_PfxCertificate_1addToStore(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2, jint jarg3) { RCF::PfxCertificate *arg1 = (RCF::PfxCertificate *) 0 ; RCF::Win32CertificateLocation arg2 ; RCF::Win32CertificateStore arg3 ; boost::shared_ptr< RCF::PfxCertificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::PfxCertificate > **)&jarg1; arg1 = (RCF::PfxCertificate *)(smartarg1 ? 
smartarg1->get() : 0); arg2 = (RCF::Win32CertificateLocation)jarg2; arg3 = (RCF::Win32CertificateStore)jarg3; { try { (arg1)->addToStore(arg2,arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1PfxCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::PfxCertificate *arg1 = (RCF::PfxCertificate *) 0 ; boost::shared_ptr< RCF::PfxCertificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::PfxCertificate > **)&jarg1; arg1 = (RCF::PfxCertificate *)(smartarg1 ? smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1StoreCertificate(JNIEnv *jenv, jclass jcls, jint jarg1, jint jarg2, jstring jarg3) { jlong jresult = 0 ; RCF::Win32CertificateLocation arg1 ; RCF::Win32CertificateStore arg2 ; RCF::tstring *arg3 = 0 ; RCF::StoreCertificate *result = 0 ; (void)jenv; (void)jcls; arg1 = (RCF::Win32CertificateLocation)jarg1; arg2 = (RCF::Win32CertificateStore)jarg2; if(!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring"); return 0; } const jchar *arg3_pstr = jenv->GetStringChars(jarg3, 0); if (!arg3_pstr) return 0; jsize arg3_len = jenv->GetStringLength(jarg3); std::wstring arg3_str; if (arg3_len) { 
arg3_str.reserve(arg3_len); for (jsize i = 0; i < arg3_len; ++i) { arg3_str.push_back((wchar_t)arg3_pstr[i]); } } arg3 = &arg3_str; jenv->ReleaseStringChars(jarg3, arg3_pstr); { try { result = (RCF::StoreCertificate *)new RCF::StoreCertificate(arg1,arg2,(RCF::tstring const &)*arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::StoreCertificate > **)&jresult = result ? new boost::shared_ptr< RCF::StoreCertificate >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_StoreCertificate_1removeFromStore(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::StoreCertificate *arg1 = (RCF::StoreCertificate *) 0 ; boost::shared_ptr< RCF::StoreCertificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::StoreCertificate > **)&jarg1; arg1 = (RCF::StoreCertificate *)(smartarg1 ? 
smartarg1->get() : 0); { try { (arg1)->removeFromStore(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1StoreCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::StoreCertificate *arg1 = (RCF::StoreCertificate *) 0 ; boost::shared_ptr< RCF::StoreCertificate > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::StoreCertificate > **)&jarg1; arg1 = (RCF::StoreCertificate *)(smartarg1 ? smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1StoreCertificateIterator(JNIEnv *jenv, jclass jcls, jint jarg1, jint jarg2) { jlong jresult = 0 ; RCF::Win32CertificateLocation arg1 ; RCF::Win32CertificateStore arg2 ; RCF::StoreCertificateIterator *result = 0 ; (void)jenv; (void)jcls; arg1 = (RCF::Win32CertificateLocation)jarg1; arg2 = (RCF::Win32CertificateStore)jarg2; { try { result = (RCF::StoreCertificateIterator *)new RCF::StoreCertificateIterator(arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); 
/* NOTE(review): SWIG-generated JNI glue — regenerate from the SWIG interface
 * file rather than editing by hand; comments only added here. */
/* Tail of new_StoreCertificateIterator (begun on the previous line): second
 * catch handler translates std::exception into java.lang.Exception, then the
 * raw iterator pointer is punned into the jlong result (this class is returned
 * by plain pointer, not shared_ptr — freed via delete_1StoreCertificateIterator). */
jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::StoreCertificateIterator **)&jresult = result; return jresult; }
/* JNI wrapper for RCF::StoreCertificateIterator::moveNext(): un-puns the raw
 * C++ pointer from jarg1 and forwards the call, translating RCF::Exception /
 * std::exception into java.lang.Exception.
 * NOTE(review): if jarg1 is 0, arg1 is null and the call dereferences it — the
 * generated code relies on the Java proxy never passing a dead handle. */
SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_StoreCertificateIterator_1moveNext(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; RCF::StoreCertificateIterator *arg1 = (RCF::StoreCertificateIterator *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::StoreCertificateIterator **)&jarg1; { try { result = (bool)(arg1)->moveNext(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; }
/* JNI wrapper for RCF::StoreCertificateIterator::reset(); same pointer
 * un-marshalling and exception-translation template as moveNext() above. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_StoreCertificateIterator_1reset(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::StoreCertificateIterator *arg1 = (RCF::StoreCertificateIterator *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::StoreCertificateIterator **)&jarg1; { try { (arg1)->reset(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } }
/* Head of the wrapper for StoreCertificateIterator::current() (continues on the
 * next line): its Win32CertificatePtr result is later heap-wrapped for Java. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_StoreCertificateIterator_1current(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::StoreCertificateIterator *arg1 = (RCF::StoreCertificateIterator *) 0 ; RCF::Win32CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::StoreCertificateIterator **)&jarg1; { try {
/* NOTE(review): SWIG-generated JNI glue — comments only added; regenerate from
 * the SWIG interface file for real changes. */
/* Tail of StoreCertificateIterator_current (begun on the previous line): the
 * smart-pointer result is copied into a heap-allocated Win32CertificatePtr and
 * punned into the jlong return (null shared_ptr maps to 0). */
result = (arg1)->current(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::Win32CertificatePtr **)&jresult = result ? new RCF::Win32CertificatePtr(result) : 0; return jresult; }
/* Destructor wrapper: deletes the raw StoreCertificateIterator owned by the
 * Java proxy (this class is not shared_ptr-wrapped, unlike the certificates). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1StoreCertificateIterator(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::StoreCertificateIterator *arg1 = (RCF::StoreCertificateIterator *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::StoreCertificateIterator **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } }
/* Wrapper for the free function RCF::setDefaultSslImplementation: the Java int
 * is cast straight to the RCF::SslImplementation enum (no range validation —
 * standard SWIG enum handling). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_setDefaultSslImplementation(JNIEnv *jenv, jclass jcls, jint jarg1) { RCF::SslImplementation arg1 ; (void)jenv; (void)jcls; arg1 = (RCF::SslImplementation)jarg1; { try { RCF::setDefaultSslImplementation(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } }
/* Head of the wrapper for RCF::getDefaultSslImplementation (continues on the
 * next line): returns the enum as a jint. */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_getDefaultSslImplementation(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; RCF::SslImplementation result; (void)jenv; (void)jcls; { try { result =
/* NOTE(review): SWIG-generated JNI glue — comments only added; regenerate from
 * the SWIG interface file for real changes. */
/* Tail of getDefaultSslImplementation (begun on the previous line): the enum
 * value is cast to jint for the Java side after exception translation. */
(RCF::SslImplementation)RCF::getDefaultSslImplementation(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; }
/* JNI wrapper for RCF::IpAddress(const std::string &): jstring -> std::string
 * conversion (NullPointerException thrown into Java for a null jstring), then
 * construction with RCF/std exceptions translated to java.lang.Exception.
 * The raw pointer is punned into the jlong result; the Java proxy owns it and
 * releases it through delete_1IpAddress. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1IpAddress_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jstring jarg1) { jlong jresult = 0 ; std::string *arg1 = 0 ; RCF::IpAddress *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); { try { result = (RCF::IpAddress *)new RCF::IpAddress((std::string const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::IpAddress **)&jresult = result; return jresult; }
/* Head of the (ip, port) IpAddress constructor overload (continues on the next
 * line): same string marshalling, plus an int port argument. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1IpAddress_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jstring jarg1, jint jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; int arg2 ; RCF::IpAddress *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str;
jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = (int)jarg2; { try { result = (RCF::IpAddress *)new RCF::IpAddress((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::IpAddress **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1IpAddress(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::IpAddress *arg1 = (RCF::IpAddress *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::IpAddress **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1ThreadPool_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong jresult = 0 ; std::size_t arg1 ; RCF::ThreadPool *result = 0 ; (void)jenv; (void)jcls; arg1 = (std::size_t)jarg1; { try { result = (RCF::ThreadPool *)new RCF::ThreadPool(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::ThreadPool > **)&jresult = result ? 
new boost::shared_ptr< RCF::ThreadPool >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1ThreadPool_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jlong jarg2) { jlong jresult = 0 ; std::size_t arg1 ; std::size_t arg2 ; RCF::ThreadPool *result = 0 ; (void)jenv; (void)jcls; arg1 = (std::size_t)jarg1; arg2 = (std::size_t)jarg2; { try { result = (RCF::ThreadPool *)new RCF::ThreadPool(arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(boost::shared_ptr< RCF::ThreadPool > **)&jresult = result ? new boost::shared_ptr< RCF::ThreadPool >(result SWIG_NO_NULL_DELETER_1) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1setThreadMinCount(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; std::size_t arg2 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? 
smartarg1->get() : 0); arg2 = (std::size_t)jarg2; { try { (arg1)->setThreadMinCount(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1getThreadMinCount(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool const > *smartarg1 = 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< const RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); { try { result = ((RCF::ThreadPool const *)arg1)->getThreadMinCount(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1setThreadMaxCount(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; std::size_t arg2 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? 
smartarg1->get() : 0); arg2 = (std::size_t)jarg2; { try { (arg1)->setThreadMaxCount(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1getThreadMaxCount(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool const > *smartarg1 = 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< const RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); { try { result = ((RCF::ThreadPool const *)arg1)->getThreadMaxCount(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1setThreadIdleTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::uint32_t arg2 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? 
// ============================================================================
// NOTE(review): SWIG-generated JNI glue for the RCFProto Java bindings,
// flattened onto very long physical lines. Do NOT hand-edit this code;
// regenerate it from the SWIG interface (.i) file instead. The comments
// below only annotate the repeating pattern — every code token is untouched.
//
// Common per-wrapper pattern visible throughout:
//   1. Reinterpret the incoming jlong handle as a C++ pointer (for
//      shared_ptr-managed classes, unpack a boost::shared_ptr<T>* first and
//      take ->get(); a null smart pointer yields a null raw pointer).
//   2. Convert Java arguments (jstring -> std::string via GetStringUTFChars /
//      ReleaseStringUTFChars, with a NullPointerException thrown for null).
//   3. Invoke the wrapped RCF method inside try/catch and rethrow any
//      RCF::Exception or std::exception as java.lang.Exception via
//      jenv->FindClass + jenv->ThrowNew, returning 0 / void on failure.
// ----------------------------------------------------------------------------
// RCF::ThreadPool wrappers: tail of setThreadIdleTimeoutMs, then
// getThreadIdleTimeoutMs, setReserveLastThread (begins below).
// ============================================================================
smartarg1->get() : 0); arg2 = (boost::uint32_t)jarg2; { try { (arg1)->setThreadIdleTimeoutMs(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1getThreadIdleTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool const > *smartarg1 = 0 ; boost::uint32_t result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< const RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); { try { result = (boost::uint32_t)((RCF::ThreadPool const *)arg1)->getThreadIdleTimeoutMs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1setReserveLastThread(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jboolean jarg2) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; bool arg2 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); arg2 = jarg2 ? 
true : false; { try { (arg1)->setReserveLastThread(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1getReserveLastThread(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool const > *smartarg1 = 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< const RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); { try { result = (bool)((RCF::ThreadPool const *)arg1)->getReserveLastThread(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1setThreadName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; std::string *arg2 = 0 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? 
smartarg1->get() : 0); if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); { try { (arg1)->setThreadName((std::string const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ThreadPool_1getThreadName(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool const > *smartarg1 = 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; smartarg1 = *(boost::shared_ptr< const RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? 
smartarg1->get() : 0); { try { result = ((RCF::ThreadPool const *)arg1)->getThreadName(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1ThreadPool(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::ThreadPool *arg1 = (RCF::ThreadPool *) 0 ; boost::shared_ptr< RCF::ThreadPool > *smartarg1 = 0 ; (void)jenv; (void)jcls; smartarg1 = *(boost::shared_ptr< RCF::ThreadPool > **)&jarg1; arg1 = (RCF::ThreadPool *)(smartarg1 ? smartarg1->get() : 0); { try { (void)arg1; delete smartarg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1Endpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::Endpoint *arg1 = (RCF::Endpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::Endpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1TcpEndpoint_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jint jarg1) { 
// ---- RCF::TcpEndpoint wrappers: ctor overloads (port-only; ip+port),
// getIp / getPort / asString accessors, and the destructor. Plain (non
// shared_ptr) handles: the jlong is reinterpreted directly as the pointer.
jlong jresult = 0 ; int arg1 ; RCF::TcpEndpoint *result = 0 ; (void)jenv; (void)jcls; arg1 = (int)jarg1; { try { result = (RCF::TcpEndpoint *)new RCF::TcpEndpoint(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::TcpEndpoint **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1TcpEndpoint_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jstring jarg1, jint jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; int arg2 ; RCF::TcpEndpoint *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = (int)jarg2; { try { result = (RCF::TcpEndpoint *)new RCF::TcpEndpoint((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::TcpEndpoint **)&jresult = result; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TcpEndpoint_1getIp(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::TcpEndpoint *arg1 = (RCF::TcpEndpoint *) 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::TcpEndpoint **)&jarg1; { try { result = 
((RCF::TcpEndpoint const *)arg1)->getIp(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TcpEndpoint_1getPort(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::TcpEndpoint *arg1 = (RCF::TcpEndpoint *) 0 ; int result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::TcpEndpoint **)&jarg1; { try { result = (int)((RCF::TcpEndpoint const *)arg1)->getPort(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TcpEndpoint_1asString(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::TcpEndpoint *arg1 = (RCF::TcpEndpoint *) 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::TcpEndpoint **)&jarg1; { try { result = ((RCF::TcpEndpoint const *)arg1)->asString(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return 
jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1TcpEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::TcpEndpoint *arg1 = (RCF::TcpEndpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::TcpEndpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1Win32NamedPipeEndpoint(JNIEnv *jenv, jclass jcls, jstring jarg1) { jlong jresult = 0 ; RCF::tstring *arg1 = 0 ; RCF::Win32NamedPipeEndpoint *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring"); return 0; } const jchar *arg1_pstr = jenv->GetStringChars(jarg1, 0); if (!arg1_pstr) return 0; jsize arg1_len = jenv->GetStringLength(jarg1); std::wstring arg1_str; if (arg1_len) { arg1_str.reserve(arg1_len); for (jsize i = 0; i < arg1_len; ++i) { arg1_str.push_back((wchar_t)arg1_pstr[i]); } } arg1 = &arg1_str; jenv->ReleaseStringChars(jarg1, arg1_pstr); { try { result = (RCF::Win32NamedPipeEndpoint *)new RCF::Win32NamedPipeEndpoint((RCF::tstring const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::Win32NamedPipeEndpoint **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1Win32NamedPipeEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { 
// ---- Win32NamedPipeEndpoint dtor tail, then UnixLocalEndpoint ctor/dtor.
// (The pipe ctor above converts jstring -> std::wstring char-by-char.)
RCF::Win32NamedPipeEndpoint *arg1 = (RCF::Win32NamedPipeEndpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::Win32NamedPipeEndpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1UnixLocalEndpoint(JNIEnv *jenv, jclass jcls, jstring jarg1) { jlong jresult = 0 ; std::string *arg1 = 0 ; RCF::UnixLocalEndpoint *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); { try { result = (RCF::UnixLocalEndpoint *)new RCF::UnixLocalEndpoint((std::string const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::UnixLocalEndpoint **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1UnixLocalEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::UnixLocalEndpoint *arg1 = (RCF::UnixLocalEndpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::UnixLocalEndpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); 
// ---- HttpEndpoint / HttpsEndpoint wrappers: ctor overloads (port-only;
// host+port) and destructors — same shape as the TcpEndpoint wrappers.
jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1HttpEndpoint_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jint jarg1) { jlong jresult = 0 ; int arg1 ; RCF::HttpEndpoint *result = 0 ; (void)jenv; (void)jcls; arg1 = (int)jarg1; { try { result = (RCF::HttpEndpoint *)new RCF::HttpEndpoint(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::HttpEndpoint **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1HttpEndpoint_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jstring jarg1, jint jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; int arg2 ; RCF::HttpEndpoint *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = (int)jarg2; { try { result = (RCF::HttpEndpoint *)new RCF::HttpEndpoint((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::HttpEndpoint **)&jresult = result; 
return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1HttpEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::HttpEndpoint *arg1 = (RCF::HttpEndpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::HttpEndpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1HttpsEndpoint_1_1SWIG_10(JNIEnv *jenv, jclass jcls, jint jarg1) { jlong jresult = 0 ; int arg1 ; RCF::HttpsEndpoint *result = 0 ; (void)jenv; (void)jcls; arg1 = (int)jarg1; { try { result = (RCF::HttpsEndpoint *)new RCF::HttpsEndpoint(arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::HttpsEndpoint **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1HttpsEndpoint_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jstring jarg1, jint jarg2) { jlong jresult = 0 ; std::string *arg1 = 0 ; int arg2 ; RCF::HttpsEndpoint *result = 0 ; (void)jenv; (void)jcls; if(!jarg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return 0; } const char *arg1_pstr = (const char *)jenv->GetStringUTFChars(jarg1, 0); if (!arg1_pstr) return 0; std::string arg1_str(arg1_pstr); arg1 = &arg1_str; jenv->ReleaseStringUTFChars(jarg1, arg1_pstr); arg2 = (int)jarg2; { try { result = (RCF::HttpsEndpoint *)new 
RCF::HttpsEndpoint((std::string const &)*arg1,arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::HttpsEndpoint **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1HttpsEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::HttpsEndpoint *arg1 = (RCF::HttpsEndpoint *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::HttpsEndpoint **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1getTransportType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; RCF::TransportType result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; { try { result = (RCF::TransportType)(arg1)->getTransportType(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1setMaxIncomingMessageLength(JNIEnv *jenv, 
// ---- RCF::ServerTransport wrappers: fluent setters return the transport
// itself, so the returned reference's address is passed back as the handle.
jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t arg2 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; arg2 = (std::size_t)jarg2; { try { result = (RCF::ServerTransport *) &(arg1)->setMaxIncomingMessageLength(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1getMaxIncomingMessageLength(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; { try { result = ((RCF::ServerTransport const *)arg1)->getMaxIncomingMessageLength(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1setConnectionLimit(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t arg2 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; 
arg2 = (std::size_t)jarg2; { try { result = (RCF::ServerTransport *) &(arg1)->setConnectionLimit(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1getConnectionLimit(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; { try { result = ((RCF::ServerTransport const *)arg1)->getConnectionLimit(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1setInitialNumberOfConnections(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t arg2 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; arg2 = (std::size_t)jarg2; { try { result = (RCF::ServerTransport *) &(arg1)->setInitialNumberOfConnections(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); 
return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1getInitialNumberOfConnections(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; { try { result = ((RCF::ServerTransport const *)arg1)->getInitialNumberOfConnections(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1setThreadPool(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; RCF::ThreadPoolPtr arg2 ; RCF::ThreadPoolPtr *argp2 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::ServerTransport **)&jarg1; argp2 = *(RCF::ThreadPoolPtr **)&jarg2; if (argp2) arg2 = *argp2; { try { result = (RCF::ServerTransport *) &(arg1)->setThreadPool(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } 
*(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1setSupportedProtocols(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::vector< RCF::TransportProtocol > *arg2 = 0 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::ServerTransport **)&jarg1; arg2 = *(std::vector< RCF::TransportProtocol > **)&jarg2; if (!arg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "std::vector< RCF::TransportProtocol > const & reference is null"); return 0; } { try { result = (RCF::ServerTransport *) &(arg1)->setSupportedProtocols((std::vector< RCF::TransportProtocol > const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ServerTransport_1getSupportedProtocols(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; std::vector< RCF::TransportProtocol > *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ServerTransport **)&jarg1; { try { result = (std::vector< RCF::TransportProtocol > *) &((RCF::ServerTransport const *)arg1)->getSupportedProtocols(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); 
jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(std::vector< RCF::TransportProtocol > **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1ServerTransport(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::ServerTransport *arg1 = (RCF::ServerTransport *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::ServerTransport **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getTransportType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; RCF::TransportType result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = (RCF::TransportType)(arg1)->getTransportType(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1setMaxIncomingMessageLength(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; std::size_t arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; arg2 = (std::size_t)jarg2; { try { 
// ---- RCF::ClientTransport wrappers: message-length limits, last
// request/response sizes, and running byte totals.
(arg1)->setMaxIncomingMessageLength(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getMaxIncomingMessageLength(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = ((RCF::ClientTransport const *)arg1)->getMaxIncomingMessageLength(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getLastRequestSize(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = (arg1)->getLastRequestSize(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT 
// NOTE: the two getRunningTotalBytes* wrappers below marshal a
// boost::uint64_t into java.math.BigInteger via a 9-byte big-endian array;
// the leading zero byte keeps BigInteger's sign non-negative.
jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getLastResponseSize(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; std::size_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = (arg1)->getLastResponseSize(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT jobject JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getRunningTotalBytesSent(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jobject jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; boost::uint64_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = (boost::uint64_t)(arg1)->getRunningTotalBytesSent(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } { jbyteArray ba = jenv->NewByteArray(9); jbyte* bae = jenv->GetByteArrayElements(ba, 0); jclass clazz = jenv->FindClass("java/math/BigInteger"); jmethodID mid = jenv->GetMethodID(clazz, "<init>", "([B)V"); jobject bigint; int i; bae[0] = 0; for(i=1; i<9; i++ ) { bae[i] = (jbyte)(result>>8*(8-i)); } jenv->ReleaseByteArrayElements(ba, bae, 0); bigint = jenv->NewObject(clazz, mid, ba); jresult = bigint; } return jresult; } SWIGEXPORT jobject JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1getRunningTotalBytesReceived(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jobject jresult = 0 ; RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; boost::uint64_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { result = (boost::uint64_t)(arg1)->getRunningTotalBytesReceived(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } { jbyteArray ba = jenv->NewByteArray(9); jbyte* bae = jenv->GetByteArrayElements(ba, 0); jclass clazz = jenv->FindClass("java/math/BigInteger"); jmethodID mid = jenv->GetMethodID(clazz, "<init>", "([B)V"); jobject bigint; int i; bae[0] = 0; for(i=1; i<9; i++ ) { bae[i] = (jbyte)(result>>8*(8-i)); } jenv->ReleaseByteArrayElements(ba, bae, 0); bigint = jenv->NewObject(clazz, mid, ba); jresult = bigint; } return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_ClientTransport_1resetRunningTotals(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::ClientTransport **)&jarg1; { try { (arg1)->resetRunningTotals(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1ClientTransport(JNIEnv *jenv, jclass jcls, jlong jarg1) { 
// ---- ClientTransport dtor tail, then RCF::_SwigCallbackArgs field
// accessors (mErrorString string field; mCertificatePtr shared_ptr field,
// with a default-constructed CertificatePtr substituted for a null handle).
RCF::ClientTransport *arg1 = (RCF::ClientTransport *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::ClientTransport **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallbackArgs_1mErrorString_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { RCF::_SwigCallbackArgs *arg1 = (RCF::_SwigCallbackArgs *) 0 ; std::string *arg2 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::_SwigCallbackArgs **)&jarg1; if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); if (arg1) (arg1)->mErrorString = *arg2; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallbackArgs_1mErrorString_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::_SwigCallbackArgs *arg1 = (RCF::_SwigCallbackArgs *) 0 ; std::string *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::_SwigCallbackArgs **)&jarg1; result = (std::string *) & ((arg1)->mErrorString); jresult = jenv->NewStringUTF(result->c_str()); return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallbackArgs_1mCertificatePtr_1set(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::_SwigCallbackArgs *arg1 = (RCF::_SwigCallbackArgs *) 0 ; RCF::CertificatePtr *arg2 = 0 ; RCF::CertificatePtr tempnull2 ; (void)jenv; (void)jcls; 
(void)jarg1_; (void)jarg2_; arg1 = *(RCF::_SwigCallbackArgs **)&jarg1; arg2 = jarg2 ? *(RCF::CertificatePtr **)&jarg2 : &tempnull2; if (arg1) (arg1)->mCertificatePtr = *arg2; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallbackArgs_1mCertificatePtr_1get(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::_SwigCallbackArgs *arg1 = (RCF::_SwigCallbackArgs *) 0 ; RCF::CertificatePtr *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::_SwigCallbackArgs **)&jarg1; result = (RCF::CertificatePtr *) & ((arg1)->mCertificatePtr); *(RCF::CertificatePtr **)&jresult = *result ? new RCF::CertificatePtr(*result) : 0; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1_1SwigCallbackArgs(JNIEnv *jenv, jclass jcls) { jlong jresult = 0 ; RCF::_SwigCallbackArgs *result = 0 ; (void)jenv; (void)jcls; { try { result = (RCF::_SwigCallbackArgs *)new RCF::_SwigCallbackArgs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::_SwigCallbackArgs **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1_1SwigCallbackArgs(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::_SwigCallbackArgs *arg1 = (RCF::_SwigCallbackArgs *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::_SwigCallbackArgs **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, 
msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1_1SwigCallback(JNIEnv *jenv, jclass jcls) { jlong jresult = 0 ; RCF::_SwigCallback *result = 0 ; (void)jenv; (void)jcls; { try { result = (RCF::_SwigCallback *)new SwigDirector__SwigCallback(jenv); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::_SwigCallback **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1_1SwigCallback(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::_SwigCallback **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1Run(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::_SwigCallback **)&jarg1; { try { (arg1)->Run(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1RunSwigExplicit_1SwigCallback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::_SwigCallback **)&jarg1; { try { (arg1)->RCF::_SwigCallback::Run(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1ProtoRpcBegin(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_, jlong jarg4, jobject jarg4_, jstring jarg5, jint jarg6) { RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; RCF::_SwigCallbackArgs *arg2 = (RCF::_SwigCallbackArgs *) 0 ; RCF::RcfProtoServer *arg3 = (RCF::RcfProtoServer *) 0 ; RCF::RcfProtoSession *arg4 = (RCF::RcfProtoSession *) 0 ; std::string *arg5 = 0 ; int arg6 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; (void)jarg3_; (void)jarg4_; arg1 = *(RCF::_SwigCallback **)&jarg1; arg2 = *(RCF::_SwigCallbackArgs **)&jarg2; arg3 = *(RCF::RcfProtoServer **)&jarg3; arg4 = *(RCF::RcfProtoSession **)&jarg4; if(!jarg5) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg5_pstr = (const char *)jenv->GetStringUTFChars(jarg5, 0); if (!arg5_pstr) return ; std::string arg5_str(arg5_pstr); arg5 = &arg5_str; jenv->ReleaseStringUTFChars(jarg5, arg5_pstr); arg6 = (int)jarg6; { try { (arg1)->ProtoRpcBegin(arg2,arg3,arg4,(std::string const &)*arg5,arg6); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, 
msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1ProtoRpcBeginSwigExplicit_1SwigCallback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_, jlong jarg3, jobject jarg3_, jlong jarg4, jobject jarg4_, jstring jarg5, jint jarg6) { RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; RCF::_SwigCallbackArgs *arg2 = (RCF::_SwigCallbackArgs *) 0 ; RCF::RcfProtoServer *arg3 = (RCF::RcfProtoServer *) 0 ; RCF::RcfProtoSession *arg4 = (RCF::RcfProtoSession *) 0 ; std::string *arg5 = 0 ; int arg6 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; (void)jarg3_; (void)jarg4_; arg1 = *(RCF::_SwigCallback **)&jarg1; arg2 = *(RCF::_SwigCallbackArgs **)&jarg2; arg3 = *(RCF::RcfProtoServer **)&jarg3; arg4 = *(RCF::RcfProtoSession **)&jarg4; if(!jarg5) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg5_pstr = (const char *)jenv->GetStringUTFChars(jarg5, 0); if (!arg5_pstr) return ; std::string arg5_str(arg5_pstr); arg5 = &arg5_str; jenv->ReleaseStringUTFChars(jarg5, arg5_pstr); arg6 = (int)jarg6; { try { (arg1)->RCF::_SwigCallback::ProtoRpcBegin(arg2,arg3,arg4,(std::string const &)*arg5,arg6); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1ValidateCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jboolean jresult = 0 ; RCF::_SwigCallback *arg1 
/* SWIG-generated JNI glue (continued): certificate-validation callback
 * wrappers, director connect/ownership plumbing for the _SwigCallback
 * director class, and the core RcfProtoChannel wrappers (_CallMethodSwig,
 * response-buffer accessors, constructor, Failed/ErrorText/StartCancel/
 * Completed/connect/disconnect). Do not edit by hand; regenerate via SWIG. */
= (RCF::_SwigCallback *) 0 ; RCF::_SwigCallbackArgs *arg2 = (RCF::_SwigCallbackArgs *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::_SwigCallback **)&jarg1; arg2 = *(RCF::_SwigCallbackArgs **)&jarg2; { try { result = (bool)(arg1)->ValidateCertificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1ValidateCertificateSwigExplicit_1SwigCallback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jboolean jresult = 0 ; RCF::_SwigCallback *arg1 = (RCF::_SwigCallback *) 0 ; RCF::_SwigCallbackArgs *arg2 = (RCF::_SwigCallbackArgs *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::_SwigCallback **)&jarg1; arg2 = *(RCF::_SwigCallbackArgs **)&jarg2; { try { result = (bool)(arg1)->RCF::_SwigCallback::ValidateCertificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1director_1connect(JNIEnv *jenv, jclass jcls, jobject jself, jlong objarg, jboolean jswig_mem_own, jboolean jweak_global) { RCF::_SwigCallback *obj = *((RCF::_SwigCallback **)&objarg); (void)jcls; SwigDirector__SwigCallback *director = 
/* director plumbing: if the C++ object is a director, bind it to the Java
 * proxy (director_connect) or transfer memory ownership (change_ownership). */
dynamic_cast<SwigDirector__SwigCallback *>(obj); if (director) { director->swig_connect_director(jenv, jself, jenv->GetObjectClass(jself), (jswig_mem_own == JNI_TRUE), (jweak_global == JNI_TRUE)); } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI__1SwigCallback_1change_1ownership(JNIEnv *jenv, jclass jcls, jobject jself, jlong objarg, jboolean jtake_or_release) { RCF::_SwigCallback *obj = *((RCF::_SwigCallback **)&objarg); SwigDirector__SwigCallback *director = dynamic_cast<SwigDirector__SwigCallback *>(obj); (void)jcls; if (director) { director->swig_java_change_ownership(jenv, jself, jtake_or_release ? true : false); } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1CallMethodSwig(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jint jarg3, jbyteArray jarg4, jlong jarg6, jobject jarg6_) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; std::string *arg2 = 0 ; int arg3 ; char *arg4 = (char *) 0 ; size_t arg5 ; RCF::_SwigCallback *arg6 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg6_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); arg3 = (int)jarg3; { arg4 = (char *) jenv->GetByteArrayElements(jarg4, 0); arg5 = (size_t) jenv->GetArrayLength(jarg4); } arg6 = *(RCF::_SwigCallback **)&jarg6; { try { (arg1)->_CallMethodSwig((std::string const &)*arg2,arg3,arg4,arg5,arg6); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); 
jenv->ThrowNew(excType, msg.c_str()); return ; } } { jenv->ReleaseByteArrayElements(jarg4, (jbyte *)arg4, 0); } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1GetResponseBufferLength(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; int result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (int)((RCF::RcfProtoChannel const *)arg1)->_GetResponseBufferLength(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1GetResponseBuffer(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; char *arg2 = (char *) 0 ; size_t arg3 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { arg2 = (char *) jenv->GetByteArrayElements(jarg2, 0); arg3 = (size_t) jenv->GetArrayLength(jarg2); } { try { ((RCF::RcfProtoChannel const *)arg1)->_GetResponseBuffer(arg2,arg3); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } { jenv->ReleaseByteArrayElements(jarg2, (jbyte *)arg2, 0); } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1RcfProtoChannel(JNIEnv *jenv, jclass jcls, jlong jarg1, 
jobject jarg1_) { jlong jresult = 0 ; RCF::Endpoint *arg1 = 0 ; RCF::RcfProtoChannel *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::Endpoint **)&jarg1; if (!arg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "RCF::Endpoint const & reference is null"); return 0; } { try { result = (RCF::RcfProtoChannel *)new RCF::RcfProtoChannel((RCF::Endpoint const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::RcfProtoChannel **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1CallMethodSwig_1WithCopy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2, jint jarg3, jstring jarg4, jlong jarg5, jobject jarg5_) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; std::string *arg2 = 0 ; int arg3 ; std::string *arg4 = 0 ; RCF::_SwigCallback *arg5 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg5_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); arg3 = (int)jarg3; if(!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg4_pstr = (const char *)jenv->GetStringUTFChars(jarg4, 0); if (!arg4_pstr) return ; std::string arg4_str(arg4_pstr); arg4 = &arg4_str; jenv->ReleaseStringUTFChars(jarg4, arg4_pstr); arg5 = *(RCF::_SwigCallback **)&jarg5; { try { 
(arg1)->_CallMethodSwig_WithCopy((std::string const &)*arg2,arg3,(std::string const &)*arg4,arg5); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1GetResponseBuffer_1WithCopy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = ((RCF::RcfProtoChannel const *)arg1)->_GetResponseBuffer_WithCopy(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1Failed(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (bool)(arg1)->Failed(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); 
return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1ErrorText(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (arg1)->ErrorText(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1StartCancel(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { (arg1)->StartCancel(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1Completed(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (bool)(arg1)->Completed(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = 
jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1connect(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { (arg1)->connect(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1disconnect(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { (arg1)->disconnect(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setRemoteCallTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; unsigned int arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; arg2 = (unsigned int)jarg2; { try { 
(arg1)->setRemoteCallTimeoutMs(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getRemoteCallTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; unsigned int result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (unsigned int)((RCF::RcfProtoChannel const *)arg1)->getRemoteCallTimeoutMs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setConnectTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; unsigned int arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; arg2 = (unsigned int)jarg2; { try { (arg1)->setConnectTimeoutMs(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getConnectTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; unsigned int result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (unsigned int)((RCF::RcfProtoChannel const *)arg1)->getConnectTimeoutMs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setTransportProtocol(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; RCF::TransportProtocol arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; arg2 = (RCF::TransportProtocol)jarg2; { try { (arg1)->setTransportProtocol(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getTransportProtocol(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; RCF::TransportProtocol result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (RCF::TransportProtocol)(arg1)->getTransportProtocol(); } catch(const 
RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setAsynchronousRpcMode(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jboolean jarg2) { RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; bool arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; arg2 = jarg2 ? true : false; { try { (arg1)->setAsynchronousRpcMode(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getAsynchronousRpcMode(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jboolean jresult = 0 ; RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ; bool result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoChannel **)&jarg1; { try { result = (bool)(arg1)->getAsynchronousRpcMode(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jboolean)result; return jresult; } SWIGEXPORT void JNICALL 
/* ----------------------------------------------------------------------
 * SWIG-generated JNI wrappers (C++) for RCF::RcfProtoChannel.
 * NOTE(review): this code is machine-generated by SWIG — fix defects in
 * the SWIG .i interface file and regenerate rather than hand-editing.
 * Conventions used throughout: a Java long carries a native pointer,
 * unpacked via *(T **)&jlong; every wrapped call is bracketed by
 * try/catch blocks that convert C++ exceptions into java.lang.Exception.
 * The generated error path does not NULL-check FindClass() before
 * ThrowNew() — standard SWIG output, but relevant when debugging crashes
 * that occur while an error is already being reported.
 * ---------------------------------------------------------------------- */

/* Continuation: the `SWIGEXPORT void JNICALL` prefix of this definition is
   on the preceding (unseen) line.  Forwards an int to
   RcfProtoChannel::setPingBackIntervalMs. */
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setPingBackIntervalMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  int arg2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;  /* unpack native pointer from jlong */
  arg2 = (int)jarg2;
  {
    try {
      (arg1)->setPingBackIntervalMs(arg2);
    } catch(const RCF::Exception & e) {
      /* Bridge C++ errors to java.lang.Exception on the Java side. */
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Int getter: RcfProtoChannel::getPingBackIntervalMs(). */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getPingBackIntervalMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  int result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (int)(arg1)->getPingBackIntervalMs();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* std::string setter: copies the jstring (modified UTF-8 via
   GetStringUTFChars) into a stack std::string, releases the JNI chars,
   then calls RcfProtoChannel::setHttpProxy. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setHttpProxy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  std::string *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string");
    return ;
  }
  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
  if (!arg2_pstr) return ;  /* pending OutOfMemoryError from the VM */
  std::string arg2_str(arg2_pstr);
  arg2 = &arg2_str;
  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setHttpProxy((std::string const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* std::string getter: returns RcfProtoChannel::getHttpProxy() as a new
   Java String (UTF). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getHttpProxy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  std::string result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getHttpProxy();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = jenv->NewStringUTF((&result)->c_str());
  return jresult;
}

/* Int setter: RcfProtoChannel::setHttpProxyPort. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setHttpProxyPort(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  int arg2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  arg2 = (int)jarg2;
  {
    try {
      (arg1)->setHttpProxyPort(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Int getter: RcfProtoChannel::getHttpProxyPort(). */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getHttpProxyPort(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  int result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (int)(arg1)->getHttpProxyPort();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* Enum getter: RCF::TransportType marshalled to Java as a plain jint. */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getTransportType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::TransportType result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (RCF::TransportType)(arg1)->getTransportType();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* RCF::tstring setter: RcfProtoChannel::setUsername.  The converter below
   (continued on the next line) builds a std::wstring from UTF-16 jchars,
   so RCF::tstring is evidently std::wstring in this build. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setUsername(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv,
SWIG_JavaNullPointerException, "null std::wstring"); return ; }
  /* Copy the UTF-16 jchars into a std::wstring, widening each unit. */
  const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0);
  if (!arg2_pstr) return ;
  jsize arg2_len = jenv->GetStringLength(jarg2);
  std::wstring arg2_str;
  if (arg2_len) {
    arg2_str.reserve(arg2_len);
    for (jsize i = 0; i < arg2_len; ++i) {
      arg2_str.push_back((wchar_t)arg2_pstr[i]);
    }
  }
  arg2 = &arg2_str;
  jenv->ReleaseStringChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setUsername((RCF::tstring const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* RCF::tstring getter: RcfProtoChannel::getUsername(), converted back to a
   Java String by narrowing each wide char to a jchar.
   NOTE(review): (jchar)result[i] truncates code units above 0xFFFF —
   presumably fine for the values involved; confirm if non-BMP text is
   possible. */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getUsername(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getUsername();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jsize result_len = (&result)->length();
  jchar *conv_buf = new jchar[result_len];
  for (jsize i = 0; i < result_len; ++i) {
    conv_buf[i] = (jchar)result[i];
  }
  jresult = jenv->NewString(conv_buf, result_len);
  delete [] conv_buf;
  return jresult;
}

/* RCF::tstring setter: RcfProtoChannel::setPassword (same wstring
   conversion as setUsername). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setPassword(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring");
    return ;
  }
  const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0);
  if (!arg2_pstr) return ;
  jsize arg2_len = jenv->GetStringLength(jarg2);
  std::wstring arg2_str;
  if (arg2_len) {
    arg2_str.reserve(arg2_len);
    for (jsize i = 0; i < arg2_len; ++i) {
      arg2_str.push_back((wchar_t)arg2_pstr[i]);
    }
  }
  arg2 = &arg2_str;
  jenv->ReleaseStringChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setPassword((RCF::tstring const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* RCF::tstring getter: RcfProtoChannel::getPassword() (same narrowing
   conversion as getUsername). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getPassword(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getPassword();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jsize result_len = (&result)->length();
  jchar *conv_buf = new jchar[result_len];
  for (jsize i = 0; i < result_len; ++i) {
    conv_buf[i] = (jchar)result[i];
  }
  jresult = jenv->NewString(conv_buf, result_len);
  delete [] conv_buf;
  return jresult;
}

/* RCF::tstring setter: RcfProtoChannel::setKerberosSpn. */
SWIGEXPORT void JNICALL
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setKerberosSpn(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring");
    return ;
  }
  const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0);
  if (!arg2_pstr) return ;
  jsize arg2_len = jenv->GetStringLength(jarg2);
  std::wstring arg2_str;
  if (arg2_len) {
    arg2_str.reserve(arg2_len);
    for (jsize i = 0; i < arg2_len; ++i) {
      arg2_str.push_back((wchar_t)arg2_pstr[i]);
    }
  }
  arg2 = &arg2_str;
  jenv->ReleaseStringChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setKerberosSpn((RCF::tstring const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* RCF::tstring getter: RcfProtoChannel::getKerberosSpn() (conversion loop
   continues on the next line). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getKerberosSpn(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getKerberosSpn();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jsize result_len = (&result)->length();
  jchar *conv_buf = new jchar[result_len];
  for (jsize i = 0; i <
result_len; ++i) { conv_buf[i] = (jchar)result[i]; }
  jresult = jenv->NewString(conv_buf, result_len);
  delete [] conv_buf;
  return jresult;
}

/* Bool setter: RcfProtoChannel::setEnableCompression. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setEnableCompression(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jboolean jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  bool arg2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  arg2 = jarg2 ? true : false;
  {
    try {
      (arg1)->setEnableCompression(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Bool getter: RcfProtoChannel::getEnableCompression(). */
SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getEnableCompression(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jboolean jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  bool result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (bool)(arg1)->getEnableCompression();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jboolean)result;
  return jresult;
}

/* Smart-pointer setter: the second jlong carries a heap-allocated
   RCF::CertificatePtr*; a NULL handle is passed as a default-constructed
   (empty) CertificatePtr. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::CertificatePtr arg2 ;
  RCF::CertificatePtr *argp2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  (void)jarg2_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  argp2 = *(RCF::CertificatePtr **)&jarg2;
  if (argp2) arg2 = *argp2;
  {
    try {
      (arg1)->setCertificate(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Smart-pointer getter: a non-empty CertificatePtr is copied onto the heap
   and its address returned as the Java handle (freed by the Java-side
   proxy's delete call); an empty pointer maps to handle 0. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jlong jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::CertificatePtr result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getCertificate();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  *(RCF::CertificatePtr **)&jresult = result ? new RCF::CertificatePtr(result) : 0;
  return jresult;
}

/* Smart-pointer setter: RcfProtoChannel::setCaCertificate. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setCaCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::CertificatePtr arg2 ;
  RCF::CertificatePtr *argp2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  (void)jarg2_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  argp2 = *(RCF::CertificatePtr **)&jarg2;
  if (argp2) arg2 = *argp2;
  {
    try {
      (arg1)->setCaCertificate(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Smart-pointer getter: RcfProtoChannel::getCaCertificate() (heap copy
   continues on the next line). */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getCaCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jlong jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::CertificatePtr result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (arg1)->getCaCertificate();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  *(RCF::CertificatePtr **)&jresult = result ?
new RCF::CertificatePtr(result) : 0;
  return jresult;
}

/* std::string setter: RcfProtoChannel::setOpenSslCipherSuite. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setOpenSslCipherSuite(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  std::string *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string");
    return ;
  }
  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
  if (!arg2_pstr) return ;
  std::string arg2_str(arg2_pstr);
  arg2 = &arg2_str;
  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setOpenSslCipherSuite((std::string const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* std::string getter: const-qualified RcfProtoChannel::getOpenSslCipherSuite(). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getOpenSslCipherSuite(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  std::string result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = ((RCF::RcfProtoChannel const *)arg1)->getOpenSslCipherSuite();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = jenv->NewStringUTF((&result)->c_str());
  return jresult;
}

/* RCF::tstring setter: setEnableSchannelCertificateValidation (takes a
   wide string, same conversion as setUsername). */
SWIGEXPORT void JNICALL
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setEnableSchannelCertificateValidation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring");
    return ;
  }
  const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0);
  if (!arg2_pstr) return ;
  jsize arg2_len = jenv->GetStringLength(jarg2);
  std::wstring arg2_str;
  if (arg2_len) {
    arg2_str.reserve(arg2_len);
    for (jsize i = 0; i < arg2_len; ++i) {
      arg2_str.push_back((wchar_t)arg2_pstr[i]);
    }
  }
  arg2 = &arg2_str;
  jenv->ReleaseStringChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->setEnableSchannelCertificateValidation((RCF::tstring const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* RCF::tstring getter: const-qualified
   getEnableSchannelCertificateValidation() (second catch and the
   jchar conversion continue on the next line). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getEnableSchannelCertificateValidation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::tstring result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = ((RCF::RcfProtoChannel const *)arg1)->getEnableSchannelCertificateValidation();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType,
msg.c_str()); return 0; } }
  jsize result_len = (&result)->length();
  jchar *conv_buf = new jchar[result_len];
  for (jsize i = 0; i < result_len; ++i) {
    conv_buf[i] = (jchar)result[i];
  }
  jresult = jenv->NewString(conv_buf, result_len);
  delete [] conv_buf;
  return jresult;
}

/* Raw-pointer setter: installs an RCF::_SwigCallback* (the Java-side
   director object) as the certificate-validation callback. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1_1setCertificateValidationCallback(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::_SwigCallback *arg2 = (RCF::_SwigCallback *) 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  (void)jarg2_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  arg2 = *(RCF::_SwigCallback **)&jarg2;
  {
    try {
      (arg1)->_setCertificateValidationCallback(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Enum setter: jint cast straight to RCF::SslImplementation. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1setSslImplementation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::SslImplementation arg2 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  arg2 = (RCF::SslImplementation)jarg2;
  {
    try {
      (arg1)->setSslImplementation(arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Enum getter for getSslImplementation (definition continues on the
   next line). */
SWIGEXPORT jint JNICALL
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoChannel_1getSslImplementation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  RCF::SslImplementation result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      result = (RCF::SslImplementation)((RCF::RcfProtoChannel const *)arg1)->getSslImplementation();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* Destructor wrapper: deletes the native RcfProtoChannel owned by the
   Java proxy. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1RcfProtoChannel(JNIEnv *jenv, jclass jcls, jlong jarg1) {
  RCF::RcfProtoChannel *arg1 = (RCF::RcfProtoChannel *) 0 ;
  (void)jenv;
  (void)jcls;
  arg1 = *(RCF::RcfProtoChannel **)&jarg1;
  {
    try {
      delete arg1;
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Session: int getter for _GetRequestBufferLength(). */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1GetRequestBufferLength(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  int result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (int)(arg1)->_GetRequestBufferLength();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* Session: copies the request into a caller-supplied Java byte[] by
   pinning it with GetByteArrayElements.
   NOTE(review): if the wrapped call throws, the catch clauses `return`
   BEFORE ReleaseByteArrayElements, leaking the pinned array (and never
   copying it back).  Fix belongs in the SWIG typemap, not here. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1GetRequestBuffer(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  char *arg2 = (char *) 0 ;
  size_t arg3 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    arg2 = (char *) jenv->GetByteArrayElements(jarg2, 0);
    arg3 = (size_t) jenv->GetArrayLength(jarg2);
  }
  {
    try {
      (arg1)->_GetRequestBuffer(arg2,arg3);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
  {
    /* Mode 0: copy back to the Java array and release the pin. */
    jenv->ReleaseByteArrayElements(jarg2, (jbyte *)arg2, 0);
  }
}

/* Session: feeds a Java byte[] to _SetResponseBuffer (same pin/release
   pattern — and the same exception-path release leak — as above). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1SetResponseBuffer(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jbyteArray jarg2) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  char *arg2 = (char *) 0 ;
  size_t arg3 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    arg2 = (char *) jenv->GetByteArrayElements(jarg2, 0);
    arg3 = (size_t) jenv->GetArrayLength(jarg2);
  }
  {
    try {
      (arg1)->_SetResponseBuffer(arg2,arg3);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
  {
    jenv->ReleaseByteArrayElements(jarg2, (jbyte *)arg2, 0);
  }
}

/* Session: std::string setter for SetFailed (error-reporting hook). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1SetFailed(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::string *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string");
    return ;
  }
  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
  if (!arg2_pstr) return ;
  std::string arg2_str(arg2_pstr);
  arg2 = &arg2_str;
  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->SetFailed((std::string const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Session: bool getter for const IsCanceled(). */
SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1IsCanceled(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jboolean jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  bool result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (bool)((RCF::RcfProtoSession const *)arg1)->IsCanceled();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jboolean)result;
  return jresult;
}

/* Session: _Commit(const std::string&) overload (signature continues on
   the next line). */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1Commit_1_1SWIG_10(JNIEnv
*jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::string *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string");
    return ;
  }
  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
  if (!arg2_pstr) return ;
  std::string arg2_str(arg2_pstr);
  arg2 = &arg2_str;
  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->_Commit((std::string const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Session: zero-argument _Commit() overload. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1Commit_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      (arg1)->_Commit();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Session: returns a copy of the request buffer as a Java String (UTF).
   NOTE(review): NewStringUTF treats the bytes as modified UTF-8 —
   presumably the request payload is passed as raw bytes in a String;
   confirm against the Java caller. */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1GetRequestBuffer_1WithCopy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::string result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (arg1)->_GetRequestBuffer_WithCopy();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = jenv->NewStringUTF((&result)->c_str());
  return jresult;
}

/* Session: std::string setter for _SetResponseBuffer_WithCopy. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1_1SetResponseBuffer_1WithCopy(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::string *arg2 = 0 ;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  if(!jarg2) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string");
    return ;
  }
  const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0);
  if (!arg2_pstr) return ;
  std::string arg2_str(arg2_pstr);
  arg2 = &arg2_str;
  jenv->ReleaseStringUTFChars(jarg2, arg2_pstr);
  {
    try {
      (arg1)->_SetResponseBuffer_WithCopy((std::string const &)*arg2);
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Session: RCF::tstring getter for getClientUsername() (same wide-to-jchar
   narrowing conversion as the channel string getters). */
SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getClientUsername(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jstring jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  RCF::tstring result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (arg1)->getClientUsername();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType =
jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; }
    catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jsize result_len = (&result)->length();
  jchar *conv_buf = new jchar[result_len];
  for (jsize i = 0; i < result_len; ++i) {
    conv_buf[i] = (jchar)result[i];
  }
  jresult = jenv->NewString(conv_buf, result_len);
  delete [] conv_buf;
  return jresult;
}

/* Session: enum getter, RCF::TransportProtocol as jint. */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getTransportProtocol(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  RCF::TransportProtocol result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (RCF::TransportProtocol)(arg1)->getTransportProtocol();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jint)result;
  return jresult;
}

/* Session: enum getter, RCF::TransportType as jint (second catch
   continues on the next line). */
SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getTransportType(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jint jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  RCF::TransportType result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (RCF::TransportType)(arg1)->getTransportType();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType =
jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } }
  jresult = (jint)result;
  return jresult;
}

/* Session: bool getter for getEnableCompression(). */
SWIGEXPORT jboolean JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getEnableCompression(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jboolean jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  bool result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (bool)(arg1)->getEnableCompression();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jboolean)result;
  return jresult;
}

/* Session: std::size_t getter for const getConnectionDuration(),
   widened to jlong. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getConnectionDuration(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jlong jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::size_t result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = ((RCF::RcfProtoSession const *)arg1)->getConnectionDuration();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jlong)result;
  return jresult;
}

/* Session: std::size_t getter for const getRemoteCallCount(). */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getRemoteCallCount(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jlong jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  std::size_t result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = ((RCF::RcfProtoSession const *)arg1)->getRemoteCallCount();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  jresult = (jlong)result;
  return jresult;
}

/* Session: uint64 getter marshalled as java.math.BigInteger.  The 9-byte
   big-endian buffer has a leading 0x00 sign byte so the BigInteger is
   always non-negative; bytes 1..8 are the value MSB-first.
   NOTE(review): generated code does not NULL-check NewByteArray /
   GetByteArrayElements / FindClass before use — OOM here would crash
   rather than throw; a typemap-level fix would add checks. */
SWIGEXPORT jobject JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getTotalBytesReceived(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jobject jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  boost::uint64_t result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (boost::uint64_t)((RCF::RcfProtoSession const *)arg1)->getTotalBytesReceived();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  {
    jbyteArray ba = jenv->NewByteArray(9);
    jbyte* bae = jenv->GetByteArrayElements(ba, 0);
    jclass clazz = jenv->FindClass("java/math/BigInteger");
    jmethodID mid = jenv->GetMethodID(clazz, "<init>", "([B)V");
    jobject bigint;
    int i;
    bae[0] = 0;  /* sign byte: force non-negative */
    for(i=1; i<9; i++ ) {
      bae[i] = (jbyte)(result>>8*(8-i));
    }
    jenv->ReleaseByteArrayElements(ba, bae, 0);
    bigint = jenv->NewObject(clazz, mid, ba);
    jresult = bigint;
  }
  return jresult;
}

/* Session: uint64 getter for getTotalBytesSent(), same BigInteger
   marshalling as getTotalBytesReceived. */
SWIGEXPORT jobject JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoSession_1getTotalBytesSent(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) {
  jobject jresult = 0 ;
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  boost::uint64_t result;
  (void)jenv;
  (void)jcls;
  (void)jarg1_;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      result = (boost::uint64_t)((RCF::RcfProtoSession const *)arg1)->getTotalBytesSent();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return 0;
    }
  }
  {
    jbyteArray ba = jenv->NewByteArray(9);
    jbyte* bae = jenv->GetByteArrayElements(ba, 0);
    jclass clazz = jenv->FindClass("java/math/BigInteger");
    jmethodID mid = jenv->GetMethodID(clazz, "<init>", "([B)V");
    jobject bigint;
    int i;
    bae[0] = 0;
    for(i=1; i<9; i++ ) {
      bae[i] = (jbyte)(result>>8*(8-i));
    }
    jenv->ReleaseByteArrayElements(ba, bae, 0);
    bigint = jenv->NewObject(clazz, mid, ba);
    jresult = bigint;
  }
  return jresult;
}

/* Destructor wrapper: deletes the native RcfProtoSession. */
SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1RcfProtoSession(JNIEnv *jenv, jclass jcls, jlong jarg1) {
  RCF::RcfProtoSession *arg1 = (RCF::RcfProtoSession *) 0 ;
  (void)jenv;
  (void)jcls;
  arg1 = *(RCF::RcfProtoSession **)&jarg1;
  {
    try {
      delete arg1;
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    } catch (std::exception & e) {
      std::string msg = e.what();
      jclass excType = jenv->FindClass("java/lang/Exception");
      jenv->ThrowNew(excType, msg.c_str());
      return ;
    }
  }
}

/* Default constructor wrapper for RcfProtoServer; the definition runs past
   this chunk and continues on the next (unseen) line. */
SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1RcfProtoServer_1_1SWIG_10(JNIEnv *jenv, jclass jcls) {
  jlong jresult = 0 ;
  RCF::RcfProtoServer *result = 0 ;
  (void)jenv;
  (void)jcls;
  {
    try {
      result = (RCF::RcfProtoServer *)new RCF::RcfProtoServer();
    } catch(const RCF::Exception & e) {
      std::string msg = e.getErrorString();
      jclass excType = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::RcfProtoServer **)&jresult = result; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_new_1RcfProtoServer_1_1SWIG_11(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::Endpoint *arg1 = 0 ; RCF::RcfProtoServer *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::Endpoint **)&jarg1; if (!arg1) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "RCF::Endpoint const & reference is null"); return 0; } { try { result = (RCF::RcfProtoServer *)new RCF::RcfProtoServer((RCF::Endpoint const &)*arg1); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::RcfProtoServer **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_delete_1RcfProtoServer(JNIEnv *jenv, jclass jcls, jlong jarg1) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; (void)jenv; (void)jcls; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { delete arg1; } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1start(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { 
RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { (arg1)->start(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1stop(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { (arg1)->stop(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1_1setCallbackTable(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::_SwigCallback *arg2 = (RCF::_SwigCallback *) 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = *(RCF::_SwigCallback **)&jarg2; { try { (arg1)->_setCallbackTable(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT void JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setThreadPool(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::ThreadPoolPtr arg2 ; RCF::ThreadPoolPtr *argp2 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; argp2 = *(RCF::ThreadPoolPtr **)&jarg2; if (argp2) arg2 = *argp2; { try { (arg1)->setThreadPool(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getThreadPool(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::ThreadPoolPtr result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (arg1)->getThreadPool(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ThreadPoolPtr **)&jresult = result ? 
new RCF::ThreadPoolPtr(result) : 0; return jresult; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1addEndpoint(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::Endpoint *arg2 = 0 ; RCF::ServerTransport *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = *(RCF::Endpoint **)&jarg2; if (!arg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "RCF::Endpoint const & reference is null"); return 0; } { try { result = (RCF::ServerTransport *) &(arg1)->addEndpoint((RCF::Endpoint const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::ServerTransport **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setSupportedTransportProtocols(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; std::vector< RCF::TransportProtocol > *arg2 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = *(std::vector< RCF::TransportProtocol > **)&jarg2; if (!arg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "std::vector< RCF::TransportProtocol > const & reference is null"); return ; } { try { (arg1)->setSupportedTransportProtocols((std::vector< RCF::TransportProtocol > const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, 
msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getSupportedTransportProtocols(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; std::vector< RCF::TransportProtocol > *result = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (std::vector< RCF::TransportProtocol > *) &((RCF::RcfProtoServer const *)arg1)->getSupportedTransportProtocols(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(std::vector< RCF::TransportProtocol > **)&jresult = result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setSessionTimeoutMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; boost::uint32_t arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = (boost::uint32_t)jarg2; { try { (arg1)->setSessionTimeoutMs(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getSessionTimeoutMs(JNIEnv *jenv, jclass 
jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; boost::uint32_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (boost::uint32_t)(arg1)->getSessionTimeoutMs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setSessionHarvestingIntervalMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; boost::uint32_t arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = (boost::uint32_t)jarg2; { try { (arg1)->setSessionHarvestingIntervalMs(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getSessionHarvestingIntervalMs(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; boost::uint32_t result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (boost::uint32_t)(arg1)->getSessionHarvestingIntervalMs(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); 
jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jlong)result; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::CertificatePtr arg2 ; RCF::CertificatePtr *argp2 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; argp2 = *(RCF::CertificatePtr **)&jarg2; if (argp2) arg2 = *argp2; { try { (arg1)->setCertificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (arg1)->getCertificate(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::CertificatePtr **)&jresult = result ? 
new RCF::CertificatePtr(result) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setOpenSslCipherSuite(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; std::string *arg2 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::string"); return ; } const char *arg2_pstr = (const char *)jenv->GetStringUTFChars(jarg2, 0); if (!arg2_pstr) return ; std::string arg2_str(arg2_pstr); arg2 = &arg2_str; jenv->ReleaseStringUTFChars(jarg2, arg2_pstr); { try { (arg1)->setOpenSslCipherSuite((std::string const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getOpenSslCipherSuite(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; std::string result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = ((RCF::RcfProtoServer const *)arg1)->getOpenSslCipherSuite(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = jenv->NewStringUTF((&result)->c_str()); return jresult; } SWIGEXPORT void JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setCaCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jlong jarg2, jobject jarg2_) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::CertificatePtr arg2 ; RCF::CertificatePtr *argp2 ; (void)jenv; (void)jcls; (void)jarg1_; (void)jarg2_; arg1 = *(RCF::RcfProtoServer **)&jarg1; argp2 = *(RCF::CertificatePtr **)&jarg2; if (argp2) arg2 = *argp2; { try { (arg1)->setCaCertificate(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getCaCertificate(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jlong jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::CertificatePtr result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (arg1)->getCaCertificate(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } *(RCF::CertificatePtr **)&jresult = result ? 
new RCF::CertificatePtr(result) : 0; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setEnableSchannelCertificateValidation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jstring jarg2) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::tstring *arg2 = 0 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; if(!jarg2) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null std::wstring"); return ; } const jchar *arg2_pstr = jenv->GetStringChars(jarg2, 0); if (!arg2_pstr) return ; jsize arg2_len = jenv->GetStringLength(jarg2); std::wstring arg2_str; if (arg2_len) { arg2_str.reserve(arg2_len); for (jsize i = 0; i < arg2_len; ++i) { arg2_str.push_back((wchar_t)arg2_pstr[i]); } } arg2 = &arg2_str; jenv->ReleaseStringChars(jarg2, arg2_pstr); { try { (arg1)->setEnableSchannelCertificateValidation((RCF::tstring const &)*arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jstring JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getEnableSchannelCertificateValidation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jstring jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::tstring result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = ((RCF::RcfProtoServer const *)arg1)->getEnableSchannelCertificateValidation(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = 
jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jsize result_len = (&result)->length(); jchar *conv_buf = new jchar[result_len]; for (jsize i = 0; i < result_len; ++i) { conv_buf[i] = (jchar)result[i]; } jresult = jenv->NewString(conv_buf, result_len); delete [] conv_buf; return jresult; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1setSslImplementation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_, jint jarg2) { RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::SslImplementation arg2 ; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; arg2 = (RCF::SslImplementation)jarg2; { try { (arg1)->setSslImplementation(arg2); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return ; } } } SWIGEXPORT jint JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_RcfProtoServer_1getSslImplementation(JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_) { jint jresult = 0 ; RCF::RcfProtoServer *arg1 = (RCF::RcfProtoServer *) 0 ; RCF::SslImplementation result; (void)jenv; (void)jcls; (void)jarg1_; arg1 = *(RCF::RcfProtoServer **)&jarg1; { try { result = (RCF::SslImplementation)((RCF::RcfProtoServer const *)arg1)->getSslImplementation(); } catch(const RCF::Exception & e) { std::string msg = e.getErrorString(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } catch (std::exception & e) { std::string msg = e.what(); jclass excType = jenv->FindClass("java/lang/Exception"); jenv->ThrowNew(excType, msg.c_str()); return 0; } } jresult = (jint)result; return jresult; } SWIGEXPORT jlong JNICALL 
Java_com_deltavsoft_rcfproto_RCFProtoJNI_LogToStdout_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::LogTarget **)&baseptr = *(RCF::LogToStdout **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_LogToDebugWindow_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::LogTarget **)&baseptr = *(RCF::LogToDebugWindow **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_LogToEventLog_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::LogTarget **)&baseptr = *(RCF::LogToEventLog **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_LogToFile_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::LogTarget **)&baseptr = *(RCF::LogToFile **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_PemCertificate_1SWIGSmartPtrUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; boost::shared_ptr< RCF::PemCertificate > *argp1; boost::shared_ptr< RCF::Certificate > result; (void)jenv; (void)jcls; argp1 = *(boost::shared_ptr< RCF::PemCertificate > **)&jarg1; *(boost::shared_ptr< RCF::Certificate > **)&baseptr = argp1 ? new boost::shared_ptr< RCF::Certificate >(*argp1) : 0; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_X509Certificate_1SWIGSmartPtrUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; boost::shared_ptr< RCF::X509Certificate > *argp1; boost::shared_ptr< RCF::Certificate > result; (void)jenv; (void)jcls; argp1 = *(boost::shared_ptr< RCF::X509Certificate > **)&jarg1; *(boost::shared_ptr< RCF::Certificate > **)&baseptr = argp1 ? 
new boost::shared_ptr< RCF::Certificate >(*argp1) : 0; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32Certificate_1SWIGSmartPtrUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; boost::shared_ptr< RCF::Win32Certificate > *argp1; boost::shared_ptr< RCF::Certificate > result; (void)jenv; (void)jcls; argp1 = *(boost::shared_ptr< RCF::Win32Certificate > **)&jarg1; *(boost::shared_ptr< RCF::Certificate > **)&baseptr = argp1 ? new boost::shared_ptr< RCF::Certificate >(*argp1) : 0; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_PfxCertificate_1SWIGSmartPtrUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; boost::shared_ptr< RCF::PfxCertificate > *argp1; boost::shared_ptr< RCF::Win32Certificate > result; (void)jenv; (void)jcls; argp1 = *(boost::shared_ptr< RCF::PfxCertificate > **)&jarg1; *(boost::shared_ptr< RCF::Win32Certificate > **)&baseptr = argp1 ? new boost::shared_ptr< RCF::Win32Certificate >(*argp1) : 0; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_StoreCertificate_1SWIGSmartPtrUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; boost::shared_ptr< RCF::StoreCertificate > *argp1; boost::shared_ptr< RCF::Win32Certificate > result; (void)jenv; (void)jcls; argp1 = *(boost::shared_ptr< RCF::StoreCertificate > **)&jarg1; *(boost::shared_ptr< RCF::Win32Certificate > **)&baseptr = argp1 ? 
new boost::shared_ptr< RCF::Win32Certificate >(*argp1) : 0; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_TcpEndpoint_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::Endpoint **)&baseptr = *(RCF::TcpEndpoint **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_Win32NamedPipeEndpoint_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::Endpoint **)&baseptr = *(RCF::Win32NamedPipeEndpoint **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_UnixLocalEndpoint_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::Endpoint **)&baseptr = *(RCF::UnixLocalEndpoint **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_HttpEndpoint_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::TcpEndpoint **)&baseptr = *(RCF::HttpEndpoint **)&jarg1; return baseptr; } SWIGEXPORT jlong JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_HttpsEndpoint_1SWIGUpcast(JNIEnv *jenv, jclass jcls, jlong jarg1) { jlong baseptr = 0; (void)jenv; (void)jcls; *(RCF::TcpEndpoint **)&baseptr = *(RCF::HttpsEndpoint **)&jarg1; return baseptr; } SWIGEXPORT void JNICALL Java_com_deltavsoft_rcfproto_RCFProtoJNI_swig_1module_1init(JNIEnv *jenv, jclass jcls) { int i; static struct { const char *method; const char *signature; } methods[3] = { { "SwigDirector__SwigCallback_Run", "(Lcom/deltavsoft/rcfproto/_SwigCallback;)V" }, { "SwigDirector__SwigCallback_ProtoRpcBegin", "(Lcom/deltavsoft/rcfproto/_SwigCallback;JJJLjava/lang/String;I)V" }, { "SwigDirector__SwigCallback_ValidateCertificate", "(Lcom/deltavsoft/rcfproto/_SwigCallback;J)Z" } }; Swig::jclass_RCFProtoJNI = (jclass) jenv->NewGlobalRef(jcls); if (!Swig::jclass_RCFProtoJNI) return; for (i = 
0; i < (int) (sizeof(methods)/sizeof(methods[0])); ++i) { Swig::director_methids[i] = jenv->GetStaticMethodID(jcls, methods[i].method, methods[i].signature); if (!Swig::director_methids[i]) return; } } #ifdef __cplusplus } #endif
gpl-2.0
klaussilveira/flQuake
engine/cvar.c
1
6587
/* Copyright (C) 1996-1997 Id Software, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ // cvar.c -- dynamic variable tracking #include "quakedef.h" cvar_t *cvar_vars; char *cvar_null_string = ""; /* ============ Cvar_FindVar ============ */ cvar_t *Cvar_FindVar(char *var_name) { cvar_t *var; for (var=cvar_vars ; var ; var=var->next) if (!Q_strcmp(var_name, var->name)) { return var; } return NULL; } /* ============ Cvar_VariableValue ============ */ float Cvar_VariableValue(char *var_name) { cvar_t *var; var = Cvar_FindVar(var_name); if (!var) { return 0; } return Q_atof(var->string); } /* ============ Cvar_VariableString ============ */ char *Cvar_VariableString(char *var_name) { cvar_t *var; var = Cvar_FindVar(var_name); if (!var) { return cvar_null_string; } return var->string; } /* ============ Cvar_CompleteVariable ============ */ char *Cvar_CompleteVariable(char *partial) { cvar_t *cvar; int len; len = Q_strlen(partial); if (!len) { return NULL; } // check functions for (cvar=cvar_vars ; cvar ; cvar=cvar->next) if (!Q_strncmp(partial,cvar->name, len)) { return cvar->name; } return NULL; } //Edited qrack command line begin /* ============ Cvar_CompleteCountPossible ============ */ int Cvar_CompleteCountPossible(char *partial) { cvar_t *cvar; int len, c = 0; if (!(len = strlen(partial))) { return 0; } // check partial match for (cvar = 
cvar_vars ; cvar ; cvar = cvar->next) if (!Q_strncasecmp(partial, cvar->name, len)) { c++; } return c; } //Edited qrack command line end /* ============ Cvar_Set ============ */ void Cvar_Set(char *var_name, char *value) { cvar_t *var; qboolean changed; var = Cvar_FindVar(var_name); if (!var) { // there is an error in C code if this happens Con_DPrintf("Cvar_Set: variable %s not found\n", var_name); // edited return; } changed = Q_strcmp(var->string, value); Z_Free(var->string); // free the old value string var->string = Z_Malloc(Q_strlen(value)+1); Q_strcpy(var->string, value); var->value = Q_atof(var->string); if (var->server && changed) { if (sv.active) if (svs.maxclients > 1) { // Edited SV_BroadcastPrintf("\"%s\" changed to \"%s\"\n", var->name, var->string); } } //Heffo - Cvar Callback Function if (var->Cvar_Changed) { var->Cvar_Changed(); } //Heffo - Cvar Callback Function } /* ============ Cvar_SetValue ============ */ extern char *MK_cleanftos(float f); // reduced config file void Cvar_SetValue(char *var_name, float value) { char val[32]; sprintf(val, "%s", MK_cleanftos(value)); // reduced config file Cvar_Set(var_name, val); } /* ============ Cvar_RegisterVariable Adds a freestanding variable to the variable list. 
============ */ void Cvar_RegisterVariable(cvar_t *variable) { char *oldstr; // first check to see if it has allready been defined if (Cvar_FindVar(variable->name)) { Con_Printf("Can't register variable %s, allready defined\n", variable->name); return; } // check for overlap with a command if (Cmd_Exists(variable->name)) { Con_Printf("Cvar_RegisterVariable: %s is a command\n", variable->name); return; } // copy the value off, because future sets will Z_Free it oldstr = variable->string; variable->string = Z_Malloc(Q_strlen(variable->string)+1); Q_strcpy(variable->string, oldstr); variable->value = Q_atof(variable->string); // link the variable in variable->next = cvar_vars; cvar_vars = variable; } //Edited & Heffo - Cvar Callback Function - begin void Cvar_RegisterVariableWithCallback(cvar_t *variable, void *function) { // first check to see if it has allready been defined if (Cvar_FindVar(variable->name)) { Con_Printf("Can't register variable %s, allready defined\n", variable->name); return; } // check for overlap with a command if (Cmd_Exists(variable->name)) { Con_Printf("Cvar_RegisterVariable: %s is a command\n", variable->name); return; } Cvar_RegisterVariable(variable); variable->Cvar_Changed = function; } //Edited & Heffo - Cvar Callback Function - end /* ============ Cvar_Command Handles variable inspection and changing from the console ============ */ qboolean Cvar_Command(void) { cvar_t *v; // check variables v = Cvar_FindVar(Cmd_Argv(0)); if (!v) { return false; } // perform a variable print or set if (Cmd_Argc() == 1) { Con_Printf("\"%s\" is \"%s\"\n", v->name, v->string); return true; } Cvar_Set(v->name, Cmd_Argv(1)); return true; } /* ============ Cvar_WriteVariables Writes lines containing "set variable value" for all variables with the archive flag set to true. 
============ */ qboolean HaveSemicolon(char *s); // reduced config file void Cvar_WriteVariables(FILE *f) { cvar_t *var; for (var = cvar_vars ; var ; var = var->next) if (var->archive) // reduced config file - begin { Cmd_TokenizeString(var->string); if (Cmd_Argc() == 1 && !HaveSemicolon(var->string)) { fprintf(f, "%s %s\n", var->name, var->string); } else // reduced config file - end { fprintf(f, "%s \"%s\"\n", var->name, var->string); } } // reduced config file }
gpl-2.0
iwtwiioi/OnlineJudge
wikioi/Run_304904_Score_60_Date_2013-10-23.cpp
1
1973
#include <iostream> #include <algorithm> #include <cstring> #include <iomanip> #include <cmath> using namespace std; const int oo = 100000000; #define CTOA(x) ((x-1)<<2)+1 struct city{int x[4], y[4];}ct[12]; inline double dis(int x1, int y1, int x2, int y2) { if(x1 == x2) return abs(y1-y2); if(y1 == y2) return abs(x1-x2); return (double)sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)); } double node[50][50]; double T, t; int s, A, B; int tx[3], ty[3]; void getfour(int c) { memcpy(tx, ct[c].x, sizeof(tx)); memcpy(ty, ct[c].y, sizeof(ty)); int tt; while((tx[0]-tx[1])*(tx[2]-tx[1])+(ty[0]-ty[1])*(ty[2]-ty[1])) { tt = tx[0]; tx[0]=tx[1]; tx[1]=tx[2]; tx[2]=tt; tt = ty[0]; ty[0]=ty[1]; ty[1]=ty[2]; ty[2]=tt; } ct[c].x[3] = tx[0]-tx[1]+tx[2]; ct[c].y[3] = ty[0]-ty[1]+ty[2]; } int main() { int N, i, j, k, l, temp; cin >> N; while(N--) { cin >> s >> t >> A >> B; if(A == B) cout << "0.0\n"; int airport = s << 2; for(i = 1; i <= airport; i++) for(j = 1; j <= airport; j++) node[i][j] = oo; for(i = 1; i <= s; i++) { for(j = 0; j < 3; j++) cin >> ct[i].x[j] >> ct[i].y[j]; cin >> T; getfour(i); for(j = 0; j < 4; j++) for(k = 0; k < 4; k++) if(j != k) node[CTOA(i)+j][CTOA(i)+k] = dis(ct[i].x[j], ct[i].y[j], ct[i].x[k], ct[i].y[k]) * T; } for(i = 1; i <= s; i++) for(j = 1; j <= s; j++) if(i != j) for(k = 0; k < 4; k++) for(l = 0; l < 4; l++) node[CTOA(i)+k][CTOA(j)+l] = dis(ct[i].x[k], ct[i].y[k], ct[j].x[l], ct[j].y[l]) * t; for(k = 1; k <= airport; k++) for(i = 1; i <= airport; i++) for(j = 1; j <= airport; j++) node[i][j] = min(node[i][j], node[i][k]+node[k][j]); double ans = oo; for(i = 0; i < 4; i++) for(j = 0; j < 4; j++) ans = min(ans, node[CTOA(A)+i][CTOA(B)+j]); cout << setiosflags(ios::fixed) << setprecision(1) << ans << endl; } return 0; }
gpl-2.0
hajuuk/R7000
ap/gpl/lsync/source/lsyncd.c
1
50232
/* | lsyncd.c Live (Mirror) Syncing Demon | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | This is Lsyncd's core. | | It contains as minimal as possible glues to the operating system needed | for Lsyncd's operation. All high-level logic is coded (when feasable) | into lsyncd.lua | | This code assumes you have a 100 character wide display to view it (when tabstop is 4) | | License: GPLv2 (see COPYING) or any later version | Authors: Axel Kittenberger <axkibe@gmail.com> | */ #include "lsyncd.h" #define SYSLOG_NAMES 1 #include <sys/select.h> #include <sys/stat.h> #include <sys/times.h> #include <sys/types.h> #include <sys/wait.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <limits.h> #include <signal.h> #include <stdbool.h> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <strings.h> #include <syslog.h> #include <math.h> #include <time.h> #include <unistd.h> #define LUA_USE_APICHECK 1 #include <lua.h> #include <lualib.h> #include <lauxlib.h> /* | The Lua part of Lsyncd */ extern const char runner_out[]; extern size_t runner_size; extern const char defaults_out[]; extern size_t defaults_size; /* | Makes sure there is one file system monitor. */ #ifndef LSYNCD_WITH_INOTIFY #ifndef LSYNCD_WITH_FANOTIFY #ifndef LSYNCD_WITH_FSEVENTS # error "need at least one notifcation system. please rerun ./configure" #endif #endif #endif /* | All monitors supported by this Lsyncd. */ static char *monitors[] = { #ifdef LSYNCD_WITH_INOTIFY "inotify", #endif #ifdef LSYNCD_WITH_FANOTIFY "fanotify", #endif #ifdef LSYNCD_WITH_FSEVENTS "fsevents", #endif NULL, }; /** | Configuration parameters that matter to the core */ struct settings settings = { .log_file = NULL, .log_syslog = false, .log_ident = NULL, .log_facility = LOG_USER, .log_level = LOG_NOTICE, .nodaemon = false, }; /* | True when Lsyncd daemonized itself. 
*/ static bool is_daemon = false; /* | The config file loaded by Lsyncd. */ char * lsyncd_config_file = NULL; /* | False after first time Lsyncd started up. | | Configuration error messages are thus written to | stdout/stderr only on first start. | | All other resets (HUP or monitor OVERFLOW) run with 'insist' | implictly turned on and thus Lsyncd does not failing on a non | responding target. */ static bool first_time = true; /* | Set by TERM or HUP signal handler | telling Lsyncd should end or reset ASAP. */ volatile sig_atomic_t hup = 0; volatile sig_atomic_t term = 0; /* | The kernel's clock ticks per second. */ static long clocks_per_sec; /** * signal handler */ void sig_child(int sig) { // nothing } /** * signal handler */ void sig_handler(int sig) { switch (sig) { case SIGTERM: term = 1; return; case SIGHUP: hup = 1; return; } } /* | Non glibc builds need a real tms structure for the times( ) call */ #ifdef __GLIBC__ static struct tms * dummy_tms = NULL; #else static struct tms _dummy_tms; static struct tms * dummy_tms = &_dummy_tms; #endif /* | Returns the absolute path of a path. | | This is a wrapper to various C-Library differences. */ char * get_realpath( const char * rpath ) { // uses c-library to get the absolute path #ifdef __GLIBC__ // in case of GLIBC the task is easy. return realpath( rpath, NULL ); #else # warning having to use old style realpath() // otherwise less so and requires PATH_MAX limit char buf[ PATH_MAX] ; char *asw = realpath( rpath, buf ); if( !asw ) { return NULL; } return s_strdup( asw ); #endif } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Logging ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | A logging category */ struct logcat { char *name; int priority; }; /* | A table of all enabled logging categories. | Sorted by first letter for faster access. */ static struct logcat * logcats[ 26 ] = { 0, }; /* | Returns a positive priority if category is configured to be logged or -1. 
*/ extern int check_logcat( const char *name ) { struct logcat *lc; if( name[ 0 ] < 'A' || name[ 0 ] > 'Z') { return 99; } lc = logcats[ name[ 0 ] - 'A' ]; if( !lc ) { return 99; } while( lc->name ) { if( !strcmp( lc->name, name ) ) { return lc->priority; } lc++; } return 99; } /* | Adds a logging category | | Returns true if OK. */ static bool add_logcat( const char *name, int priority ) { struct logcat *lc; if( !strcmp( "all", name ) ) { settings.log_level = 99; return true; } if( !strcmp( "scarce", name ) ) { settings.log_level = LOG_WARNING; return true; } // categories must start with a capital letter. if( name[ 0 ] < 'A' || name[ 0 ] > 'Z' ) { return false; } if( !logcats[ name[ 0 ]- 'A' ] ) { // an empty capital letter lc = logcats[name[0]-'A'] = s_calloc(2, sizeof(struct logcat)); } else { // length of letter list int ll = 0; // counts list length for( lc = logcats[name[0]-'A']; lc->name; lc++ ) { ll++; } // enlarges list logcats[ name[ 0 ] - 'A'] = s_realloc( logcats[ name[ 0 ]-'A' ], ( ll + 2 ) * sizeof( struct logcat ) ); // goes to the list end for( lc = logcats[ name[ 0 ] - 'A']; lc->name; lc++ ) { if( !strcmp( name, lc->name ) ) { // already there return true; } } } lc->name = s_strdup( name ); lc->priority = priority; // terminates the list lc[ 1 ].name = NULL; return true; } /* | Logs a string. | | Do not call this directly, but the macro logstring( ) | defined in lsyncd.h */ extern void logstring0( int priority, // the priority of the log message const char * cat, // the category const char * message // the log message ) { if( first_time ) { // lsyncd is in it's intial configuration phase. // thus just print to normal stdout/stderr. 
if( priority >= LOG_ERR ) { fprintf( stderr, "%s: %s\n", cat, message); } else { printf( "%s: %s\n", cat, message ); } return; } // writes on console if not daemonized if( !is_daemon ) { char ct[ 255 ]; // gets current timestamp hour:minute:second time_t mtime; time( &mtime ); strftime( ct, sizeof( ct ), "%T", localtime( &mtime ) ); FILE * flog = priority <= LOG_ERR ? stderr : stdout; fprintf( flog, "%s %s: %s\n", ct, cat, message ); } // writes to file if configured so if (settings.log_file) { FILE * flog = fopen( settings.log_file, "a" ); char * ct; time_t mtime; // gets current timestamp day-time-year time( &mtime ); ct = ctime( &mtime ); // cuts trailing linefeed ct[ strlen( ct ) - 1] = 0; if( flog == NULL ) { fprintf( stderr, "Cannot open logfile [%s]!\n", settings.log_file ); exit( -1 ); } fprintf( flog, "%s %s: %s\n", ct, cat, message ); fclose( flog ); } // sends to syslog if configured so if( settings.log_syslog ) { syslog( priority, "%s, %s", cat, message ); } return; } /* | Lets the core print logmessages comfortably as formated string. | This uses the lua_State for it easy string buffers only. */ extern void printlogf0(lua_State *L, int priority, const char *cat, const char *fmt, ...) { va_list ap; va_start(ap, fmt); lua_pushvfstring(L, fmt, ap); va_end(ap); logstring0(priority, cat, luaL_checkstring(L, -1)); lua_pop(L, 1); return; } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Simple memory management ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ // FIXME call the Lua garbace collector in case of out of memory /* | "Secured" calloc */ extern void * s_calloc( size_t nmemb, size_t size ) { void * r = calloc( nmemb, size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return r; } /* | "Secured" malloc */ extern void * s_malloc( size_t size ) { void * r = malloc( size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" 
); exit( -1 ); } return r; } /* | "Secured" realloc */ extern void * s_realloc( void * ptr, size_t size ) { void * r = realloc( ptr, size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return r; } /* | "Secured" strdup */ extern char * s_strdup( const char *src ) { char *s = strdup( src ); if( s == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return s; } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Pipes Management ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | A child process gets text piped through stdin */ struct pipemsg { char * text; // message to send int tlen; // length of text int pos; // position in message }; /* | Called by the core whenever a pipe becomes | writeable again */ static void pipe_writey( lua_State * L, struct observance * observance ) { int fd = observance->fd; struct pipemsg *pm = (struct pipemsg * ) observance->extra; int len = write( fd, pm->text + pm->pos, pm->tlen - pm->pos ); pm->pos += len; if( len < 0 ) { logstring( "Normal", "broken pipe." ); nonobserve_fd( fd ); } else if( pm->pos >= pm->tlen ) { logstring( "Exec", "finished pipe." ); nonobserve_fd(fd); } } /* | Called when cleaning up a pipe. */ static void pipe_tidy( struct observance * observance ) { struct pipemsg *pm = ( struct pipemsg * ) observance->extra; close( observance->fd ); free( pm->text ); free( pm ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Helper Routines ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | Dummy variable of which it's address is used as | the cores index in the lua registry to | the lua runners function table in the lua registry. */ static int runner; /* | Dummy variable of which it's address is used as | the cores index n the lua registry to | the lua runners error handler. */ static int callError; /* | Sets the close-on-exit flag of a file descriptor. 
*/ extern void close_exec_fd( int fd ) { int flags; flags = fcntl( fd, F_GETFD ); if( flags == -1 ) { logstring( "Error", "cannot get descriptor flags!" ); exit( -1 ); } flags |= FD_CLOEXEC; if( fcntl( fd, F_SETFD, flags ) == -1 ) { logstring( "Error", "cannot set descripptor flags!" ); exit( -1 ); } } /* | Sets the non-blocking flag of a file descriptor. */ extern void non_block_fd( int fd ) { int flags; flags = fcntl( fd, F_GETFL ); if( flags == -1 ) { logstring( "Error", "cannot get status flags!" ); exit( -1 ); } flags |= O_NONBLOCK; if( fcntl( fd, F_SETFL, flags ) == -1 ) { logstring( "Error", "cannot set status flags!" ); exit( -1 ); } } /* | Writes a pid file. */ static void write_pidfile( lua_State *L, const char *pidfile ) { FILE* f = fopen( pidfile, "w" ); if( !f ) { printlogf( L, "Error", "Cannot write pidfile; '%s'", pidfile ) ; exit( -1 ); } fprintf( f, "%i\n", getpid( ) ); fclose( f ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Observances ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | List of file descriptor watches. */ static struct observance * observances = NULL; static int observances_len = 0; static int observances_size = 0; /* | List of file descriptors to not observe. | | While working for the oberver lists, it may | not be altered, thus nonobserve stores the | delayed removals. */ static int * nonobservances = NULL; static int nonobservances_len = 0; static int nonobservances_size = 0; /* | True while the observances list is being handled. 
*/ static bool observance_action = false; /* | Core watches a filedescriptor to become ready, | one of read_ready or write_ready may be zero */ extern void observe_fd( int fd, void ( * ready ) (lua_State *, struct observance * ), void ( * writey ) (lua_State *, struct observance * ), void ( * tidy ) (struct observance * ), void *extra ) { int pos; // looks if the fd is already there as pos or // stores the position to insert the new fd in pos for( pos = 0; pos < observances_len; pos++) { if( fd <= observances[ pos ].fd ) { break; } } if( pos < observances_len && observances[ pos ].fd == fd ) { // just updates an existing observance logstring( "Masterloop", "updating fd observance" ); observances[ pos ].ready = ready; observances[ pos ].writey = writey; observances[ pos ].tidy = tidy; observances[ pos ].extra = extra; return; } if( observance_action ) { // FIXME logstring( "Error", "New observances in ready/writey handlers not yet supported" ); exit( -1 ); } if( !tidy ) { logstring( "Error", "internal, tidy() in observe_fd() must not be NULL." ); exit( -1 ); } if( observances_len + 1 > observances_size ) { observances_size = observances_len + 1; observances = s_realloc( observances, observances_size * sizeof( struct observance ) ); } memmove( observances + pos + 1, observances + pos, (observances_len - pos) * sizeof(struct observance) ); observances_len++; observances[ pos ].fd = fd; observances[ pos ].ready = ready; observances[ pos ].writey = writey; observances[ pos ].tidy = tidy; observances[ pos ].extra = extra; } /* | Makes the core no longer watch a filedescriptor. 
*/ extern void nonobserve_fd( int fd ) { int pos; if( observance_action ) { // this function is called through a ready/writey handler // while the core works through the observance list, thus // it does not alter the list, but stores this actions // on a stack nonobservances_len++; if( nonobservances_len > nonobservances_size ) { nonobservances_size = nonobservances_len; nonobservances = s_realloc( nonobservances, nonobservances_size * sizeof( int ) ); } nonobservances[ nonobservances_len - 1 ] = fd; return; } // looks for the fd for( pos = 0; pos < observances_len; pos++ ) { if( observances[ pos ].fd == fd ) { break; } } if( pos >= observances_len ) { logstring( "Error", "internal fail, not observance file descriptor in nonobserve" ); exit( -1 ); } // tidies up the observance observances[ pos ].tidy( observances + pos ); // and moves the list down memmove( observances + pos, observances + pos + 1, (observances_len - pos) * sizeof( struct observance ) ); observances_len--; } /* | A user observance became read-ready. 
*/ static void user_obs_ready( lua_State * L, struct observance * obs ) { int fd = obs->fd; // pushes the ready table on table lua_pushlightuserdata( L, ( void * ) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the error handler lua_pushlightuserdata( L, (void *) &callError ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the user func lua_pushnumber( L, fd ); lua_gettable( L, -3 ); // gives the ufunc the fd lua_pushnumber( L, fd ); // calls the user function if( lua_pcall( L, 1, 0, -3 ) ) { exit( -1 ); } lua_pop( L, 2 ); } /* | A user observance became write-ready */ static void user_obs_writey( lua_State * L, struct observance * obs ) { int fd = obs->fd; // pushes the writey table on table lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the error handler lua_pushlightuserdata(L, (void *) &callError); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the user func lua_pushnumber( L, fd ); lua_gettable( L, -3 ); // gives the user func the fd lua_pushnumber( L, fd ); // calls the user function if( lua_pcall( L, 1, 0, -3 ) ) { exit(-1); } lua_pop( L, 2 ); } /* | Tidies up a user observance | FIXME - give the user a chance to do something in that case! */ static void user_obs_tidy( struct observance *obs ) { close( obs->fd ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Library calls for the runner ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ static void daemonize( lua_State *L ); int l_stackdump( lua_State* L ); /* | Logs a message. 
| | Params on Lua stack: | | 1: loglevel of massage | 2: the string to log */ static int l_log( lua_State *L ) { const char * cat; // log category const char * message; // log message int priority; // log priority cat = luaL_checkstring( L, 1 ); priority = check_logcat( cat ); // skips filtered messages if( priority > settings.log_level ) { return 0; } // replaces non string values { int i; int top = lua_gettop(L); for( i = 1; i <= top; i++ ) { int t = lua_type( L, i ); switch( t ) { case LUA_TTABLE : lua_pushfstring( L, "(Table: %p)", lua_topointer( L, i ) ); lua_replace( L, i ); break; case LUA_TBOOLEAN : if( lua_toboolean( L, i ) ) { lua_pushstring( L, "(true)" ); } else { lua_pushstring( L, "(false)" ); } lua_replace(L, i); break; case LUA_TUSERDATA: { clock_t *c = ( clock_t * ) luaL_checkudata( L, i, "Lsyncd.jiffies" ); double d = *c; d /= clocks_per_sec; lua_pushfstring( L, "(Timestamp: %f)", d ); lua_replace( L, i ); } break; case LUA_TNIL: lua_pushstring( L, "(nil)" ); lua_replace( L, i ); break; } } } // concates if there is more than one string parameter lua_concat( L, lua_gettop( L ) - 1 ); message = luaL_checkstring( L, 2 ); logstring0( priority, cat, message ); return 0; } /* | Returns (on Lua stack) the current kernels | clock state (jiffies) */ extern int l_now(lua_State *L) { clock_t * j = lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *j = times( dummy_tms ); return 1; } /* | Executes a subprocess. Does not wait for it to return. | | Params on Lua stack: | | 1: Path to binary to call | 2: List of string as arguments | or "<" in which case the next argument is a string | that will be piped on stdin. | The arguments will follow that one. | | Returns (Lua stack) the pid on success, 0 on failure. 
*/ static int l_exec( lua_State *L ) { // the binary to call const char *binary = luaL_checkstring(L, 1); // number of arguments int argc = lua_gettop( L ) - 1; // the pid spawned pid_t pid; // the arguments position in the lua arguments int li = 1; // the pipe to text char const * pipe_text = NULL; // the pipes length size_t pipe_len = 0; // the arguments char const ** argv; // pipe file descriptors int pipefd[ 2 ]; int i; // expands tables // and removes nils for( i = 1; i <= lua_gettop( L ); i++ ) { if( lua_isnil( L, i ) ) { lua_remove( L, i ); i--; argc--; continue; } if( lua_istable( L, i ) ) { int tlen; int it; lua_checkstack( L, lua_gettop( L ) + lua_objlen( L, i ) + 1 ); // moves table to top of stack lua_pushvalue( L, i ); lua_remove( L, i ); argc--; tlen = lua_objlen( L, -1 ); for( it = 1; it <= tlen; it++ ) { lua_pushinteger( L, it ); lua_gettable( L, -2 ); lua_insert( L, i ); i++; argc++; } i--; lua_pop( L, 1 ); } } // writes a log message (if needed). if( check_logcat( "Exec" ) <= settings.log_level ) { lua_checkstack( L, lua_gettop( L ) + argc * 3 + 2 ); lua_pushvalue( L, 1 ); for( i = 1; i <= argc; i++ ) { lua_pushstring( L, " [" ); lua_pushvalue( L, i + 1 ); lua_pushstring( L, "]" ); } lua_concat( L, 3 * argc + 1 ); // replaces midfile 0 chars by linefeed size_t len = 0; const char * cs = lua_tolstring( L, -1, &len ); char * s = s_calloc( len + 1, sizeof( char ) ); for( i = 0; i < len; i++ ) { s[ i ] = cs[ i ] ? cs[ i ] : '\n'; } logstring0( LOG_DEBUG, "Exec", s ); free( s ); lua_pop( L, 1 ); } if( argc >= 2 && !strcmp( luaL_checkstring( L, 2 ), "<" ) ) { // pipes something into stdin if( !lua_isstring( L, 3 ) ) { logstring( "Error", "in spawn(), expected a string after pipe '<'" ); exit( -1 ); } pipe_text = lua_tolstring( L, 3, &pipe_len ); if( strlen( pipe_text ) > 0 ) { // creates the pipe if( pipe( pipefd ) == -1 ) { logstring( "Error", "cannot create a pipe!" 
); exit( -1 ); } // always closes the write end for child processes close_exec_fd( pipefd[ 1 ] ); // sets the write end on non-blocking non_block_fd( pipefd[ 1 ] ); } else { pipe_text = NULL; } argc -= 2; li += 2; } // prepares the arguments argv = s_calloc( argc + 2, sizeof( char * ) ); argv[ 0 ] = binary; for( i = 1; i <= argc; i++ ) { argv[i] = luaL_checkstring( L, i + li ); } argv[ i ] = NULL; // the fork! pid = fork( ); if( pid == 0 ) { // replaces stdin for pipes if( pipe_text ) { dup2( pipefd[ 0 ], STDIN_FILENO ); } // if lsyncd runs as a daemon and has a logfile it will redirect // stdout/stderr of child processes to the logfile. if( is_daemon && settings.log_file ) { if( !freopen( settings.log_file, "a", stdout ) ) { printlogf( L, "Error", "cannot redirect stdout to '%s'.", settings.log_file ); } if( !freopen( settings.log_file, "a", stderr ) ) { printlogf( L, "Error", "cannot redirect stderr to '%s'.", settings.log_file ); } } execv( binary, ( char ** ) argv ); // in a sane world execv does not return! printlogf( L, "Error", "Failed executing [ %s ]!", binary ); exit( -1 ); } if( pipe_text ) { int len; // first closes read-end of pipe, this is for child process only close( pipefd[ 0 ] ); // starts filling the pipe len = write( pipefd[ 1 ], pipe_text, pipe_len ); if( len < 0 ) { logstring( "Normal", "immediatly broken pipe." ); close( pipefd[ 1 ] ); } else if( len == pipe_len ) { // usual and best case, the pipe accepted all input -> close close( pipefd[ 1 ] ); logstring( "Exec", "one-sweeped pipe" ); } else { struct pipemsg *pm; logstring( "Exec", "adding pipe observance" ); pm = s_calloc( 1, sizeof( struct pipemsg ) ); pm->text = s_calloc( pipe_len + 1, sizeof( char ) ); memcpy( pm->text, pipe_text, pipe_len + 1 ); pm->tlen = pipe_len; pm->pos = len; observe_fd( pipefd[ 1 ], NULL, pipe_writey, pipe_tidy, pm ); } } free( argv ); lua_pushnumber( L, pid ); return 1; } /* | Converts a relative directory path to an absolute. 
| | Params on Lua stack: | 1: a relative path to directory | | Returns on Lua stack: | The absolute path of directory */ static int l_realdir( lua_State *L ) { luaL_Buffer b; const char *rdir = luaL_checkstring(L, 1); char *adir = get_realpath(rdir); if (!adir) { printlogf(L, "Error", "failure getting absolute path of [%s]", rdir); return 0; } { // makes sure its a directory struct stat st; if (stat(adir, &st)) { printlogf(L, "Error", "cannot get absolute path of dir '%s': %s", rdir, strerror(errno)); free(adir); return 0; } if (!S_ISDIR(st.st_mode)) { printlogf(L, "Error", "cannot get absolute path of dir '%s': is not a directory", rdir); free(adir); return 0; } } // returns absolute path with a concated '/' luaL_buffinit(L, &b); luaL_addstring(&b, adir); luaL_addchar(&b, '/'); luaL_pushresult(&b); free(adir); return 1; } /* | Dumps the Lua stack. | For debugging purposes. */ int l_stackdump( lua_State * L ) { int i; int top = lua_gettop( L ); printlogf( L, "Debug", "total in stack %d", top ); for( i = 1; i <= top; i++ ) { int t = lua_type( L, i ); switch( t ) { case LUA_TSTRING: printlogf( L, "Debug", "%d string: '%s'", i, lua_tostring( L, i ) ); break; case LUA_TBOOLEAN: printlogf( L, "Debug", "%d boolean %s", i, lua_toboolean( L, i ) ? "true" : "false" ); break; case LUA_TNUMBER: printlogf( L, "Debug", "%d number: %g", i, lua_tonumber( L, i ) ); break; default: printlogf( L, "Debug", "%d %s", i, lua_typename( L, t ) ); break; } } return 0; } /* | Reads the directories entries. | | Params on Lua stack: | 1: absolute path to directory | | Returns on Lua stack: | a table of directory names. | names are keys | values are boolean true on dirs. 
*/ static int l_readdir ( lua_State *L ) { const char * dirname = luaL_checkstring( L, 1 ); DIR *d; d = opendir( dirname ); if( d == NULL ) { printlogf( L, "Error", "cannot open dir [%s].", dirname ); return 0; } lua_newtable( L ); while( !hup && !term ) { struct dirent *de = readdir( d ); bool isdir; if( de == NULL ) { // finished break; } // ignores . and .. if( !strcmp( de->d_name, "." ) || !strcmp( de->d_name, ".." ) ) { continue; } if( de->d_type == DT_UNKNOWN ) { // must call stat on some systems :-/ // ( e.g. ReiserFS ) char *entry = s_malloc( strlen( dirname ) + strlen( de->d_name ) + 2 ); struct stat st; strcpy( entry, dirname ); strcat( entry, "/" ); strcat( entry, de->d_name ); lstat( entry, &st ); isdir = S_ISDIR( st.st_mode ); free( entry ); } else { // otherwise readdir can be trusted isdir = de->d_type == DT_DIR; } // adds this entry to the Lua table lua_pushstring( L, de->d_name ); lua_pushboolean( L, isdir ); lua_settable( L, -3 ); } closedir( d ); return 1; } /* | Terminates Lsyncd. | | Params on Lua stack: | 1: exitcode of Lsyncd. | | Does not return. | */ int l_terminate(lua_State *L) { int exitcode = luaL_checkinteger( L, 1 ); exit( exitcode ); return 0; } /* | Configures core parameters. | | Params on Lua stack: | 1: a string, configure option | 2: depends on Param 1 */ static int l_configure( lua_State *L ) { const char * command = luaL_checkstring( L, 1 ); if( !strcmp( command, "running" ) ) { // set by runner after first initialize // from this on log to configurated log end instead of // stdout/stderr first_time = false; if( !settings.nodaemon && !settings.log_file ) { settings.log_syslog = true; const char * log_ident = settings.log_ident ? settings.log_ident : "lsyncd"; openlog( log_ident, 0, settings.log_facility ); } if( !settings.nodaemon && !is_daemon ) { logstring( "Debug", "daemonizing now." 
); daemonize( L ); } if( settings.pidfile ) { write_pidfile( L, settings.pidfile ); } } else if( !strcmp( command, "nodaemon" ) ) { settings.nodaemon = true; } else if( !strcmp( command, "logfile" ) ) { const char * file = luaL_checkstring( L, 2 ); if( settings.log_file ) { free( settings.log_file ); } settings.log_file = s_strdup( file ); } else if( !strcmp( command, "pidfile" ) ) { const char * file = luaL_checkstring( L, 2 ); if( settings.pidfile ) { free( settings.pidfile ); } settings.pidfile = s_strdup( file ); } else if( !strcmp( command, "logfacility" ) ) { if( lua_isstring( L, 2 ) ) { const char * fname = luaL_checkstring( L, 2 ); int i; for( i = 0; facilitynames[ i ].c_name; i++ ) { if( !strcasecmp( fname, facilitynames[ i ].c_name ) ) { break; } } if( !facilitynames[ i ].c_name ) { printlogf( L, "Error", "Logging facility '%s' unknown.", fname ); exit( -1 ); } settings.log_facility = facilitynames[ i ].c_val; } else if (lua_isnumber(L, 2)) { settings.log_facility = luaL_checknumber(L, 2); } else { printlogf( L, "Error", "Logging facility must be a number or string" ); exit( -1 ); } } else if( !strcmp( command, "logident" ) ) { const char * ident = luaL_checkstring( L, 2 ); if (settings.log_ident) { free(settings.log_ident); } settings.log_ident = s_strdup( ident ); } else { printlogf( L, "Error", "Internal error, unknown parameter in l_configure( %s )", command ); exit( -1 ); } return 0; } /* | Allows user scripts to observe filedescriptors | | Params on Lua stack: | 1: file descriptor | 2: function to call when read becomes ready | 3: function to call when write becomes ready */ static int l_observe_fd( lua_State *L ) { int fd = luaL_checknumber( L, 1 ); bool ready = false; bool writey = false; // Stores the user function in the lua registry. 
// It uses the address of the cores ready/write functions // for the user as key if( !lua_isnoneornil( L, 2 ) ) { lua_pushlightuserdata( L, (void *) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); if( lua_isnil( L, -1 ) ) { lua_pop ( L, 1 ); lua_newtable ( L ); lua_pushlightuserdata ( L, (void *) user_obs_ready ); lua_pushvalue ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } lua_pushnumber ( L, fd ); lua_pushvalue ( L, 2 ); lua_settable ( L, -3 ); lua_pop ( L, 1 ); ready = true; } if( !lua_isnoneornil( L, 3 ) ) { lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable (L, LUA_REGISTRYINDEX ); if( lua_isnil(L, -1) ) { lua_pop ( L, 1 ); lua_newtable ( L ); lua_pushlightuserdata ( L, (void *) user_obs_writey ); lua_pushvalue ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } lua_pushnumber ( L, fd ); lua_pushvalue ( L, 3 ); lua_settable ( L, -3 ); lua_pop ( L, 1 ); writey = true; } // tells the core to watch the fd observe_fd( fd, ready ? user_obs_ready : NULL, writey ? user_obs_writey : NULL, user_obs_tidy, NULL ); return 0; } /* | Removes a user observance | | Params on Lua stack: | 1: exitcode of Lsyncd. 
*/ extern int l_nonobserve_fd( lua_State *L ) { int fd = luaL_checknumber( L, 1 ); // removes the read function lua_pushlightuserdata( L, (void *) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); if( !lua_isnil( L, -1 ) ) { lua_pushnumber ( L, fd ); lua_pushnil ( L ); lua_settable ( L, -2 ); } lua_pop( L, 1 ); lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable( L, LUA_REGISTRYINDEX ); if ( !lua_isnil( L, -1 ) ) { lua_pushnumber ( L, fd ); lua_pushnil ( L ); lua_settable ( L, -2 ); } lua_pop( L, 1 ); nonobserve_fd( fd ); return 0; } /* | The Lsnycd's core library */ static const luaL_Reg lsyncdlib[] = { { "configure", l_configure }, { "exec", l_exec }, { "log", l_log }, { "now", l_now }, { "nonobserve_fd", l_nonobserve_fd }, { "observe_fd", l_observe_fd }, { "readdir", l_readdir }, { "realdir", l_realdir }, { "stackdump", l_stackdump }, { "terminate", l_terminate }, { NULL, NULL } }; /* | Adds a number in seconds to a jiffy timestamp. */ static int l_jiffies_add( lua_State *L ) { clock_t *p1 = ( clock_t * ) lua_touserdata( L, 1 ); clock_t *p2 = ( clock_t * ) lua_touserdata( L, 2 ); if( p1 && p2 ) { logstring( "Error", "Cannot add two timestamps!" ); exit( -1 ); } { clock_t a1 = p1 ? *p1 : luaL_checknumber( L, 1 ) * clocks_per_sec; clock_t a2 = p2 ? *p2 : luaL_checknumber( L, 2 ) * clocks_per_sec; clock_t *r = ( clock_t * ) lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *r = a1 + a2; return 1; } } /* | Subracts two jiffy timestamps resulting in a number in seconds | or substracts a jiffy by a number in seconds resulting a jiffy timestamp. 
*/ static int l_jiffies_sub( lua_State *L ) { clock_t *p1 = ( clock_t * ) lua_touserdata( L, 1 ); clock_t *p2 = ( clock_t * ) lua_touserdata( L, 2 ); if( p1 && p2 ) { // substracting two timestamps result in a timespan in seconds clock_t a1 = *p1; clock_t a2 = *p2; lua_pushnumber(L, ((double) (a1 -a2)) / clocks_per_sec); return 1; } // makes a timestamp earlier by NUMBER seconds clock_t a1 = p1 ? *p1 : luaL_checknumber( L, 1 ) * clocks_per_sec; clock_t a2 = p2 ? *p2 : luaL_checknumber( L, 2 ) * clocks_per_sec; clock_t *r = (clock_t *) lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *r = a1 - a2; return 1; } /* | Compares two jiffy timestamps */ static int l_jiffies_eq( lua_State *L ) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, a1 == a2 ); return 1; } /* * True if jiffy1 timestamp is eariler than jiffy2 timestamp */ static int l_jiffies_lt( lua_State *L ) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, time_before( a1, a2 ) ); return 1; } /* | True if jiffy1 before or equals jiffy2 */ static int l_jiffies_le(lua_State *L) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, ( a1 == a2 ) || time_before( a1, a2 ) ); return 1; } /* | Registers the Lsyncd's core library. 
*/ void register_lsyncd( lua_State *L ) { luaL_register( L, LSYNCD_LIBNAME, lsyncdlib ); lua_setglobal( L, LSYNCD_LIBNAME ); // creates the metatable for the jiffies ( timestamps ) userdata luaL_newmetatable( L, "Lsyncd.jiffies" ); int mt = lua_gettop( L ); lua_pushcfunction( L, l_jiffies_add ); lua_setfield( L, mt, "__add" ); lua_pushcfunction( L, l_jiffies_sub ); lua_setfield( L, mt, "__sub" ); lua_pushcfunction( L, l_jiffies_lt ); lua_setfield( L, mt, "__lt" ); lua_pushcfunction( L, l_jiffies_le ); lua_setfield( L, mt, "__le" ); lua_pushcfunction( L, l_jiffies_eq ); lua_setfield( L, mt, "__eq" ); lua_pop( L, 1 ); // pop(mt) #ifdef LSYNCD_WITH_INOTIFY lua_getglobal( L, LSYNCD_LIBNAME ); register_inotify( L ); lua_setfield( L, -2, LSYNCD_INOTIFYLIBNAME ); lua_pop( L, 1 ); #endif if( lua_gettop( L ) ) { logstring( "Error", "internal, stack not empty in lsyncd_register( )" ); exit( -1 ); } } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Lsyncd Core ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | Pushes a function from the runner on the stack. | As well as the callError handler. */ extern void load_runner_func( lua_State * L, const char * name ) { printlogf( L, "Call", "%s( )", name ); // pushes the error handler lua_pushlightuserdata( L, (void *) &callError ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the function lua_pushlightuserdata( L, (void *) &runner ); lua_gettable( L, LUA_REGISTRYINDEX ); lua_pushstring( L, name ); lua_gettable( L, -2 ); lua_remove( L, -2 ); } /* | Daemonizes. | | Lsyncds own implementation over daemon(0, 0) since | a) OSX keeps bugging about it being deprecated | b) for a reason, since blindly closing stdin/out/err | is unsafe, since they might not have existed and | might actually close the monitors fd! 
*/ static void daemonize( lua_State *L ) { pid_t pid, sid; pid = fork( ); if( pid < 0 ) { printlogf( L, "Error", "Failure in daemonize at fork: %s", strerror( errno ) ); exit( -1 ); } if (pid > 0) { // parent process returns to shell exit( 0 ); } // detaches the new process from the parent process sid = setsid( ); if( sid < 0 ) { printlogf( L, "Error", "Failure in daemonize at setsid: %s", strerror( errno ) ); exit( -1 ); } // goes to root dir if( chdir( "/" ) < 0 ) { printlogf( L, "Error", "Failure in daemonize at chdir( \"/\" ): %s", strerror( errno ) ); exit( -1 ); } // does what clibs daemon( 0, 0 ) cannot do, // checks if there were no stdstreams and it might close used fds if( observances_len && observances->fd < 3 ) { printlogf( L, "Normal", "daemonize not closing stdin/out/err, since there seem to none." ); return; } // disconnects stdstreams if ( !freopen( "/dev/null", "r", stdin ) || !freopen( "/dev/null", "w", stdout ) || !freopen( "/dev/null", "w", stderr ) ) { printlogf( L, "Error", "Failure in daemonize at freopen( /dev/null, std[in|out|err] )" ); } is_daemon = true; } /* | Normal operation happens in here. 
*/ static void masterloop(lua_State *L) { while( true ) { bool have_alarm; bool force_alarm = false; clock_t now = times( dummy_tms ); clock_t alarm_time = 0; // memory usage debugging // lua_gc( L, LUA_GCCOLLECT, 0 ); // printf( // "gccount: %d\n", // lua_gc( L, LUA_GCCOUNT, 0 ) * 1024 + lua_gc( L, LUA_GCCOUNTB, 0 ) ); // // queries the runner about the soonest alarm // load_runner_func( L, "getAlarm" ); if( lua_pcall( L, 0, 1, -2 ) ) { exit( -1 ); } if( lua_type( L, -1 ) == LUA_TBOOLEAN) { have_alarm = false; force_alarm = lua_toboolean( L, -1 ); } else { have_alarm = true; alarm_time = *( ( clock_t * ) luaL_checkudata( L, -1, "Lsyncd.jiffies" ) ); } lua_pop( L, 2 ); if( force_alarm || ( have_alarm && time_before_eq( alarm_time, now ) ) ) { // there is a delay that wants to be handled already thus instead // of reading/writing from observances it jumps directly to // handling // TODO: Actually it might be smarter to handler observances // eitherway. since event queues might overflow. logstring( "Masterloop", "immediately handling delays." 
); } else { // uses select( ) to determine what happens next: // a) a new event on an observance // b) an alarm on timeout // c) the return of a child process struct timespec tv; if( have_alarm ) { // TODO use trunc instead of long converstions double d = ( (double )( alarm_time - now ) ) / clocks_per_sec; tv.tv_sec = d; tv.tv_nsec = ( (d - ( long ) d) ) * 1000000000.0; printlogf( L, "Masterloop", "going into select ( timeout %f seconds )", d ); } else { logstring( "Masterloop", "going into select ( no timeout )" ); } // time for Lsyncd to try to put itself to rest into the big select( ) // this configures: // timeouts, // filedescriptors and // signals // that will wake Lsyncd { fd_set rfds; fd_set wfds; sigset_t sigset; int pi, pr; sigemptyset( &sigset ); FD_ZERO( &rfds ); FD_ZERO( &wfds ); for( pi = 0; pi < observances_len; pi++ ) { struct observance *obs = observances + pi; if ( obs->ready ) { FD_SET( obs->fd, &rfds ); } if ( obs->writey ) { FD_SET( obs->fd, &wfds ); } } if( !observances_len ) { logstring( "Error", "Internal fail, no observances, no monitor!" ); exit( -1 ); } // the great select, this is the very heart beat of Lsyncd // that puts Lsyncd to sleep until anything worth noticing // happens pr = pselect( observances[ observances_len - 1 ].fd + 1, &rfds, &wfds, NULL, have_alarm ? &tv : NULL, &sigset ); // something happened! 
if (pr >= 0) { // walks through the observances calling ready/writey observance_action = true; for( pi = 0; pi < observances_len; pi++ ) { struct observance *obs = observances + pi; // Checks for signals if( hup || term ) { break; } // a file descriptor became read-ready if( obs->ready && FD_ISSET( obs->fd, &rfds ) ) { obs->ready(L, obs); } // Checks for signals, again, better safe than sorry if (hup || term) { break; } // FIXME breaks on multiple nonobservances in one beat if( nonobservances_len > 0 && nonobservances[ nonobservances_len - 1 ] == obs->fd ) { continue; } // a file descriptor became write-ready if( obs->writey && FD_ISSET( obs->fd, &wfds ) ) { obs->writey( L, obs ); } } observance_action = false; // works through delayed nonobserve_fd() calls for (pi = 0; pi < nonobservances_len; pi++) { nonobserve_fd(nonobservances[pi]); } nonobservances_len = 0; } } } // collects zombified child processes while( 1 ) { int status; pid_t pid = waitpid( 0, &status, WNOHANG ); if (pid <= 0) { // no more zombies break; } // calls the runner to handle the collection load_runner_func( L, "collectProcess" ); lua_pushinteger( L, pid ); lua_pushinteger( L, WEXITSTATUS( status ) ); if ( lua_pcall( L, 2, 0, -4 ) ) { exit(-1); } lua_pop( L, 1 ); } // reacts on HUP signals if( hup ) { load_runner_func( L, "hup" ); if( lua_pcall( L, 0, 0, -2 ) ) { exit( -1 ); } lua_pop( L, 1 ); hup = 0; } // reacts on TERM signals if( term == 1 ) { load_runner_func( L, "term" ); if( lua_pcall( L, 0, 0, -2 ) ) { exit( -1 ); } lua_pop( L, 1 ); term = 2; } // lets the runner do stuff every cycle, // like starting new processes, writing the statusfile etc. load_runner_func( L, "cycle" ); l_now( L ); if( lua_pcall( L, 1, 1, -3 ) ) { exit( -1 ); } if( !lua_toboolean( L, -1 ) ) { // cycle told core to break mainloop lua_pop( L, 2 ); return; } lua_pop( L, 2 ); if( lua_gettop( L ) ) { logstring( "Error", "internal, stack is dirty." 
); l_stackdump( L ); exit( -1 ); } } } /* | The effective main for one run. | HUP signals may cause several runs of the one main. */ int main1( int argc, char *argv[] ) { // the Lua interpreter lua_State * L; // the runner file char * lsyncd_runner_file = NULL; int argp = 1; // load Lua L = luaL_newstate( ); luaL_openlibs( L ); { // checks the lua version const char * version; int major, minor; lua_getglobal( L, "_VERSION" ); version = luaL_checkstring( L, -1 ); if( sscanf( version, "Lua %d.%d", &major, &minor ) != 2 ) { fprintf( stderr, "cannot parse lua library version!\n" ); exit (-1 ); } if( major < 5 || (major == 5 && minor < 1) ) { fprintf( stderr, "Lua library is too old. Needs 5.1 at least" ); exit( -1 ); } lua_pop( L, 1 ); } { // logging is prepared quite early int i = 1; add_logcat( "Normal", LOG_NOTICE ); add_logcat( "Warn", LOG_WARNING ); add_logcat( "Error", LOG_ERR ); while( i < argc ) { if( strcmp( argv[ i ], "-log" ) && strcmp( argv[ i ], "--log" ) ) { // arg is neither -log or --log i++; continue; } if( ++i >= argc ) { // -(-)log was last argument break; } if( !add_logcat( argv[ i ], LOG_NOTICE ) ) { printlogf( L, "Error", "'%s' is not a valid logging category", argv[ i ] ); exit( -1 ); } } } // registers Lsycnd's core library register_lsyncd( L ); if( check_logcat( "Debug" ) <= settings.log_level ) { // printlogf doesnt support %ld :-( printf( "kernels clocks_per_sec=%ld\n", clocks_per_sec ); } // checks if the user overrode the default runner file if( argp < argc && !strcmp( argv[ argp ], "--runner" ) ) { if (argp + 1 >= argc) { logstring( "Error", "Lsyncd Lua-runner file missing after --runner " ); exit( -1 ); } lsyncd_runner_file = argv[ argp + 1 ]; argp += 2; } if( lsyncd_runner_file ) { // checks if the runner file exists struct stat st; if( stat( lsyncd_runner_file, &st ) ) { printlogf( L, "Error", "Cannot see a runner at '%s'.", lsyncd_runner_file ); exit( -1 ); } // loads the runner file if( luaL_loadfile(L, lsyncd_runner_file ) ) { 
printlogf( L, "Error", "error loading '%s': %s", lsyncd_runner_file, lua_tostring( L, -1 ) ); exit( -1 ); } } else { // loads the runner from binary if( luaL_loadbuffer( L, runner_out, runner_size, "runner" ) ) { printlogf( L, "Error", "error loading precompiled runner: %s", lua_tostring( L, -1 ) ); exit( -1 ); } } // prepares the runner executing the script { if( lua_pcall( L, 0, LUA_MULTRET, 0 ) ) { printlogf( L, "Error", "preparing runner: %s", lua_tostring( L, -1 ) ); exit( -1 ); } lua_pushlightuserdata( L, (void *) & runner ); // switches the value ( result of preparing ) and the key &runner lua_insert( L, 1 ); // saves the table of the runners functions in the lua registry lua_settable( L, LUA_REGISTRYINDEX ); // saves the error function extras // &callError is the key lua_pushlightuserdata ( L, (void *) &callError ); // &runner[ callError ] the value lua_pushlightuserdata ( L, (void *) &runner ); lua_gettable ( L, LUA_REGISTRYINDEX ); lua_pushstring ( L, "callError" ); lua_gettable ( L, -2 ); lua_remove ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } // asserts the Lsyncd's version matches // between runner and core { const char *lversion; lua_getglobal( L, "lsyncd_version" ); lversion = luaL_checkstring( L, -1 ); if( strcmp( lversion, PACKAGE_VERSION ) ) { printlogf( L, "Error", "Version mismatch '%s' is '%s', but core is '%s'", lsyncd_runner_file ? 
lsyncd_runner_file : "( internal runner )", lversion, PACKAGE_VERSION ); exit( -1 ); } lua_pop( L, 1 ); } // loads the defaults from binary { if( luaL_loadbuffer( L, defaults_out, defaults_size, "defaults" ) ) { printlogf( L, "Error", "loading defaults: %s", lua_tostring( L, -1 ) ); exit( -1 ); } // prepares the defaults if (lua_pcall( L, 0, 0, 0 ) ) { printlogf( L, "Error", "preparing defaults: %s", lua_tostring( L, -1 ) ); exit( -1 ); } } // checks if there is a "-help" or "--help" { int i; for( i = argp; i < argc; i++ ) { if ( !strcmp( argv[ i ], "-help" ) || !strcmp( argv[ i ], "--help" ) ) { load_runner_func(L, "help"); if (lua_pcall(L, 0, 0, -2)) { exit( -1 ); } lua_pop( L, 1 ); exit( 0 ); } } } // starts the option parser in Lua script { int idx = 1; const char *s; // creates a table with all remaining argv option arguments load_runner_func( L, "configure" ); lua_newtable( L ); while( argp < argc ) { lua_pushnumber ( L, idx++ ); lua_pushstring ( L, argv[ argp++ ] ); lua_settable ( L, -3 ); } // creates a table with the cores event monitor interfaces idx = 0; lua_newtable( L ); while( monitors[ idx ] ) { lua_pushnumber ( L, idx + 1 ); lua_pushstring ( L, monitors[ idx++ ] ); lua_settable ( L, -3 ); } if( lua_pcall( L, 2, 1, -4 ) ) { exit( -1 ); } if( first_time ) { // If not first time, simply retains the config file given s = lua_tostring(L, -1); if( s ) { lsyncd_config_file = s_strdup( s ); } } lua_pop( L, 2 ); } // checks existence of the config file if( lsyncd_config_file ) { struct stat st; // gets the absolute path to the config file // so in case of HUPing the daemon, it finds it again char * apath = get_realpath( lsyncd_config_file ); if( !apath ) { printlogf( L, "Error", "Cannot find config file at '%s'.", lsyncd_config_file ); exit( -1 ); } free( lsyncd_config_file ); lsyncd_config_file = apath; if( stat( lsyncd_config_file, &st ) ) { printlogf( L, "Error", "Cannot find config file at '%s'.", lsyncd_config_file ); exit( -1 ); } // loads and executes 
the config file if( luaL_loadfile( L, lsyncd_config_file ) ) { printlogf( L, "Error", "error loading %s: %s", lsyncd_config_file, lua_tostring( L, -1 ) ); exit( -1 ); } if( lua_pcall( L, 0, LUA_MULTRET, 0) ) { printlogf( L, "Error", "error preparing %s: %s", lsyncd_config_file, lua_tostring( L, -1 ) ); exit( -1 ); } } #ifdef LSYNCD_WITH_INOTIFY open_inotify( L ); #endif #ifdef LSYNCD_WITH_FSEVENTS open_fsevents( L ); #endif // adds signal handlers // listens to SIGCHLD, but blocks it until pselect( ) // opens the signal handler up { sigset_t set; sigemptyset( &set ); sigaddset( &set, SIGCHLD ); signal( SIGCHLD, sig_child ); sigprocmask( SIG_BLOCK, &set, NULL ); signal( SIGHUP, sig_handler ); signal( SIGTERM, sig_handler ); } // runs initializations from runner // it will set the configuration and add watches { load_runner_func( L, "initialize" ); lua_pushboolean( L, first_time ); if( lua_pcall( L, 1, 0, -3 ) ) { exit( -1 ); } lua_pop( L, 1 ); } // // enters the master loop // masterloop( L ); // // cleanup // // tidies up all observances { int i; for( i = 0; i < observances_len; i++ ) { struct observance *obs = observances + i; obs->tidy( obs ); } observances_len = 0; nonobservances_len = 0; } // frees logging categories { int ci; struct logcat *lc; for( ci = 'A'; ci <= 'Z'; ci++ ) { for( lc = logcats[ ci - 'A' ]; lc && lc->name; lc++) { free( lc->name ); lc->name = NULL; } if( logcats[ci - 'A' ] ) { free( logcats[ ci - 'A' ] ); logcats[ ci - 'A' ] = NULL; } } } lua_close( L ); return 0; } /* | Main */ int main( int argc, char * argv[ ] ) { // gets a kernel parameter clocks_per_sec = sysconf( _SC_CLK_TCK ); while( !term ) { main1( argc, argv ); } // exits with 143 since it got a kill signal return 143; }
gpl-2.0
wjin/algorithm
leetcode/binary_tree_level_order_traversal.cpp
1
2462
/* Binary Tree Level Order Traversal Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level). For example: Given binary tree {3,9,20,#,#,15,7}, 3 / \ 9 20 / \ 15 7 return its level order traversal as: [ [3], [9,20], [15,7] ] */ #include <iostream> #include <vector> #include <queue> using namespace std; //Definition for binary tree struct TreeNode { int val; TreeNode *left; TreeNode *right; TreeNode(int x) : val(x), left(NULL), right(NULL) { } }; class Solution { public: vector<vector<int> > levelOrder(TreeNode *root) { queue<TreeNode*> q; TreeNode *cur = root; vector<vector<int>> ret; int levelSize = 0; if (cur) q.push(cur); while (!q.empty()) { // judge whether start a new level if (levelSize == 0) { levelSize = q.size(); ret.push_back(vector<int>()); // } cur = q.front(); q.pop(); // add element for this level ret.back().push_back(cur->val); levelSize--; if (cur->left != nullptr) q.push(cur->left); if (cur->right != nullptr) q.push(cur->right); } return ret; } }; // recursively class Solution2 { public: vector<vector<int>> levelOrder(TreeNode *root) { vector<vector<int>> ret; levelOrder(root, ret, 0); return ret; } private: void levelOrder(TreeNode *t, vector<vector<int>> &ret, size_t level) { if (!t) return; if (level >= ret.size()) ret.push_back(vector<int>()); ret[level].push_back(t->val); levelOrder(t->left, ret, level + 1); levelOrder(t->right, ret, level + 1); } }; void print_ret(vector<vector<int>> &ret) { for (auto &row : ret) { for (auto col : row) cout << col << ' '; cout << endl; } } int main(int argc, char *argv[]) { Solution sol; Solution2 sol2; TreeNode *root = new TreeNode(3); root->left = new TreeNode(9); root->right = new TreeNode(20); root->right->left = new TreeNode(15); root->right->right = new TreeNode(7); vector<vector<int>> ret = sol.levelOrder(root); print_ret(ret); ret = sol2.levelOrder(root); print_ret(ret); return 0; }
gpl-2.0
zyzil/zhed
src/zhed/PhysicalDrive.cpp
1
5414
/* zhed - Free hex editor based on Frhed Copyright (C) 2000 Raihan Kibria Copyright (C) 2016 Kevin Mullins This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. Last change: 2013-02-24 by Jochen Neubeck */ /** * @file PhysicalDrive.cpp * * @brief Drive/partition information class implementations. * */ #include "precomp.h" #include "resource.h" #include "physicaldrive.h" #include "pdrivent.h" #include "LangArray.h" #include "StringTable.h" IPhysicalDrive* CreatePhysicalDriveInstance() { return new PNtPhysicalDrive; } LPTSTR PartitionInfo::GetNameAsString(PFormat* pFormat) { if (m_bIsPartition) { pFormat->Format(GetLangString(IDS_DRIVES_DRIVE_PART), m_dwDrive + 1, m_dwPartition + 1, GetSizeAsString()); } else { pFormat->Format(GetLangString(IDS_DRIVES_DRIVE_ONLY), m_dwDrive + 1, GetSizeAsString()); } return pFormat->buffer; } LPTSTR PartitionInfo::GetSizeAsString(PFormat* pFormat) { const double sizeInMB = (double) m_PartitionLength / (1024 * 1024); if (sizeInMB < 1024.0) { //fmt += GetLangString(IDS_ISO_MB); pFormat->Format(_T("%.2f MB"), sizeInMB); } else { const double sizeInGB = sizeInMB / 1024; if (sizeInGB < 1024.0) { //fmt += GetLangString(IDS_ISO_GB); pFormat->Format(_T("%.2f GB"), sizeInGB); } else { //fmt += GetLangString(IDS_ISO_TB); pFormat->Format(_T("%.2f TB"), sizeInGB / 1024); } } return pFormat->buffer; } void IPhysicalDrive::GetPartitionInfo(PList* lpList) { lpList->DeleteContents(); 
BYTE bLayoutInfo[20240]; DISK_GEOMETRY dg; for (int iDrive = 0; iDrive < 8; iDrive++) { if (!Open(iDrive)) continue; if (GetDriveGeometryEx((DISK_GEOMETRY_EX*) bLayoutInfo, sizeof(bLayoutInfo))) { DISK_GEOMETRY& dgref = (((DISK_GEOMETRY_EX*)bLayoutInfo)->Geometry); dg = dgref; PartitionInfo* p = new PartitionInfo(); p->m_dwDrive = (DWORD) iDrive; p->m_dwPartition = 0; p->m_bIsPartition = TRUE; p->m_dwBytesPerSector = dg.BytesPerSector; p->m_NumberOfSectors = dg.Cylinders.QuadPart; p->m_NumberOfSectors *= dg.SectorsPerTrack; p->m_NumberOfSectors *= dg.TracksPerCylinder; p->m_StartingOffset = 0; p->m_StartingSector = 0; p->m_PartitionLength = p->m_NumberOfSectors; p->m_PartitionLength *= dg.BytesPerSector; lpList->AddTail(p); if (GetDriveLayoutEx(bLayoutInfo, sizeof(bLayoutInfo))) { PDRIVE_LAYOUT_INFORMATION_EX pLI = (PDRIVE_LAYOUT_INFORMATION_EX)bLayoutInfo; for (DWORD iPartition = 0; iPartition < pLI->PartitionCount; iPartition++) { PARTITION_INFORMATION_EX* pi = &(pLI->PartitionEntry[iPartition]); PartitionInfo* p = new PartitionInfo(); p->m_dwDrive = (DWORD) iDrive; p->m_dwPartition = (DWORD) iPartition; p->m_bIsPartition = TRUE; p->m_dwBytesPerSector = dg.BytesPerSector; p->m_NumberOfSectors = pi->PartitionLength.QuadPart; p->m_NumberOfSectors /= dg.BytesPerSector; p->m_StartingOffset = pi->StartingOffset.QuadPart; p->m_StartingSector = p->m_StartingOffset; p->m_StartingSector /= dg.BytesPerSector; p->m_PartitionLength = pi->PartitionLength.QuadPart; lpList->AddTail(p); } } } else { if (GetDriveGeometry(&dg)) { PartitionInfo* p = new PartitionInfo(); p->m_dwDrive = (DWORD) iDrive; p->m_dwPartition = 0; p->m_bIsPartition = FALSE; p->m_dwBytesPerSector = dg.BytesPerSector; p->m_NumberOfSectors = dg.Cylinders.QuadPart; p->m_NumberOfSectors *= dg.SectorsPerTrack; p->m_NumberOfSectors *= dg.TracksPerCylinder; p->m_StartingOffset = 0; p->m_StartingSector = 0; p->m_PartitionLength = p->m_NumberOfSectors; p->m_PartitionLength *= dg.BytesPerSector; lpList->AddTail(p); if 
(GetDriveLayout(bLayoutInfo, sizeof(bLayoutInfo))) { PDRIVE_LAYOUT_INFORMATION pLI = (PDRIVE_LAYOUT_INFORMATION)bLayoutInfo; for (DWORD iPartition = 0; iPartition < pLI->PartitionCount; iPartition++) { PARTITION_INFORMATION* pi = &(pLI->PartitionEntry[iPartition]); if (!pi->PartitionLength.QuadPart) continue; PartitionInfo* p = new PartitionInfo(); p->m_dwDrive = (DWORD) iDrive; p->m_dwPartition = (DWORD) iPartition; p->m_bIsPartition = TRUE; p->m_dwBytesPerSector = dg.BytesPerSector; p->m_NumberOfSectors = pi->PartitionLength.QuadPart; p->m_NumberOfSectors /= dg.BytesPerSector; p->m_StartingOffset = pi->StartingOffset.QuadPart; p->m_StartingSector = p->m_StartingOffset; p->m_StartingSector /= dg.BytesPerSector; p->m_PartitionLength = pi->PartitionLength.QuadPart; lpList->AddTail(p); } } } } Close(); } }
gpl-2.0
kchau101/libHD44780
src/libhd44780.c
1
2839
#include "libhd44780.h" #include "delay.h" void lcd_init(struct HD44780_Device *device) { if (device->config.datalength == LCD_4BIT_MODE) { lcd_func_set(device); lcd_func_set(device); lcd_func_set(device); } else { lcd_func_set(device); } } void lcd_clear_display(struct HD44780_Device *device) { lcd_write_data(device, 0x01); } void lcd_cursor_home(struct HD44780_Device *device) { lcd_write_data(device, 0x02); } void lcd_entry_mode_set(struct HD44780_Device *device, uint8_t command) { lcd_write_data(device, (command | (1 << 2))); } void lcd_display_control(struct HD44780_Device *device, uint8_t command) { lcd_write_data(device, (command | (1<<3))); } void lcd_shift(struct HD44780_Device *device, uint8_t command) { lcd_write_data(device, (command | (1 << 4))); } void lcd_func_set(struct HD44780_Device *device) { //Special write to configure settings, especially if 4-bit mode uint16_t settings; if (device->config.datalength == LCD_8BIT_MODE) { settings = ( (device->config.font << device->PINS.BITS.pinB2) | (device->config.numlines << device->PINS.BITS.pinB3) | (device->config.datalength << device->PINS.BITS.pinB4) | (1 << device->PINS.BITS.pinB5) ); device->gpioport->ODR = settings; lcd_clk_pulse(device); } else { settings = ( (device->config.datalength << device->PINS.BITS.pinB4) | (1 << device->PINS.BITS.pinB5)); device->gpioport->ODR = settings; lcd_clk_pulse(device); } } void lcd_set_cgram_addr(struct HD44780_Device *device, uint8_t addr) { lcd_write_data(device, (addr | (1 << 6))); } void lcd_set_ddram_addr(struct HD44780_Device *device, uint8_t addr) { lcd_write_data(device, (addr | (1 << 7))); } void lcd_write_ram(struct HD44780_Device *device, uint8_t addr) { uint16_t command; command = addr | (1 << 8); lcd_write_data(device, command); } void lcd_write_data(struct HD44780_Device *device, uint16_t data) { //data format is RS, B7, B6...B0 uint16_t output = 0x0000; if (device->config.datalength == LCD_8BIT_MODE) { for (uint8_t i=0; i < 9; i++) // Only write Bits 
7..0 and RS { if ((data & (1<< i)) > 0) { output |= (1 << device->PINS.pin[i]); } } device->gpioport->ODR = output; lcd_clk_pulse(device); } else { output |= (((data & (1 << 9)) >> 9) << device->PINS.BITS.pinRS); for (uint8_t i=0; i < 2; i++) { for (uint8_t j=0; j< 4; j++) { if ((data & (1<< j)) > 0) { output |= (1 << device->PINS.pin[i]); } } lcd_clk_pulse(device); } } } void lcd_clk_pulse(struct HD44780_Device *device) { /* CLK is active low */ device->gpioport->ODR &= ~(1 << device->PINS.BITS.pinCLK); /* You may use delay8, delay16, or delay32 depending on your clk rate */ delay16(LCD_CLK_DURATION); /* Block CPU for Setup&Hold */ device->gpioport->ODR ^= (1 << device->PINS.BITS.pinCLK); }
gpl-2.0
FLOWERCLOUD/PCM2015plus
PCM/graph_cut_node_old.cpp
1
17280
#include "graph_cut_node.h" #include <stdio.h> #include <engine.h> #ifdef WIN32 #include <direct.h> #define get_current_dir _getcwd #else #include <unistd.h> #define get_current_dir getcwd #endif #include <cstring> #include"sample_set.h" void GraphNodeCtr::run() { Logger<<"Start!.\n"; IndexType nbCluster = 5; //read_label_file("labelInfo.txt"); //read_label_file("labelInfo_edit.txt"); //read_label_file("labelInfo_28.txt"); //read_corres_file("corInfo.txt"); //read_label_file("labelInfo_456.txt"); //read_corres_file("corInfo_456.txt"); read_label_file("tot_labels_dancer.txt");//dancer girl_f(91-110) read_corres_file("tot_cor_dancer.txt"); pca_box_ctr(); Engine* ep; if (! (ep = engOpen(NULL)) ) { Logger<< "Can't not start Matlab engine.\n"; return; } // set buffer to display result IndexType result_buffer_size = 1024*1000; char* result_buffer = new char[result_buffer_size]; engOutputBuffer(ep, result_buffer, result_buffer_size); //Get the executable file's path char cur_path[FILENAME_MAX]; if (!get_current_dir(cur_path, sizeof(cur_path))) { return; } cur_path[sizeof(cur_path) - 1] = '\0'; strcat(cur_path,"\\nCut"); char cd_cur_path[FILENAME_MAX + 3] = "cd "; strcat(cd_cur_path, cur_path); engEvalString(ep, cd_cur_path ); IndexType n = cur_graph_index_; mxArray *mx_distMat = NULL; numeric::float64* dm_buffer; dm_buffer = new numeric::float64[n*n]; mx_distMat = mxCreateDoubleMatrix(n,n,mxREAL); ScalarType a_w = 0.01f; //ScalarType avg_a_dis ,a_mid; //compute_mid_and_avg(avg_a_dis,a_mid,&GraphNodeCtr::dist_inside_frame); //Logger<<"avg and mid of adjacency"<<avg_a_dis<<" "<<a_mid<<endl; //ScalarType avg_cor_dis,cor_dis; //compute_mid_and_avg(avg_cor_dis,cor_dis,&GraphNodeCtr::dist_between_frame); //Logger<<"avg and mid of corresponcdence"<<avg_cor_dis<<" "<<cor_dis<<endl; for (int i=0; i<n;i++) { //dm_buffer[i*(n+1)] = exp(-a_w * a_w); dm_buffer[i*(n+1)] = a_w; for (int j=i+1; j<n; j++) { //ScalarType dist = weight2nodes(node_vec[i],node_vec[j]); ScalarType dist = 
weight2nodes(node_vec[i],node_vec[j],a_w); dm_buffer[i*n+j] = dm_buffer[j*n+i] = (numeric::float64)dist; } } //FILE *out_file = fopen("disMat","w"); //for (int i=0;i<n;i++) //{ // for (int j=0;j<n;j++) // { // //fprintf(out_file,"%lf ",dm_buffer[i*n+j]); // fprintf(out_file,"%lf ",disMat(i,j)); // } // fprintf(out_file,"\n"); //} //fclose(out_file); //FILE *in_file = fopen("weight_1_9","r"); //for (int i=0;i<n;i++) //{ // for (int j=0;j<n;j++) // { // fscanf(in_file,"%lf",&dm_buffer[i*n+j]); // } //} //fclose(in_file); memcpy((char*)mxGetPr(mx_distMat),(char*)dm_buffer,n*n*sizeof(numeric::float64)); delete [] dm_buffer; engPutVariable(ep,"W",mx_distMat); char cmd_buf[128]; sprintf(cmd_buf,"[NcutDiscrete,NcutEigenvectors,NcutEigenvalues] = ncutW(W,%d);",nbCluster); engEvalString(ep,cmd_buf); //Display output information Logger<<result_buffer<<std::endl; mxArray *mx_NcutDiscrete = NULL; mx_NcutDiscrete = engGetVariable(ep,"NcutDiscrete"); numeric::float64 *ncutDiscrete = mxGetPr(mx_NcutDiscrete); IndexType k=0; for ( IndexType i=0;i<nbCluster;i++ ) { for (IndexType j=0;j<n;j++) { if ( ncutDiscrete[k++]!=0 ) { node_vec[j]->graph_label = i; } } } //Visualize SampleSet &smp_set = SampleSet::get_instance(); IndexType frames[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19}; for ( IndexType i=0; i<3;i++ ) { for (IndexType j=0; j<smp_set[frames[i]].num_vertices(); j++) { smp_set[frames[i]][j].set_visble(false); } } for ( IndexType i=0; i<n;i++ ) { GraphCutNode &node = *(node_vec[i]); smp_set[node.frame][node.index].set_label(node.graph_label); smp_set[node.frame][node.index].set_visble(true); } Logger<<"finished\n"; } void GraphNodeCtr::add_node(IndexType frame, IndexType label, IndexType index) { GraphCutNode *new_space = allocator_.allocate<GraphCutNode>(); GraphCutNode *new_node = new(new_space) GraphCutNode(frame,label,index,cur_graph_index_++); node_vec.push_back(new_node); node_map[frame_index_to_key(frame,index)] = new_node; } void 
GraphNodeCtr::add_corresponding_relation( IndexType frame, IndexType index, IndexType cor_frame, IndexType cor_idx ) { node_map[frame_index_to_key(frame,index)]->cor_frame_index.insert(make_pair(cor_frame,cor_idx)); } void GraphNodeCtr::read_label_file(char *filename) { FILE *in_file = fopen(filename,"r"); if (in_file==NULL) { return; } while (true) { int frame,label,index; int status = fscanf(in_file,"%d %d %d\n",&frame,&label,&index); if(status==EOF)break; add_node(frame, label, index); label_bucket[frame_label_to_key(frame,label)].insert(index); } } void GraphNodeCtr::read_corres_file(char *filename) { FILE *in_file = fopen(filename,"r"); if (in_file==NULL) { return; } while (true) { int frame,index,cor_frame,cor_index; int status = fscanf(in_file,"%d %d %d %d\n",&frame,&index,&cor_frame,&cor_index); if(status==EOF)break; add_corresponding_relation(frame,index,cor_frame,cor_index); } } ScalarType GraphNodeCtr::dist_inside_frame(GraphCutNode* s_node,GraphCutNode* e_node) { if (s_node->frame != e_node->frame || s_node->label != e_node->label) { return 1e8; } Sample &smp = SampleSet::get_instance()[s_node->frame]; const IndexType k = 100; IndexType neighbours[k]; ScalarType dist[k]; bool is_neighbour=false; smp.neighbours(s_node->index, k, neighbours, dist); for ( int i=0; i<k; i++ ) { if(neighbours[i]==e_node->index) { is_neighbour = true; break; } } if (is_neighbour==false) { return 1e8; } { ScalarType diag = 1.0;// diagonal of the box(Index by frame and label) PointType start,end;//index start = m_smpSet[s_node->frame].vertices_matrix().col(s_node->index); end = m_smpSet[e_node->frame].vertices_matrix().col(e_node->index); map<IndexType,IndexType>::iterator start_isInValidIter,end_IsValidIter; PCABox* box = box_bucket[frame_label_to_key(s_node->frame,s_node->label) ];//calculate diag of box ScalarType dis = (start- end).norm(); diag = box->diagLen; IndexType labelsize = box->vtxSize; assert(labelsize>0); for(auto iter = s_node->cor_frame_index.begin(); iter!= 
s_node->cor_frame_index.end(); iter++) { start_isInValidIter = s_node->cor_frame_index.find(iter->first); end_IsValidIter = e_node->cor_frame_index.find(iter->first); GraphCutNode *s_cor = node_map[frame_index_to_key(start_isInValidIter->first,start_isInValidIter->second)]; GraphCutNode *e_cor = node_map[frame_index_to_key(end_IsValidIter->first,end_IsValidIter->second)]; ScalarType dis = (start- end).norm(); if(s_cor->label != e_cor->label)//in different label { return 8*(dis/(diag*labelsize)); }else { return dis/(diag * labelsize); } } } } void GraphNodeCtr::pca_box_ctr() { IndexType boxid = 0; for ( unordered_map<IndexType,set<IndexType>>::iterator iter=label_bucket.begin(); iter!=label_bucket.end();iter++ ) { IndexType frame_label = iter->first; set<IndexType>& members = iter->second; IndexType frame = get_frame_from_key(frame_label); IndexType k=members.size(); MatrixX3 coords(k,3); Sample& smp = SampleSet::get_instance()[frame]; int j=0; for ( set<IndexType>::iterator m_iter=members.begin(); m_iter!=members.end(); m_iter++,j++ ) { coords.row(j)<<smp[*m_iter].x(),smp[*m_iter].y(),smp[*m_iter].z(); } MatrixX3 vert_mean = coords.colwise().mean(); MatrixX3 Q(k, 3); for ( IndexType j=0; j<k;j++) { Q.row(j) = coords.row(j) - vert_mean; } Matrix33 sigma = Q.transpose() * Q; Eigen::EigenSolver<Matrix33> eigen_solver(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV); auto evec = eigen_solver.eigenvectors(); auto eval = eigen_solver.eigenvalues(); PCABox *new_space = allocator_.allocate<PCABox>(); box_bucket[frame_label] = new(new_space) PCABox; Matrix33 evecMat; for(int i=0;i<3;i++) { for(int j=0;j<3;j++) { evecMat(j,i) = (evec.col(i))(j).real(); } } MatrixX3 newCoor = Q * evecMat; Matrix23 minmax; minmax.setZero(); for (int i = 0;i<3;i++) { minmax(0,i) = newCoor.col(i).minCoeff(); minmax(1,i) = newCoor.col(i).maxCoeff(); } PCABox *pbox = box_bucket[frame_label]; for (int i=0;i<3;i++) { pbox->center(i) = (vert_mean.row(0))(i); } pbox->minPoint = minmax.row(0); 
pbox->maxPoint = minmax.row(1); PointType dis = pbox->maxPoint - pbox->minPoint; pbox->volume = dis(0,0) *dis(1,0)*dis(2.0); pbox->diagLen = sqrt(dis(0,0)*dis(0,0) + dis(1,0)*dis(1,0) + dis(2,0)*dis(2,0)); pbox->vtxSize = k; //Logger<<"boxes"<<boxid++<<endl; //Logger<<"box volume"<<pbox->volume<<endl; //Logger<<"box length"<<pbox->diagLen<<endl; //Logger<<"size"<<members.size()<<endl; } } ScalarType GraphNodeCtr::dist_inside_frame_all(GraphCutNode* s_node,GraphCutNode* e_node) { if (s_node->frame != e_node->frame || s_node->label != e_node->label) { return 1e8; }else//identity frame and label { ScalarType diag = 1.0;// diagonal of the box(Index by frame and label) PointType start,end;//index start = m_smpSet[s_node->frame].vertices_matrix().col(s_node->index); end = m_smpSet[e_node->frame].vertices_matrix().col(e_node->index); //correspodence of start and end point are in the same label?2014-11-28 map<IndexType,IndexType>::iterator start_isInValidIter,end_IsValidIter; PCABox* box = box_bucket[frame_label_to_key(s_node->frame,s_node->label) ];//calculate diag of box diag = box->diagLen; ScalarType dis = 0.0; bool flagDiff = false; IndexType labelsize = box->vtxSize; assert(labelsize>0); IndexType nDiff = 0; IndexType nIden = 0; for(auto iter = s_node->cor_frame_index.begin(); iter!= s_node->cor_frame_index.end(); iter++) { start_isInValidIter = s_node->cor_frame_index.find(iter->first); end_IsValidIter = e_node->cor_frame_index.find(iter->first); GraphCutNode *s_cor = node_map[frame_index_to_key(start_isInValidIter->first,start_isInValidIter->second)]; GraphCutNode *e_cor = node_map[frame_index_to_key(end_IsValidIter->first,end_IsValidIter->second)]; dis = (start- end).norm(); if(s_cor->label != e_cor->label)//in different label { //return 3 *(dis/diag); nDiff ++; //flagDiff = true; }else { //return dis/(diag); nIden ++; } } if(flagDiff || (nDiff > nIden)) { return 3 *(dis/diag); } return dis/(diag); ///return 4 *(dis/(diag*labelsize)); //return 
dis/(diag*labelsize); } } ScalarType GraphNodeCtr::dist_between_frame(GraphCutNode* s_node,GraphCutNode* e_node) { map<IndexType,IndexType>::iterator isInValidIter; isInValidIter = s_node->cor_frame_index.find(e_node->frame); if (s_node->frame == e_node->frame || isInValidIter->second != e_node->index ) { return 1e8; }else { ScalarType corDis = minCorrespondenceDis(s_node,e_node);//using min deformable of the edge ScalarType boxRation = minVoxelRation(s_node,e_node); if(corDis > 0.5)//Ôö¼ÓÒ»Ìõ-Èç¹û±äÐÎÁ¿³¬¹ýÁËÒ»¶¨Á¿£¬ÄÇôÕâ¸ö¶ÔÓ¦¾ÍûÓÐÒâÒå¡£ { return 1e8; } if(boxRation < 0.8)//Íƶϳöµã¶ÔÓ¦ÊôÓÚ²»Í¬µÄÀà,¼õСËüÃÇÖ®¼äµÄ¾àÀë { return 0.0; //return 1e-2* corDis; //return corDis * corDis; }else { return boxRation * boxRation * corDis; //return 1e2 * corDis; //return corDis * corDis * corDis; } } } ScalarType GraphNodeCtr::weight2nodes(GraphCutNode* s_node,GraphCutNode* e_node) { ScalarType adjDis = 0.0; ScalarType wg_a = 0.01; ScalarType var_a = 0.3;//median of the distances ScalarType corDis = 0.0; ScalarType wg_c = 0.99; ScalarType var_c = 0.25; ScalarType adjWeight = 1.0; ScalarType corWeight = 1.0; adjDis = dist_inside_frame(s_node,e_node); //adjDis = dist_inside_frame_all(s_node,e_node); corDis = dist_between_frame(s_node,e_node); if (adjDis > 1e5) { adjWeight = 0.0; }else { adjWeight = exp(- adjDis *adjDis/var_a); } if(corDis > 1e5) { corWeight = 0.0; } else { corWeight = exp(- corDis *corDis/var_c); //Logger<<"cor dis"<<corDis<<endl; } return wg_a * adjWeight + wg_c * corWeight; } ScalarType GraphNodeCtr::weight2nodes(GraphCutNode* s_node,GraphCutNode* e_node,ScalarType a_weight) { ScalarType adjDis = 0.0; //ScalarType var_a = 0.0182201;//median of the distances-3frames(0.020051) ScalarType var_a = 0.0369636;//0.0356051;//median of the distances-3frames() ScalarType corDis = 0.0; //ScalarType var_c = 0.0901867;//avg-3frames(0.0363027) ScalarType var_c = 0.0663885;//2-frames--0.106298;//median-3frames(0.0561515) ScalarType adjWeight = 1.0; ScalarType corWeight = 1.0; 
//adjDis = dist_inside_frame(s_node,e_node);//k-neighboor adjDis = dist_inside_frame_all(s_node,e_node);//total corDis = dist_between_frame(s_node,e_node); if (adjDis > 1e5) { adjWeight = 0.0; }else { adjWeight = exp(- adjDis *adjDis/(2*var_a)); //Logger<<"a distance = "<<adjDis<<endl; } if(corDis > 1e5) { corWeight = 0.0; } else { corWeight = exp(- corDis *corDis/(2*var_c)); } return a_weight * adjWeight + (1 - a_weight) * corWeight; } ScalarType GraphNodeCtr::weight2nodes(GraphCutNode* s_node,GraphCutNode* e_node,ScalarType& adjDis,ScalarType& corDis) { //ScalarType adjDis = 0.0; ScalarType wg_a = 0.01; ScalarType var_a = 0.3;//median of the distances //ScalarType corDis = 0.0; ScalarType wg_c = 0.99; ScalarType var_c = 0.3; ScalarType adjWeight = 1.0; ScalarType corWeight = 1.0; adjDis = dist_inside_frame(s_node,e_node); corDis = dist_between_frame(s_node,e_node); if (adjDis > 1e5) { adjWeight = 0.0; }else { adjWeight = exp(- adjDis *adjDis/(2*var_a)); } if(corDis > 1e5) { corWeight = 0.0; } else { corWeight = exp(- corDis *corDis/(2*var_c)); } return wg_a * adjWeight + wg_c * corWeight; } ScalarType GraphNodeCtr::measureDeformableCorVer(IndexType sFrame,IndexType sId,IndexType tFrame,IndexType tId) { IndexType * neigSrGraph = new IndexType[m_neigNum]; IndexType * neigTgGraph = new IndexType[m_neigNum]; Matrix3X neigSrCoor(3,m_neigNum); Matrix3X neigTgCoor(3,m_neigNum); VecX neigDis; //record ori distance VecX resNeigDis; // record target distances neigDis.setZero(m_neigNum,1); resNeigDis.setZero(m_neigNum,1); neigSrCoor.setZero(); neigTgCoor.setZero(); m_smpSet[sFrame].neighbours(sId,m_neigNum,neigSrGraph); m_smpSet[tFrame].neighbours(tId,m_neigNum,neigTgGraph); getConstantNeigCoorByIndex(m_smpSet[sFrame],neigSrGraph,neigSrCoor); getConstantNeigCoorByIndex(m_smpSet[tFrame],neigTgGraph,neigTgCoor); calculateNeigDis(neigSrCoor,neigDis); calculateNeigDis(neigTgCoor,resNeigDis); return deformableValue(neigDis,resNeigDis); } void 
GraphNodeCtr::getConstantNeigCoorByIndex(Sample & smp,IndexType * neigIndex,Matrix3X & res_coor) { for (IndexType v_it = 0; v_it < m_neigNum;v_it++) { res_coor.col(v_it) = smp.vertices_matrix().col(neigIndex[v_it]); } } void GraphNodeCtr::calculateNeigDis(Matrix3X & neigCoor,VecX& resDis) { IndexType vtx_num = neigCoor.cols(); PointType ori = neigCoor.col(0); PointType diff; diff.setZero(); for (IndexType vtx_iter = 1; vtx_iter < vtx_num;vtx_iter ++) { diff = neigCoor.col(vtx_iter) - ori; resDis(vtx_iter,0) = diff.norm(); } } ScalarType GraphNodeCtr::deformableValue(VecX& srNeigDis,VecX & resNeigDis) { ScalarType totle = 0.0; ScalarType molecule = 0.0; ScalarType denominator = 1.0; IndexType neigNum = resNeigDis.rows(); for (IndexType v_iter = 1;v_iter < neigNum; v_iter++) { denominator = srNeigDis(v_iter,0); molecule = abs(resNeigDis(v_iter,0) - denominator); assert(denominator > 1e-7); totle += molecule/denominator; } return totle/(neigNum - 1); } ScalarType GraphNodeCtr::minCorrespondenceDis(GraphCutNode* s_node,GraphCutNode* e_node) { map<IndexType,IndexType>::iterator isInValidIter; ScalarType toDeformable = measureDeformableCorVer(s_node->frame,s_node->index,e_node->frame,e_node->index); isInValidIter = e_node->cor_frame_index.find(s_node->frame); if(isInValidIter == e_node->cor_frame_index.end()) { Logger<<"can't find inverse correspondence Error!.\n"; } ScalarType fromDeformable = measureDeformableCorVer(e_node->frame,e_node->index,s_node->frame,isInValidIter->second); return min(toDeformable,fromDeformable); } ScalarType GraphNodeCtr::minVoxelRation(GraphCutNode* s_node,GraphCutNode* e_node) { PCABox* sBox = box_bucket[frame_label_to_key(s_node->frame,s_node->label) ]; PCABox* tBox = box_bucket[frame_label_to_key(e_node->frame,e_node->label) ]; ScalarType sBoxVoxel = sBox->volume; ScalarType tBoxVoxel = tBox->volume; return min(sBoxVoxel/tBoxVoxel,tBoxVoxel/sBoxVoxel); } void GraphNodeCtr::compute_mid_and_avg(ScalarType &mid, ScalarType &avg, pDistFunc 
dist_func) { const ScalarType DEF_INF = 1e8; int n = node_vec.size(); vector<ScalarType> dist_vec; ScalarType sum=0.f; for (int i=0;i<n;i++) { for(int j=i+1;j<n;j++) { ScalarType dist = (this->*dist_func)(node_vec[i],node_vec[j]); if (dist==DEF_INF) { continue; } dist_vec.push_back(dist); sum += dist; } } sort(dist_vec.begin(),dist_vec.end()); mid = dist_vec[dist_vec.size()/2]; avg = sum/dist_vec.size(); }
gpl-2.0
evolver56k/xpenology
fs/reiserfs/fix_node.c
1
79018
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ /** ** old_item_num ** old_entry_num ** set_entry_sizes ** create_virtual_node ** check_left ** check_right ** directory_part_size ** get_num_ver ** set_parameters ** is_leaf_removable ** are_leaves_removable ** get_empty_nodes ** get_lfree ** get_rfree ** is_left_neighbor_in_cache ** decrement_key ** get_far_parent ** get_parents ** can_node_be_removed ** ip_check_balance ** dc_check_balance_internal ** dc_check_balance_leaf ** dc_check_balance ** check_balance ** get_direct_parent ** get_neighbors ** fix_nodes ** ** **/ #include <linux/time.h> #include <linux/slab.h> #include <linux/string.h> #include "reiserfs.h" #include <linux/buffer_head.h> /* To make any changes in the tree we find a node, that contains item to be changed/deleted or position in the node we insert a new item to. We call this node S. To do balancing we need to decide what we will shift to left/right neighbor, or to a new node, where new item will be etc. To make this analysis simpler we build virtual node. Virtual node is an array of items, that will replace items of node S. (For instance if we are going to delete an item, virtual node does not contain it). Virtual node keeps information about item sizes and types, mergeability of first and last items, sizes of all entries in directory item. We use this array of items when calculating what we can shift to neighbors and how many nodes we have to have if we do not any shiftings, if we shift to left/right neighbor or to both. 
*/ /* taking item number in virtual node, returns number of item, that it has in source buffer */ static inline int old_item_num(int new_num, int affected_item_num, int mode) { if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num) return new_num; if (mode == M_INSERT) { RFALSE(new_num == 0, "vs-8005: for INSERT mode and item number of inserted item"); return new_num - 1; } RFALSE(mode != M_DELETE, "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'", mode); /* delete mode */ return new_num + 1; } static void create_virtual_node(struct tree_balance *tb, int h) { struct item_head *ih; struct virtual_node *vn = tb->tb_vn; int new_num; struct buffer_head *Sh; /* this comes from tb->S[h] */ Sh = PATH_H_PBUFFER(tb->tb_path, h); /* size of changed node */ vn->vn_size = MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h]; /* for internal nodes array if virtual items is not created */ if (h) { vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE); return; } /* number of items in virtual node */ vn->vn_nr_item = B_NR_ITEMS(Sh) + ((vn->vn_mode == M_INSERT) ? 1 : 0) - ((vn->vn_mode == M_DELETE) ? 1 : 0); /* first virtual item */ vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1); memset(vn->vn_vi, 0, vn->vn_nr_item * sizeof(struct virtual_item)); vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item); /* first item in the node */ ih = B_N_PITEM_HEAD(Sh, 0); /* define the mergeability for 0-th item (if it is not being deleted) */ if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size) && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num)) vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE; /* go through all items those remain in the virtual node (except for the new (inserted) one) */ for (new_num = 0; new_num < vn->vn_nr_item; new_num++) { int j; struct virtual_item *vi = vn->vn_vi + new_num; int is_affected = ((new_num != vn->vn_affected_item_num) ? 
0 : 1); if (is_affected && vn->vn_mode == M_INSERT) continue; /* get item number in source node */ j = old_item_num(new_num, vn->vn_affected_item_num, vn->vn_mode); vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE; vi->vi_ih = ih + j; vi->vi_item = B_I_PITEM(Sh, ih + j); vi->vi_uarea = vn->vn_free_ptr; // FIXME: there is no check, that item operation did not // consume too much memory vn->vn_free_ptr += op_create_vi(vn, vi, is_affected, tb->insert_size[0]); if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr) reiserfs_panic(tb->tb_sb, "vs-8030", "virtual node space consumed"); if (!is_affected) /* this is not being changed */ continue; if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) { vn->vn_vi[new_num].vi_item_len += tb->insert_size[0]; vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted } } /* virtual inserted item is not defined yet */ if (vn->vn_mode == M_INSERT) { struct virtual_item *vi = vn->vn_vi + vn->vn_affected_item_num; RFALSE(vn->vn_ins_ih == NULL, "vs-8040: item header of inserted item is not specified"); vi->vi_item_len = tb->insert_size[0]; vi->vi_ih = vn->vn_ins_ih; vi->vi_item = vn->vn_data; vi->vi_uarea = vn->vn_free_ptr; op_create_vi(vn, vi, 0 /*not pasted or cut */ , tb->insert_size[0]); } /* set right merge flag we take right delimiting key and check whether it is a mergeable item */ if (tb->CFR[0]) { struct reiserfs_key *key; key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]); if (op_is_left_mergeable(key, Sh->b_size) && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) vn->vn_vi[vn->vn_nr_item - 1].vi_type |= VI_TYPE_RIGHT_MERGEABLE; #ifdef CONFIG_REISERFS_CHECK if (op_is_left_mergeable(key, Sh->b_size) && !(vn->vn_mode != M_DELETE || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) { /* we delete last item and it could be merged with right neighbor's first item */ if (! 
(B_NR_ITEMS(Sh) == 1 && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0)) && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) { /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */ print_block(Sh, 0, -1, -1); reiserfs_panic(tb->tb_sb, "vs-8045", "rdkey %k, affected item==%d " "(mode==%c) Must be %c", key, vn->vn_affected_item_num, vn->vn_mode, M_DELETE); } } #endif } } /* using virtual node check, how many items can be shifted to left neighbor */ static void check_left(struct tree_balance *tb, int h, int cur_free) { int i; struct virtual_node *vn = tb->tb_vn; struct virtual_item *vi; int d_size, ih_size; RFALSE(cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free); /* internal level */ if (h > 0) { tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE); return; } /* leaf level */ if (!cur_free || !vn->vn_nr_item) { /* no free space or nothing to move */ tb->lnum[h] = 0; tb->lbytes = -1; return; } RFALSE(!PATH_H_PPARENT(tb->tb_path, 0), "vs-8055: parent does not exist or invalid"); vi = vn->vn_vi; if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? 
IH_SIZE : 0))) { /* all contents of S[0] fits into L[0] */ RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE, "vs-8055: invalid mode or balance condition failed"); tb->lnum[0] = vn->vn_nr_item; tb->lbytes = -1; return; } d_size = 0, ih_size = IH_SIZE; /* first item may be merge with last item in left neighbor */ if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE) d_size = -((int)IH_SIZE), ih_size = 0; tb->lnum[0] = 0; for (i = 0; i < vn->vn_nr_item; i++, ih_size = IH_SIZE, d_size = 0, vi++) { d_size += vi->vi_item_len; if (cur_free >= d_size) { /* the item can be shifted entirely */ cur_free -= d_size; tb->lnum[0]++; continue; } /* the item cannot be shifted entirely, try to split it */ /* check whether L[0] can hold ih and at least one byte of the item body */ if (cur_free <= ih_size) { /* cannot shift even a part of the current item */ tb->lbytes = -1; return; } cur_free -= ih_size; tb->lbytes = op_check_left(vi, cur_free, 0, 0); if (tb->lbytes != -1) /* count partially shifted item */ tb->lnum[0]++; break; } return; } /* using virtual node check, how many items can be shifted to right neighbor */ static void check_right(struct tree_balance *tb, int h, int cur_free) { int i; struct virtual_node *vn = tb->tb_vn; struct virtual_item *vi; int d_size, ih_size; RFALSE(cur_free < 0, "vs-8070: cur_free < 0"); /* internal level */ if (h > 0) { tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE); return; } /* leaf level */ if (!cur_free || !vn->vn_nr_item) { /* no free space */ tb->rnum[h] = 0; tb->rbytes = -1; return; } RFALSE(!PATH_H_PPARENT(tb->tb_path, 0), "vs-8075: parent does not exist or invalid"); vi = vn->vn_vi + vn->vn_nr_item - 1; if ((unsigned int)cur_free >= (vn->vn_size - ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? 
IH_SIZE : 0))) { /* all contents of S[0] fits into R[0] */ RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE, "vs-8080: invalid mode or balance condition failed"); tb->rnum[h] = vn->vn_nr_item; tb->rbytes = -1; return; } d_size = 0, ih_size = IH_SIZE; /* last item may be merge with first item in right neighbor */ if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) d_size = -(int)IH_SIZE, ih_size = 0; tb->rnum[0] = 0; for (i = vn->vn_nr_item - 1; i >= 0; i--, d_size = 0, ih_size = IH_SIZE, vi--) { d_size += vi->vi_item_len; if (cur_free >= d_size) { /* the item can be shifted entirely */ cur_free -= d_size; tb->rnum[0]++; continue; } /* check whether R[0] can hold ih and at least one byte of the item body */ if (cur_free <= ih_size) { /* cannot shift even a part of the current item */ tb->rbytes = -1; return; } /* R[0] can hold the header of the item and at least one byte of its body */ cur_free -= ih_size; /* cur_free is still > 0 */ tb->rbytes = op_check_right(vi, cur_free); if (tb->rbytes != -1) /* count partially shifted item */ tb->rnum[0]++; break; } return; } /* * from - number of items, which are shifted to left neighbor entirely * to - number of item, which are shifted to right neighbor entirely * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */ static int get_num_ver(int mode, struct tree_balance *tb, int h, int from, int from_bytes, int to, int to_bytes, short *snum012, int flow) { int i; int cur_free; // int bytes; int units; struct virtual_node *vn = tb->tb_vn; // struct virtual_item * vi; int total_node_size, max_node_size, current_item_size; int needed_nodes; int start_item, /* position of item we start filling node from */ end_item, /* position of item we finish filling node by */ start_bytes, /* number of first bytes (entries for directory) of start_item-th item we do not include into node 
that is being filled */ end_bytes; /* number of last bytes (entries for directory) of end_item-th item we do node include into node that is being filled */ int split_item_positions[2]; /* these are positions in virtual item of items, that are split between S[0] and S1new and S1new and S2new */ split_item_positions[0] = -1; split_item_positions[1] = -1; /* We only create additional nodes if we are in insert or paste mode or we are in replace mode at the internal level. If h is 0 and the mode is M_REPLACE then in fix_nodes we change the mode to paste or insert before we get here in the code. */ RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE), "vs-8100: insert_size < 0 in overflow"); max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h)); /* snum012 [0-2] - number of items, that lay to S[0], first new node and second new node */ snum012[3] = -1; /* s1bytes */ snum012[4] = -1; /* s2bytes */ /* internal level */ if (h > 0) { i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE); if (i == max_node_size) return 1; return (i / max_node_size + 1); } /* leaf level */ needed_nodes = 1; total_node_size = 0; cur_free = max_node_size; // start from 'from'-th item start_item = from; // skip its first 'start_bytes' units start_bytes = ((from_bytes != -1) ? from_bytes : 0); // last included item is the 'end_item'-th one end_item = vn->vn_nr_item - to - 1; // do not count last 'end_bytes' units of 'end_item'-th item end_bytes = (to_bytes != -1) ? to_bytes : 0; /* go through all item beginning from the start_item-th item and ending by the end_item-th item. Do not count first 'start_bytes' units of 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */ for (i = start_item; i <= end_item; i++) { struct virtual_item *vi = vn->vn_vi + i; int skip_from_end = ((i == end_item) ? 
end_bytes : 0); RFALSE(needed_nodes > 3, "vs-8105: too many nodes are needed"); /* get size of current item */ current_item_size = vi->vi_item_len; /* do not take in calculation head part (from_bytes) of from-th item */ current_item_size -= op_part_size(vi, 0 /*from start */ , start_bytes); /* do not take in calculation tail part of last item */ current_item_size -= op_part_size(vi, 1 /*from end */ , skip_from_end); /* if item fits into current node entierly */ if (total_node_size + current_item_size <= max_node_size) { snum012[needed_nodes - 1]++; total_node_size += current_item_size; start_bytes = 0; continue; } if (current_item_size > max_node_size) { /* virtual item length is longer, than max size of item in a node. It is impossible for direct item */ RFALSE(is_direct_le_ih(vi->vi_ih), "vs-8110: " "direct item length is %d. It can not be longer than %d", current_item_size, max_node_size); /* we will try to split it */ flow = 1; } if (!flow) { /* as we do not split items, take new node and continue */ needed_nodes++; i--; total_node_size = 0; continue; } // calculate number of item units which fit into node being // filled { int free_space; free_space = max_node_size - total_node_size - IH_SIZE; units = op_check_left(vi, free_space, start_bytes, skip_from_end); if (units == -1) { /* nothing fits into current node, take new node and continue */ needed_nodes++, i--, total_node_size = 0; continue; } } /* something fits into the current node */ //if (snum012[3] != -1 || needed_nodes != 1) // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required"); //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units; start_bytes += units; snum012[needed_nodes - 1 + 3] = units; if (needed_nodes > 2) reiserfs_warning(tb->tb_sb, "vs-8111", "split_item_position is out of range"); snum012[needed_nodes - 1]++; split_item_positions[needed_nodes - 1] = i; needed_nodes++; /* continue from the same item with start_bytes != -1 */ start_item = i; i--; 
total_node_size = 0; } // sum012[4] (if it is not -1) contains number of units of which // are to be in S1new, snum012[3] - to be in S0. They are supposed // to be S1bytes and S2bytes correspondingly, so recalculate if (snum012[4] > 0) { int split_item_num; int bytes_to_r, bytes_to_l; int bytes_to_S1new; split_item_num = split_item_positions[1]; bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0); bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0); bytes_to_S1new = ((split_item_positions[0] == split_item_positions[1]) ? snum012[3] : 0); // s2bytes snum012[4] = op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] - bytes_to_r - bytes_to_l - bytes_to_S1new; if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY && vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT) reiserfs_warning(tb->tb_sb, "vs-8115", "not directory or indirect item"); } /* now we know S2bytes, calculate S1bytes */ if (snum012[3] > 0) { int split_item_num; int bytes_to_r, bytes_to_l; int bytes_to_S2new; split_item_num = split_item_positions[0]; bytes_to_l = ((from == split_item_num && from_bytes != -1) ? from_bytes : 0); bytes_to_r = ((end_item == split_item_num && end_bytes != -1) ? end_bytes : 0); bytes_to_S2new = ((split_item_positions[0] == split_item_positions[1] && snum012[4] != -1) ? snum012[4] : 0); // s1bytes snum012[3] = op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] - bytes_to_r - bytes_to_l - bytes_to_S2new; } return needed_nodes; } /* Set parameters for balancing. * Performs write of results of analysis of balancing into structure tb, * where it will later be used by the functions that actually do the balancing. * Parameters: * tb tree_balance structure; * h current level of the node; * lnum number of items from S[h] that must be shifted to L[h]; * rnum number of items from S[h] that must be shifted to R[h]; * blk_num number of blocks that S[h] will be splitted into; * s012 number of items that fall into splitted nodes. 
 * lbytes	number of bytes which flow to the left neighbor from the item
 *		that is not shifted entirely
 * rbytes	number of bytes which flow to the right neighbor from the item
 *		that is not shifted entirely
 * s1bytes	number of bytes which flow to the first new node when S[0]
 *		splits (this number is contained in the s012 array)
 */
static void set_parameters(struct tree_balance *tb, int h, int lnum,
                           int rnum, int blk_num, short *s012, int lb, int rb)
{

    tb->lnum[h] = lnum;
    tb->rnum[h] = rnum;
    tb->blknum[h] = blk_num;

    if (h == 0) {       /* only for leaf level */
        /* s012, when given, holds five consecutive leaf-level results:
           s0num, s1num, s2num, s1bytes, s2bytes (same layout as the
           snum012 array filled by get_num_ver()) */
        if (s012 != NULL) {
            tb->s0num = *s012++,
                tb->s1num = *s012++, tb->s2num = *s012++;
            tb->s1bytes = *s012++;
            tb->s2bytes = *s012;
        }
        tb->lbytes = lb;
        tb->rbytes = rb;
    }
    PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
    PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);

    PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
    PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
}

/* check, does node disappear if we shift tb->lnum[0] items to left
   neighbor and tb->rnum[0] to the right one.
   Returns 1 (and fills tb via set_parameters) when S[0] can be emptied,
   0 otherwise. */
static int is_leaf_removable(struct tree_balance *tb)
{
    struct virtual_node *vn = tb->tb_vn;
    int to_left, to_right;
    int size;
    int remain_items;

    /* number of items, that will be shifted to left (right) neighbor
       entirely (an item partially shifted, lbytes/rbytes != -1, does not
       count) */
    to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
    to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
    remain_items = vn->vn_nr_item;

    /* how many items remain in S[0] after shiftings to neighbors */
    remain_items -= (to_left + to_right);

    if (remain_items < 1) {
        /* all content of node can be shifted to neighbors */
        set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
                       NULL, -1, -1);
        return 1;
    }

    if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
        /* S[0] is not removable */
        return 0;

    /* check, whether we can divide 1 remaining item between neighbors */

    /* get size of remaining item (in item units) */
    size = op_unit_num(&(vn->vn_vi[to_left]));

    if (tb->lbytes + tb->rbytes >= size) {
        set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
                       tb->lbytes, -1);
        return 1;
    }

    return 0;
}

/* check whether L, S, R can be joined in one node.
   lfree/rfree are the free-space figures of the left/right neighbor.
   Returns 1 (and fills tb) when all three leaves fit into one. */
static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
{
    struct virtual_node *vn = tb->tb_vn;
    int ih_size;
    struct buffer_head *S0;

    S0 = PATH_H_PBUFFER(tb->tb_path, 0);

    /* ih_size = header bytes that merging will save */
    ih_size = 0;
    if (vn->vn_nr_item) {
        if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
            ih_size += IH_SIZE;

        if (vn->vn_vi[vn->vn_nr_item - 1].
            vi_type & VI_TYPE_RIGHT_MERGEABLE)
            ih_size += IH_SIZE;
    } else {
        /* there was only one item and it will be deleted */
        struct item_head *ih;

        RFALSE(B_NR_ITEMS(S0) != 1,
               "vs-8125: item number must be 1: it is %d",
               B_NR_ITEMS(S0));

        ih = B_N_PITEM_HEAD(S0, 0);
        if (tb->CFR[0]
            && !comp_short_le_keys(&(ih->ih_key),
                                   B_N_PDELIM_KEY(tb->CFR[0],
                                                  tb->rkey[0])))
            if (is_direntry_le_ih(ih)) {
                /* Directory must be in correct state here: that is
                   somewhere at the left side should exist first
                   directory item. But the item being deleted can not
                   be that first one because its right neighbor is
                   item of the same directory. (But first item always
                   gets deleted in last turn). So, neighbors of
                   deleted item can be merged, so we can save
                   ih_size */
                ih_size = IH_SIZE;

                /* we might check that left neighbor exists and is of
                   the same directory */
                RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
                       "vs-8130: first directory item can not be removed until directory is not empty");
            }

    }

    if (MAX_CHILD_SIZE(S0) + vn->vn_size <= rfree + lfree + ih_size) {
        set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
        PROC_INFO_INC(tb->tb_sb, leaves_removable);
        return 1;
    }

    return 0;

}

/* when we do not split item, lnum and rnum are numbers of entire items.
   The two macros below expect h, Sh, vn, lpar/rpar, lnver/rnver,
   lset/rset and snum012 to be in scope at the expansion site
   (ip_check_balance). */
#define SET_PAR_SHIFT_LEFT \
if (h)\
{\
   int to_l;\
   \
   to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
              (MAX_NR_KEY(Sh) + 1 - lpar);\
              \
              set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
}\
else \
{\
   if (lset==LEFT_SHIFT_FLOW)\
     set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
                     tb->lbytes, -1);\
   else\
     set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
                     -1, -1);\
}

#define SET_PAR_SHIFT_RIGHT \
if (h)\
{\
   int to_r;\
   \
   to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
   \
   set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
}\
else \
{\
   if (rset==RIGHT_SHIFT_FLOW)\
     set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
                     -1, tb->rbytes);\
   else\
     set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
                     -1, -1);\
}

/* Release the path and drop all neighbor/parent buffer references held
   in the tree_balance, clearing the slots so a retry starts clean. */
static void free_buffers_in_tb(struct tree_balance *tb)
{
    int i;

    pathrelse(tb->tb_path);

    for (i = 0; i < MAX_HEIGHT; i++) {
        brelse(tb->L[i]);
        brelse(tb->R[i]);
        brelse(tb->FL[i]);
        brelse(tb->FR[i]);
        brelse(tb->CFL[i]);
        brelse(tb->CFR[i]);

        tb->L[i] = NULL;
        tb->R[i] = NULL;
        tb->FL[i] = NULL;
        tb->FR[i] = NULL;
        tb->CFL[i] = NULL;
        tb->CFR[i] = NULL;
    }
}

/* Get new buffers for storing new nodes that are created while balancing.
 * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
 *	        CARRY_ON - schedule didn't occur while the function worked;
 *	        NO_DISK_SPACE - no disk space.
 */
/* The function is NOT SCHEDULE-SAFE!
 */
static int get_empty_nodes(struct tree_balance *tb, int h)
{
    struct buffer_head *new_bh,
        *Sh = PATH_H_PBUFFER(tb->tb_path, h);
    b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
    int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */
     retval = CARRY_ON;
    struct super_block *sb = tb->tb_sb;

    /* number_of_freeblk is the number of empty blocks which have been
       acquired for use by the balancing algorithm minus the number of
       empty blocks used in the previous levels of the analysis,
       number_of_freeblk = tb->cur_blknum can be non-zero if a schedule
       occurs after empty blocks are acquired, and the balancing analysis
       is then restarted, amount_needed is the number needed by this
       level (h) of the balancing analysis.

       Note that for systems with many processes writing, it would be
       more layout optimal to calculate the total number needed by all
       levels and then to run reiserfs_new_blocks to get all of them at
       once. */

    /* Initiate number_of_freeblk to the amount acquired prior to the
       restart of the analysis or 0 if not restarted, then subtract the
       amount needed by all of the levels of the tree below h. */
    /* blknum includes S[h], so we subtract 1 in this calculation */
    for (counter = 0, number_of_freeblk = tb->cur_blknum;
         counter < h; counter++)
        number_of_freeblk -=
            (tb->blknum[counter]) ? (tb->blknum[counter] - 1) : 0;

    /* Allocate missing empty blocks. */
    /* if Sh == 0  then we are getting a new root */
    amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
    /* Amount_needed = the amount that we need more than the amount that
       we have. */
    if (amount_needed > number_of_freeblk)
        amount_needed -= number_of_freeblk;
    else                /* If we have enough already then there is nothing to do. */
        return CARRY_ON;

    /* No need to check quota - is not allocated for blocks used for
       formatted nodes */
    if (reiserfs_new_form_blocknrs(tb, blocknrs,
                                   amount_needed) == NO_DISK_SPACE)
        return NO_DISK_SPACE;

    /* for each blocknumber we just got, get a buffer and stick it on
       FEB */
    for (blocknr = blocknrs, counter = 0;
         counter < amount_needed; blocknr++, counter++) {

        RFALSE(!*blocknr,
               "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");

        /* NOTE(review): sb_getblk() can return NULL on allocation
           failure; the result is used unchecked here - confirm this is
           acceptable (RFALSE below dereferences new_bh). */
        new_bh = sb_getblk(sb, *blocknr);
        RFALSE(buffer_dirty(new_bh) ||
               buffer_journaled(new_bh) ||
               buffer_journal_dirty(new_bh),
               "PAP-8140: journaled or dirty buffer %b for the new block",
               new_bh);

        /* Put empty buffers into the array. */
        RFALSE(tb->FEB[tb->cur_blknum],
               "PAP-8141: busy slot for new buffer");

        set_buffer_journal_new(new_bh);
        tb->FEB[tb->cur_blknum++] = new_bh;
    }

    if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
        retval = REPEAT_SEARCH;

    return retval;
}

/* Get free space of the left neighbor, which is stored in the parent
 * node of the left neighbor.  Returns 0 when the parent or the left
 * father is not set up yet. */
static int get_lfree(struct tree_balance *tb, int h)
{
    struct buffer_head *l, *f;
    int order;

    if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
        (l = tb->FL[h]) == NULL)
        return 0;

    if (f == l)
        /* same parent: left neighbor is the preceding child */
        order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
    else {
        /* different parents: left neighbor is the last child of FL[h] */
        order = B_NR_ITEMS(l);
        f = l;
    }

    return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
}

/* Get free space of the right neighbor,
 * which is stored in the parent node of the right neighbor. */
static int get_rfree(struct tree_balance *tb, int h)
{
    struct buffer_head *r, *f;
    int order;

    if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
        (r = tb->FR[h]) == NULL)
        return 0;

    if (f == r)
        /* same parent: right neighbor is the following child */
        order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
    else {
        /* different parents: right neighbor is the first child of FR[h] */
        order = 0;
        f = r;
    }

    return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));

}

/* Check whether left neighbor is in memory.
 * Returns 1 when the left neighbor's buffer is found in the buffer
 * cache (reference is dropped again before returning), 0 otherwise. */
static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
{
    struct buffer_head *father, *left;
    struct super_block *sb = tb->tb_sb;
    b_blocknr_t left_neighbor_blocknr;
    int left_neighbor_position;

    /* Father of the left neighbor does not exist. */
    if (!tb->FL[h])
        return 0;

    /* Calculate father of the node to be balanced. */
    father = PATH_H_PBUFFER(tb->tb_path, h + 1);

    RFALSE(!father ||
           !B_IS_IN_TREE(father) ||
           !B_IS_IN_TREE(tb->FL[h]) ||
           !buffer_uptodate(father) ||
           !buffer_uptodate(tb->FL[h]),
           "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
           father, tb->FL[h]);

    /* Get position of the pointer to the left neighbor into the left
       father. */
    left_neighbor_position = (father == tb->FL[h]) ?
        tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
    /* Get left neighbor block number. */
    left_neighbor_blocknr =
        B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
    /* Look for the left neighbor in the cache. */
    if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) {

        RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
               "vs-8170: left neighbor (%b %z) is not in the tree",
               left, left);
        put_bh(left);
        return 1;
    }

    return 0;
}

#define LEFT_PARENTS  'l'
#define RIGHT_PARENTS 'r'

/* Step the key back to the previous possible key of its type; used when
   looking up the parent of the LEFT neighbor by key. */
static void decrement_key(struct cpu_key *key)
{
    // call item specific function for this key
    item_ops[cpu_key_k_type(key)]->decrement_key(key);
}

/* Calculate far left/right parent of the left/right neighbor of the current node, that
 * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
 * Calculate left/right common parent of the current node and L[h]/R[h].
 * Calculate left/right delimiting key position.
 * Returns:	PATH_INCORRECT - path in the tree is not correct;
 *		SCHEDULE_OCCURRED - schedule occurred while the function worked;
 *	        CARRY_ON - schedule didn't occur while the function worked;
 */
static int get_far_parent(struct tree_balance *tb,
                          int h,
                          struct buffer_head **pfather,
                          struct buffer_head **pcom_father, char c_lr_par)
{
    struct buffer_head *parent;
    INITIALIZE_PATH(s_path_to_neighbor_father);
    struct treepath *path = tb->tb_path;
    struct cpu_key s_lr_father_key;
    int counter,
        position = INT_MAX,
        first_last_position = 0,
        path_offset = PATH_H_PATH_OFFSET(path, h);

    /* Starting from F[h] go upwards in the tree, and look for the common
       ancestor of F[h], and its neighbor l/r, that should be
       obtained. */

    counter = path_offset;

    RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
           "PAP-8180: invalid path length");

    for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
        /* Check whether parent of the current buffer in the path is
           really parent in the tree. */
        if (!B_IS_IN_TREE
            (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
            return REPEAT_SEARCH;
        /* Check whether position in the parent is correct. */
        if ((position =
             PATH_OFFSET_POSITION(path,
                                  counter - 1)) >
            B_NR_ITEMS(parent))
            return REPEAT_SEARCH;
        /* Check whether parent at the path really points to the
           child. */
        if (B_N_CHILD_NUM(parent, position) !=
            PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
            return REPEAT_SEARCH;
        /* Return delimiting key if position in the parent is not equal
           to first/last one. */
        if (c_lr_par == RIGHT_PARENTS)
            first_last_position = B_NR_ITEMS(parent);
        if (position != first_last_position) {
            *pcom_father = parent;
            get_bh(*pcom_father);
            /*(*pcom_father = parent)->b_count++; */
            break;
        }
    }

    /* if we are in the root of the tree, then there is no common
       father */
    if (counter == FIRST_PATH_ELEMENT_OFFSET) {
        /* Check whether first buffer in the path is the root of the
           tree. */
        if (PATH_OFFSET_PBUFFER
            (tb->tb_path,
             FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
            SB_ROOT_BLOCK(tb->tb_sb)) {
            *pfather = *pcom_father = NULL;
            return CARRY_ON;
        }
        return REPEAT_SEARCH;
    }

    RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
           "PAP-8185: (%b %z) level too small",
           *pcom_father, *pcom_father);

    /* Check whether the common parent is locked. */

    if (buffer_locked(*pcom_father)) {

        /* Release the write lock while the buffer is busy */
        reiserfs_write_unlock(tb->tb_sb);
        __wait_on_buffer(*pcom_father);
        reiserfs_write_lock(tb->tb_sb);
        if (FILESYSTEM_CHANGED_TB(tb)) {
            brelse(*pcom_father);
            return REPEAT_SEARCH;
        }
    }

    /* So, we got common parent of the current node and its left/right
       neighbor.  Now we are geting the parent of the left/right
       neighbor. */

    /* Form key to get parent of the left/right neighbor.  As a side
       effect the delimiting key position lkey/rkey of level h-1 is
       recorded in tb. */
    le_key2cpu_key(&s_lr_father_key,
                   B_N_PDELIM_KEY(*pcom_father,
                                  (c_lr_par ==
                                   LEFT_PARENTS) ? (tb->lkey[h - 1] =
                                                    position -
                                                    1) : (tb->rkey[h -
                                                                   1] =
                                                          position)));

    if (c_lr_par == LEFT_PARENTS)
        decrement_key(&s_lr_father_key);

    if (search_by_key
        (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
         h + 1) == IO_ERROR)
        // path is released
        return IO_ERROR;

    if (FILESYSTEM_CHANGED_TB(tb)) {
        pathrelse(&s_path_to_neighbor_father);
        brelse(*pcom_father);
        return REPEAT_SEARCH;
    }

    *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);

    RFALSE(B_LEVEL(*pfather) != h + 1,
           "PAP-8190: (%b %z) level too small", *pfather, *pfather);
    RFALSE(s_path_to_neighbor_father.path_length <
           FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");

    /* shorten the path first so pathrelse() does not drop the reference
       we are returning in *pfather */
    s_path_to_neighbor_father.path_length--;
    pathrelse(&s_path_to_neighbor_father);
    return CARRY_ON;
}

/* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
 * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
 * FR[path_offset], CFL[path_offset], CFR[path_offset].
 * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
 * Returns:	SCHEDULE_OCCURRED - schedule occurred while the function worked;
 *	        CARRY_ON - schedule didn't occur while the function worked;
 */
static int get_parents(struct tree_balance *tb, int h)
{
    struct treepath *path = tb->tb_path;
    int position,
        ret,
        path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
    struct buffer_head *curf, *curcf;

    /* Current node is the root of the tree or will be root of the
       tree */
    if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
        /* The root can not have parents.
           Release nodes which previously were obtained as parents of
           the current node neighbors. */
        brelse(tb->FL[h]);
        brelse(tb->CFL[h]);
        brelse(tb->FR[h]);
        brelse(tb->CFR[h]);
        tb->FL[h] = NULL;
        tb->CFL[h] = NULL;
        tb->FR[h] = NULL;
        tb->CFR[h] = NULL;
        return CARRY_ON;
    }

    /* Get parent FL[path_offset] of L[path_offset]. */
    position = PATH_OFFSET_POSITION(path, path_offset - 1);
    if (position) {
        /* Current node is not the first child of its parent.  Parent
           and common parent are the same buffer, hence two references
           are taken on curf (curcf aliases it). */
        curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
        curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
        get_bh(curf);
        get_bh(curf);
        tb->lkey[h] = position - 1;
    } else {
        /* Calculate current parent of L[path_offset], which is the left
           neighbor of the current node.  Calculate current common
           parent of L[path_offset] and the current node.  Note that
           CFL[path_offset] not equal FL[path_offset] and
           CFL[path_offset] not equal F[path_offset].
           Calculate lkey[path_offset]. */
        if ((ret = get_far_parent(tb, h + 1, &curf,
                                  &curcf,
                                  LEFT_PARENTS)) != CARRY_ON)
            return ret;
    }

    brelse(tb->FL[h]);
    tb->FL[h] = curf;       /* New initialization of FL[h]. */
    brelse(tb->CFL[h]);
    tb->CFL[h] = curcf;     /* New initialization of CFL[h]. */

    RFALSE((curf && !B_IS_IN_TREE(curf)) ||
           (curcf && !B_IS_IN_TREE(curcf)),
           "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);

    /* Get parent FR[h] of R[h]. */

    /* Current node is the last child of F[h]. FR[h] != F[h]. */
    if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
        /* Calculate current parent of R[h], which is the right neighbor
           of F[h].  Calculate current common parent of R[h] and current
           node.  Note that CFR[h] not equal FR[path_offset] and CFR[h]
           not equal F[h]. */
        if ((ret =
             get_far_parent(tb, h + 1, &curf, &curcf,
                            RIGHT_PARENTS)) != CARRY_ON)
            return ret;
    } else {
        /* Current node is not the last child of its parent F[h].  As
           above, parent and common parent alias, so both references go
           on curf. */
        curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
        curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
        get_bh(curf);
        get_bh(curf);
        tb->rkey[h] = position;
    }

    brelse(tb->FR[h]);
    /* New initialization of FR[path_offset]. */
    tb->FR[h] = curf;

    brelse(tb->CFR[h]);
    /* New initialization of CFR[path_offset]. */
    tb->CFR[h] = curcf;

    RFALSE((curf && !B_IS_IN_TREE(curf)) ||
           (curcf && !B_IS_IN_TREE(curcf)),
           "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);

    return CARRY_ON;
}

/* it is possible to remove node as result of shiftings to
   neighbors even when we insert or paste item.
   Returns NO_BALANCING_NEEDED (and fills tb) when the new item fits
   into S[h] without shifting AND the node could not be emptied into its
   neighbors anyway; otherwise !NO_BALANCING_NEEDED. */
static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
                                      struct tree_balance *tb, int h)
{
    struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
    int levbytes = tb->insert_size[h];
    struct item_head *ih;
    struct reiserfs_key *r_key = NULL;

    ih = B_N_PITEM_HEAD(Sh, 0);
    if (tb->CFR[h])
        r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);

    if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
        /* shifting may merge items which might save space */
        -
        ((!h
          && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0)
        -
        ((!h && r_key
          && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
        + ((h) ? KEY_SIZE : 0)) {
        /* node can not be removed */
        if (sfree >= levbytes) {    /* new item fits into node S[h]
                                       without any shifting */
            if (!h)
                tb->s0num =
                    B_NR_ITEMS(Sh) +
                    ((mode == M_INSERT) ? 1 : 0);
            set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
            return NO_BALANCING_NEEDED;
        }
    }
    PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
    return !NO_BALANCING_NEEDED;
}

/* Check whether current node S[h] is balanced when increasing its size by
 * Calculate parameters for balancing for current level h.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
 */
/* ip means Inserting or Pasting */
static int ip_check_balance(struct tree_balance *tb, int h)
{
    struct virtual_node *vn = tb->tb_vn;
    int levbytes,       /* Number of bytes that must be inserted into
                           (value is negative if bytes are deleted)
                           buffer which contains node being balanced.
                           The mnemonic is that the attempted change in
                           node space used level is levbytes bytes. */
     ret;

    int lfree, sfree, rfree /* free space in L, S and R */ ;

    /* nver is short for number of vertixes, and lnver is the number if
       we shift to the left, rnver is the number if we shift to the
       right, and lrnver is the number if we shift in both directions.
       The goal is to minimize first the number of vertixes, and second,
       the number of vertixes whose contents are changed by shifting,
       and third the number of uncached vertixes whose contents are
       changed by shifting and must be read from disk.  */
    int nver, lnver, rnver, lrnver;

    /* used at leaf level only, S0 = S[0] is the node being balanced,
       sInum [ I = 0,1,2 ] is the number of items that will remain in
       node SI after balancing.  S1 and S2 are new nodes that might be
       created. */

    /* we perform 8 calls to get_num_ver().  For each call we calculate
       five parameters.  where 4th parameter is s1bytes and 5th -
       s2bytes */
    short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
                                   0,1 - do not shift and do not shift
                                         but bottle
                                   2   - shift only whole item to left
                                   3   - shift to left and bottle as
                                         much as possible
                                   4,5 - shift to right (whole items and
                                         as much as possible
                                   6,7 - shift to both directions (whole
                                         items and as much as possible)
                                 */

    /* Sh is the node whose balance is currently being checked */
    struct buffer_head *Sh;

    Sh = PATH_H_PBUFFER(tb->tb_path, h);
    levbytes = tb->insert_size[h];

    /* Calculate balance parameters for creating new root. */
    if (!Sh) {
        if (!h)
            reiserfs_panic(tb->tb_sb, "vs-8210",
                           "S[0] can not be 0");
        switch (ret = get_empty_nodes(tb, h)) {
        case CARRY_ON:
            set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
            return NO_BALANCING_NEEDED; /* no balancing for higher
                                           levels needed */

        case NO_DISK_SPACE:
        case REPEAT_SEARCH:
            return ret;
        default:
            reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
                           "return value of get_empty_nodes");
        }
    }

    if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h]
                                                   neighbors. */
        return ret;

    sfree = B_FREE_SPACE(Sh);

    /* get free space of neighbors */
    rfree = get_rfree(tb, h);
    lfree = get_lfree(tb, h);

    if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
        NO_BALANCING_NEEDED)
        /* and new item fits into node S[h] without any shifting */
        return NO_BALANCING_NEEDED;

    create_virtual_node(tb, h);

    /* determine maximal number of items we can shift to the left
       neighbor (in tb structure) and the maximal number of bytes that
       can flow to the left neighbor from the left most liquid item that
       cannot be shifted from S[0] entirely (returned value) */
    check_left(tb, h, lfree);

    /* determine maximal number of items we can shift to the right
       neighbor (in tb structure) and the maximal number of bytes that
       can flow to the right neighbor from the right most liquid item
       that cannot be shifted from S[0] entirely (returned value) */
    check_right(tb, h, rfree);

    /* all contents of internal node S[h] can be moved into its
       neighbors, S[h] will be removed after balancing */
    if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
        int to_r;

        /* Since we are working on internal nodes, and our internal
           nodes have fixed size entries, then we can balance by the
           number of items rather than the space they consume.  In this
           routine we set the left node equal to the right node,
           allowing a difference of less than or equal to 1 child
           pointer. */
        to_r =
            ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
             vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
                                        tb->rnum[h]);
        set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
                       -1, -1);
        return CARRY_ON;
    }

    /* this checks balance condition, that any two neighboring nodes can
       not fit in one node */
    RFALSE(h &&
           (tb->lnum[h] >= vn->vn_nr_item + 1 ||
            tb->rnum[h] >= vn->vn_nr_item + 1),
           "vs-8220: tree is not balanced on internal level");
    RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
                  (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
           "vs-8225: tree is not balanced on leaf level");

    /* all contents of S[0] can be moved into its neighbors S[0] will be
       removed after balancing. */
    if (!h && is_leaf_removable(tb))
        return CARRY_ON;

    /* why do we perform this check here rather than earlier??
       Answer: we can win 1 node in some cases above. Moreover we
       checked it above, when we checked, that S[0] is not removable
       in principle */
    if (sfree >= levbytes) {    /* new item fits into node S[h] without
                                   any shifting */
        if (!h)
            tb->s0num = vn->vn_nr_item;
        set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
        return NO_BALANCING_NEEDED;
    }

    {
        int lpar, rpar, nset, lset, rset, lrset;
        /*
         * regular overflowing of the node
         */

        /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
           lpar, rpar - number of items we can shift to left/right
           neighbor (including splitting item)
           nset, lset, rset, lrset - shows, whether flowing items give
           better packing */
#define FLOW 1
#define NO_FLOW 0       /* do not any splitting */

        /* we choose one the following */
#define NOTHING_SHIFT_NO_FLOW	0
#define NOTHING_SHIFT_FLOW	5
#define LEFT_SHIFT_NO_FLOW	10
#define LEFT_SHIFT_FLOW		15
#define RIGHT_SHIFT_NO_FLOW	20
#define RIGHT_SHIFT_FLOW	25
#define LR_SHIFT_NO_FLOW	30
#define LR_SHIFT_FLOW		35

        lpar = tb->lnum[h];
        rpar = tb->rnum[h];

        /* calculate number of blocks S[h] must be split into when
           nothing is shifted to the neighbors, as well as number of
           items in each part of the split node (s012 numbers), and
           number of bytes (s1bytes) of the shared drop which flow to S1
           if any */
        nset = NOTHING_SHIFT_NO_FLOW;
        nver = get_num_ver(vn->vn_mode, tb, h,
                           0, -1, h ? vn->vn_nr_item : 0, -1,
                           snum012, NO_FLOW);

        if (!h) {
            int nver1;

            /* note, that in this case we try to bottle between S[0] and
               S1 (S1 - the first new node) */
            nver1 = get_num_ver(vn->vn_mode, tb, h,
                                0, -1, 0, -1,
                                snum012 + NOTHING_SHIFT_FLOW, FLOW);
            if (nver > nver1)
                nset = NOTHING_SHIFT_FLOW, nver = nver1;
        }

        /* calculate number of blocks S[h] must be split into when
           l_shift_num first items and l_shift_bytes of the right most
           liquid item to be shifted are shifted to the left neighbor,
           as well as number of items in each part of the splitted node
           (s012 numbers), and number of bytes (s1bytes) of the shared
           drop which flow to S1 if any */
        lset = LEFT_SHIFT_NO_FLOW;
        lnver = get_num_ver(vn->vn_mode, tb, h,
                            lpar - ((h || tb->lbytes == -1) ? 0 : 1),
                            -1, h ? vn->vn_nr_item : 0, -1,
                            snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
        if (!h) {
            int lnver1;

            lnver1 = get_num_ver(vn->vn_mode, tb, h,
                                 lpar -
                                 ((tb->lbytes != -1) ? 1 : 0),
                                 tb->lbytes, 0, -1,
                                 snum012 + LEFT_SHIFT_FLOW, FLOW);
            if (lnver > lnver1)
                lset = LEFT_SHIFT_FLOW, lnver = lnver1;
        }

        /* calculate number of blocks S[h] must be split into when
           r_shift_num first items and r_shift_bytes of the left most
           liquid item to be shifted are shifted to the right neighbor,
           as well as number of items in each part of the splitted node
           (s012 numbers), and number of bytes (s1bytes) of the shared
           drop which flow to S1 if any */
        rset = RIGHT_SHIFT_NO_FLOW;
        rnver = get_num_ver(vn->vn_mode, tb, h,
                            0, -1,
                            h ? (vn->vn_nr_item - rpar) : (rpar -
                                                           ((tb->
                                                             rbytes !=
                                                             -1) ? 1 :
                                                            0)), -1,
                            snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
        if (!h) {
            int rnver1;

            rnver1 = get_num_ver(vn->vn_mode, tb, h,
                                 0, -1,
                                 (rpar -
                                  ((tb->rbytes != -1) ? 1 : 0)),
                                 tb->rbytes,
                                 snum012 + RIGHT_SHIFT_FLOW, FLOW);

            if (rnver > rnver1)
                rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
        }

        /* calculate number of blocks S[h] must be split into when items
           are shifted in both directions, as well as number of items in
           each part of the splitted node (s012 numbers), and number of
           bytes (s1bytes) of the shared drop which flow to S1 if any */
        lrset = LR_SHIFT_NO_FLOW;
        lrnver = get_num_ver(vn->vn_mode, tb, h,
                             lpar - ((h || tb->lbytes == -1) ? 0 : 1),
                             -1,
                             h ? (vn->vn_nr_item - rpar) : (rpar -
                                                            ((tb->
                                                              rbytes !=
                                                              -1) ? 1 :
                                                             0)), -1,
                             snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
        if (!h) {
            int lrnver1;

            lrnver1 = get_num_ver(vn->vn_mode, tb, h,
                                  lpar -
                                  ((tb->lbytes != -1) ? 1 : 0),
                                  tb->lbytes,
                                  (rpar -
                                   ((tb->rbytes != -1) ? 1 : 0)),
                                  tb->rbytes,
                                  snum012 + LR_SHIFT_FLOW, FLOW);
            if (lrnver > lrnver1)
                lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
        }

        /* Our general shifting strategy is:
           1) to minimized number of new nodes;
           2) to minimized number of neighbors involved in shifting;
           3) to minimized number of disk reads; */

        /* we can win TWO or ONE nodes by shifting in both directions */
        if (lrnver < lnver && lrnver < rnver) {
            RFALSE(h &&
                   (tb->lnum[h] != 1 ||
                    tb->rnum[h] != 1 ||
                    lrnver != 1 || rnver != 2 || lnver != 2
                    || h != 1), "vs-8230: bad h");
            if (lrset == LR_SHIFT_FLOW)
                set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
                               lrnver, snum012 + lrset,
                               tb->lbytes, tb->rbytes);
            else
                set_parameters(tb, h,
                               tb->lnum[h] -
                               ((tb->lbytes == -1) ? 0 : 1),
                               tb->rnum[h] -
                               ((tb->rbytes == -1) ? 0 : 1),
                               lrnver, snum012 + lrset, -1, -1);

            return CARRY_ON;
        }

        /* if shifting doesn't lead to better packing then don't
           shift */
        if (nver == lrnver) {
            set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
                           -1);
            return CARRY_ON;
        }

        /* now we know that for better packing shifting in only one
           direction either to the left or to the right is required */

        /* if shifting to the left is better than shifting to the
           right */
        if (lnver < rnver) {
            SET_PAR_SHIFT_LEFT;
            return CARRY_ON;
        }

        /* if shifting to the right is better than shifting to the
           left */
        if (lnver > rnver) {
            SET_PAR_SHIFT_RIGHT;
            return CARRY_ON;
        }

        /* now shifting in either direction gives the same number of
           nodes and we can make use of the cached neighbors */
        if (is_left_neighbor_in_cache(tb, h)) {
            SET_PAR_SHIFT_LEFT;
            return CARRY_ON;
        }

        /* shift to the right independently on whether the right
           neighbor in cache or not */
        SET_PAR_SHIFT_RIGHT;

        return CARRY_ON;
    }
}

/* Check whether current node S[h] is balanced when Decreasing its size by
 * Deleting or Cutting for INTERNAL node of S+tree.
 * Calculate parameters for balancing for current level h.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
 *
 * Note: Items of internal nodes have fixed size, so the balance condition for
 * the internal part of S+tree is as for the B-trees.
 */
static int dc_check_balance_internal(struct tree_balance *tb, int h)
{
    struct virtual_node *vn = tb->tb_vn;

    /* Sh is the node whose balance is currently being checked,
       and Fh is its father.
     */
    struct buffer_head *Sh, *Fh;
    int maxsize, ret;
    int lfree, rfree /* free space in L and R */ ;

    Sh = PATH_H_PBUFFER(tb->tb_path, h);
    Fh = PATH_H_PPARENT(tb->tb_path, h);

    maxsize = MAX_CHILD_SIZE(Sh);

/*   using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
/*   new_nr_item = number of items node would have if operation is */
/*   performed without balancing (new_nr_item); */
    create_virtual_node(tb, h);

    if (!Fh) {          /* S[h] is the root. */
        if (vn->vn_nr_item > 0) {
            set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
            return NO_BALANCING_NEEDED; /* no balancing for higher
                                           levels needed */
        }
        /* new_nr_item == 0.
         * Current root will be deleted resulting in
         * decrementing the tree height. */
        set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
        return CARRY_ON;
    }

    if ((ret = get_parents(tb, h)) != CARRY_ON)
        return ret;

    /* get free space of neighbors */
    rfree = get_rfree(tb, h);
    lfree = get_lfree(tb, h);

    /* determine maximal number of items we can fit into neighbors */
    check_left(tb, h, lfree);
    check_right(tb, h, rfree);

    if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the
                                               internal node is valid.
                                               In this case we balance
                                               only if it leads to
                                               better packing. */
        if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h]
                                                   with one of its
                                                   neighbors, which is
                                                   impossible with
                                                   greater values of
                                                   new_nr_item. */
            if (tb->lnum[h] >= vn->vn_nr_item + 1) {
                /* All contents of S[h] can be moved to L[h]. */
                int n;
                int order_L;

                /* order_L = position of L[h] inside its father FL[h];
                   wraps to the last item when S[h] is the first
                   child */
                order_L =
                    ((n =
                      PATH_H_B_ITEM_ORDER(tb->tb_path,
                                          h)) ==
                     0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
                /* n = number of child pointers currently in L[h] */
                n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
                    (DC_SIZE + KEY_SIZE);
                set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
                               -1);
                return CARRY_ON;
            }

            if (tb->rnum[h] >= vn->vn_nr_item + 1) {
                /* All contents of S[h] can be moved to R[h]. */
                int n;
                int order_R;

                order_R =
                    ((n =
                      PATH_H_B_ITEM_ORDER(tb->tb_path,
                                          h)) ==
                     B_NR_ITEMS(Fh)) ? 0 : n + 1;
                n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
                    (DC_SIZE + KEY_SIZE);
                set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
                               -1);
                return CARRY_ON;
            }
        }

        if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
            /* All contents of S[h] can be moved to the neighbors
               (L[h] & R[h]). */
            int to_r;

            to_r =
                ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
                 tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
                (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
            set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
                           0, NULL, -1, -1);
            return CARRY_ON;
        }

        /* Balancing does not lead to better packing. */
        set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
        return NO_BALANCING_NEEDED;
    }

    /* Current node contain insufficient number of items. Balancing is
       required. */
    /* Check whether we can merge S[h] with left neighbor. */
    if (tb->lnum[h] >= vn->vn_nr_item + 1)
        if (is_left_neighbor_in_cache(tb, h)
            || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
            int n;
            int order_L;

            order_L =
                ((n =
                  PATH_H_B_ITEM_ORDER(tb->tb_path,
                                      h)) ==
                 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
            n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
                                                          KEY_SIZE);
            set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
            return CARRY_ON;
        }

    /* Check whether we can merge S[h] with right neighbor. */
    if (tb->rnum[h] >= vn->vn_nr_item + 1) {
        int n;
        int order_R;

        order_R =
            ((n =
              PATH_H_B_ITEM_ORDER(tb->tb_path,
                                  h)) ==
             B_NR_ITEMS(Fh)) ? 0 : (n + 1);
        n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
                                                      KEY_SIZE);
        set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
        return CARRY_ON;
    }

    /* All contents of S[h] can be moved to the neighbors (L[h] &
       R[h]). */
    if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
        int to_r;

        to_r =
            ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
             vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
                                        tb->rnum[h]);
        set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
                       -1, -1);
        return CARRY_ON;
    }

    /* For internal nodes try to borrow item from a neighbor */
    RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");

    /* Borrow one or two items from caching neighbor */
    if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
        int from_l;

        from_l =
            (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
             1) / 2 - (vn->vn_nr_item + 1);
        set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
        return CARRY_ON;
    }

    set_parameters(tb, h, 0,
                   -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
                      1) / 2 - (vn->vn_nr_item + 1)), 1, NULL, -1, -1);
    return CARRY_ON;
}

/* Check whether current node S[h] is balanced when Decreasing its size by
 * Deleting or Truncating for LEAF node of S+tree.
 * Calculate parameters for balancing for current level h.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
 */
static int dc_check_balance_leaf(struct tree_balance *tb, int h)
{
    struct virtual_node *vn = tb->tb_vn;

    /* Number of bytes that must be deleted from (value is negative if
       bytes are deleted) buffer which contains node being balanced.
       The mnemonic is that the attempted change in node space used
       level is levbytes bytes. */
    int levbytes;
    /* the maximal item size */
    int maxsize, ret;
    /* S0 is the node whose balance is currently being checked,
       and F0 is its father.
*/ struct buffer_head *S0, *F0; int lfree, rfree /* free space in L and R */ ; S0 = PATH_H_PBUFFER(tb->tb_path, 0); F0 = PATH_H_PPARENT(tb->tb_path, 0); levbytes = tb->insert_size[h]; maxsize = MAX_CHILD_SIZE(S0); /* maximal possible size of an item */ if (!F0) { /* S[0] is the root now. */ RFALSE(-levbytes >= maxsize - B_FREE_SPACE(S0), "vs-8240: attempt to create empty buffer tree"); set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); return NO_BALANCING_NEEDED; } if ((ret = get_parents(tb, h)) != CARRY_ON) return ret; /* get free space of neighbors */ rfree = get_rfree(tb, h); lfree = get_lfree(tb, h); create_virtual_node(tb, h); /* if 3 leaves can be merge to one, set parameters and return */ if (are_leaves_removable(tb, lfree, rfree)) return CARRY_ON; /* determine maximal number of items we can shift to the left/right neighbor and the maximal number of bytes that can flow to the left/right neighbor from the left/right most liquid item that cannot be shifted from S[0] entirely */ check_left(tb, h, lfree); check_right(tb, h, rfree); /* check whether we can merge S with left neighbor. */ if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1) if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */ !tb->FR[h]) { RFALSE(!tb->FL[h], "vs-8245: dc_check_balance_leaf: FL[h] must exist"); /* set parameter to merge S[0] with its left neighbor */ set_parameters(tb, h, -1, 0, 0, NULL, -1, -1); return CARRY_ON; } /* check whether we can merge S[0] with right neighbor. */ if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) { set_parameters(tb, h, 0, -1, 0, NULL, -1, -1); return CARRY_ON; } /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */ if (is_leaf_removable(tb)) return CARRY_ON; /* Balancing is not required. 
*/ tb->s0num = vn->vn_nr_item; set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); return NO_BALANCING_NEEDED; } /* Check whether current node S[h] is balanced when Decreasing its size by * Deleting or Cutting. * Calculate parameters for balancing for current level h. * Parameters: * tb tree_balance structure; * h current level of the node; * inum item number in S[h]; * mode d - delete, c - cut. * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. */ static int dc_check_balance(struct tree_balance *tb, int h) { RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)), "vs-8250: S is not initialized"); if (h) return dc_check_balance_internal(tb, h); else return dc_check_balance_leaf(tb, h); } /* Check whether current node S[h] is balanced. * Calculate parameters for balancing for current level h. * Parameters: * * tb tree_balance structure: * * tb is a large structure that must be read about in the header file * at the same time as this procedure if the reader is to successfully * understand this procedure * * h current level of the node; * inum item number in S[h]; * mode i - insert, p - paste, d - delete, c - cut. * Returns: 1 - schedule occurred; * 0 - balancing for higher levels needed; * -1 - no balancing for higher levels needed; * -2 - no disk space. */ static int check_balance(int mode, struct tree_balance *tb, int h, int inum, int pos_in_item, struct item_head *ins_ih, const void *data) { struct virtual_node *vn; vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf); vn->vn_free_ptr = (char *)(tb->tb_vn + 1); vn->vn_mode = mode; vn->vn_affected_item_num = inum; vn->vn_pos_in_item = pos_in_item; vn->vn_ins_ih = ins_ih; vn->vn_data = data; RFALSE(mode == M_INSERT && !vn->vn_ins_ih, "vs-8255: ins_ih can not be 0 in insert mode"); if (tb->insert_size[h] > 0) /* Calculate balance parameters when size of node is increasing. 
*/ return ip_check_balance(tb, h); /* Calculate balance parameters when size of node is decreasing. */ return dc_check_balance(tb, h); } /* Check whether parent at the path is the really parent of the current node.*/ static int get_direct_parent(struct tree_balance *tb, int h) { struct buffer_head *bh; struct treepath *path = tb->tb_path; int position, path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); /* We are in the root or in the new root. */ if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) { RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1, "PAP-8260: invalid offset in the path"); if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)-> b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { /* Root is not changed. */ PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL; PATH_OFFSET_POSITION(path, path_offset - 1) = 0; return CARRY_ON; } return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */ } if (!B_IS_IN_TREE (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1))) return REPEAT_SEARCH; /* Parent in the path is not in the tree. */ if ((position = PATH_OFFSET_POSITION(path, path_offset - 1)) > B_NR_ITEMS(bh)) return REPEAT_SEARCH; if (B_N_CHILD_NUM(bh, position) != PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr) /* Parent in the path is not parent of the current node in the tree. */ return REPEAT_SEARCH; if (buffer_locked(bh)) { reiserfs_write_unlock(tb->tb_sb); __wait_on_buffer(bh); reiserfs_write_lock(tb->tb_sb); if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; } return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */ } /* Using lnum[h] and rnum[h] we should determine what neighbors * of S[h] we * need in order to balance S[h], and get them if necessary. 
* Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; * CARRY_ON - schedule didn't occur while the function worked; */ static int get_neighbors(struct tree_balance *tb, int h) { int child_position, path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1); unsigned long son_number; struct super_block *sb = tb->tb_sb; struct buffer_head *bh; PROC_INFO_INC(sb, get_neighbors[h]); if (tb->lnum[h]) { /* We need left neighbor to balance S[h]. */ PROC_INFO_INC(sb, need_l_neighbor[h]); bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); RFALSE(bh == tb->FL[h] && !PATH_OFFSET_POSITION(tb->tb_path, path_offset), "PAP-8270: invalid position in the parent"); child_position = (bh == tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> FL[h]); son_number = B_N_CHILD_NUM(tb->FL[h], child_position); reiserfs_write_unlock(sb); bh = sb_bread(sb, son_number); reiserfs_write_lock(sb); if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[h]); return REPEAT_SEARCH; } RFALSE(!B_IS_IN_TREE(tb->FL[h]) || child_position > B_NR_ITEMS(tb->FL[h]) || B_N_CHILD_NUM(tb->FL[h], child_position) != bh->b_blocknr, "PAP-8275: invalid parent"); RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child"); RFALSE(!h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - dc_size(B_N_CHILD(tb->FL[0], child_position)), "PAP-8290: invalid child size of left neighbor"); brelse(tb->L[h]); tb->L[h] = bh; } /* We need right neighbor to balance S[path_offset]. */ if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */ PROC_INFO_INC(sb, need_r_neighbor[h]); bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); RFALSE(bh == tb->FR[h] && PATH_OFFSET_POSITION(tb->tb_path, path_offset) >= B_NR_ITEMS(bh), "PAP-8295: invalid position in the parent"); child_position = (bh == tb->FR[h]) ? 
tb->rkey[h] + 1 : 0; son_number = B_N_CHILD_NUM(tb->FR[h], child_position); reiserfs_write_unlock(sb); bh = sb_bread(sb, son_number); reiserfs_write_lock(sb); if (!bh) return IO_ERROR; if (FILESYSTEM_CHANGED_TB(tb)) { brelse(bh); PROC_INFO_INC(sb, get_neighbors_restart[h]); return REPEAT_SEARCH; } brelse(tb->R[h]); tb->R[h] = bh; RFALSE(!h && B_FREE_SPACE(bh) != MAX_CHILD_SIZE(bh) - dc_size(B_N_CHILD(tb->FR[0], child_position)), "PAP-8300: invalid child size of right neighbor (%d != %d - %d)", B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh), dc_size(B_N_CHILD(tb->FR[0], child_position))); } return CARRY_ON; } static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh) { int max_num_of_items; int max_num_of_entries; unsigned long blocksize = sb->s_blocksize; #define MIN_NAME_LEN 1 max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN); max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) / (DEH_SIZE + MIN_NAME_LEN); return sizeof(struct virtual_node) + max(max_num_of_items * sizeof(struct virtual_item), sizeof(struct virtual_item) + sizeof(struct direntry_uarea) + (max_num_of_entries - 1) * sizeof(__u16)); } /* maybe we should fail balancing we are going to perform when kmalloc fails several times. But now it will loop until kmalloc gets required memory */ static int get_mem_for_virtual_node(struct tree_balance *tb) { int check_fs = 0; int size; char *buf; size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path)); if (size > tb->vn_buf_size) { /* we have to allocate more memory for virtual node */ if (tb->vn_buf) { /* free memory allocated before */ kfree(tb->vn_buf); /* this is not needed if kfree is atomic */ check_fs = 1; } /* virtual node requires now more memory */ tb->vn_buf_size = size; /* get memory for virtual item */ buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN); if (!buf) { /* getting memory with GFP_KERNEL priority may involve balancing now (due to indirect_to_direct conversion on dcache shrinking). 
So, release path and collected resources here */ free_buffers_in_tb(tb); buf = kmalloc(size, GFP_NOFS); if (!buf) { tb->vn_buf_size = 0; } tb->vn_buf = buf; schedule(); return REPEAT_SEARCH; } tb->vn_buf = buf; } if (check_fs && FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; return CARRY_ON; } #ifdef CONFIG_REISERFS_CHECK static void tb_buffer_sanity_check(struct super_block *sb, struct buffer_head *bh, const char *descr, int level) { if (bh) { if (atomic_read(&(bh->b_count)) <= 0) reiserfs_panic(sb, "jmacd-1", "negative or zero " "reference counter for buffer %s[%d] " "(%b)", descr, level, bh); if (!buffer_uptodate(bh)) reiserfs_panic(sb, "jmacd-2", "buffer is not up " "to date %s[%d] (%b)", descr, level, bh); if (!B_IS_IN_TREE(bh)) reiserfs_panic(sb, "jmacd-3", "buffer is not " "in tree %s[%d] (%b)", descr, level, bh); if (bh->b_bdev != sb->s_bdev) reiserfs_panic(sb, "jmacd-4", "buffer has wrong " "device %s[%d] (%b)", descr, level, bh); if (bh->b_size != sb->s_blocksize) reiserfs_panic(sb, "jmacd-5", "buffer has wrong " "blocksize %s[%d] (%b)", descr, level, bh); if (bh->b_blocknr > SB_BLOCK_COUNT(sb)) reiserfs_panic(sb, "jmacd-6", "buffer block " "number too high %s[%d] (%b)", descr, level, bh); } } #else static void tb_buffer_sanity_check(struct super_block *sb, struct buffer_head *bh, const char *descr, int level) {; } #endif static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh) { return reiserfs_prepare_for_journal(s, bh, 0); } static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) { struct buffer_head *locked; #ifdef CONFIG_REISERFS_CHECK int repeat_counter = 0; #endif int i; do { locked = NULL; for (i = tb->tb_path->path_length; !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) { if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) { /* if I understand correctly, we can only be sure the last buffer ** in the path is in the tree --clm */ #ifdef CONFIG_REISERFS_CHECK if (PATH_PLAST_BUFFER(tb->tb_path) == 
PATH_OFFSET_PBUFFER(tb->tb_path, i)) tb_buffer_sanity_check(tb->tb_sb, PATH_OFFSET_PBUFFER (tb->tb_path, i), "S", tb->tb_path-> path_length - i); #endif if (!clear_all_dirty_bits(tb->tb_sb, PATH_OFFSET_PBUFFER (tb->tb_path, i))) { locked = PATH_OFFSET_PBUFFER(tb->tb_path, i); } } } for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i]; i++) { if (tb->lnum[i]) { if (tb->L[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->L[i], "L", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->L[i])) locked = tb->L[i]; } if (!locked && tb->FL[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->FL[i], "FL", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->FL[i])) locked = tb->FL[i]; } if (!locked && tb->CFL[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->CFL[i], "CFL", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->CFL[i])) locked = tb->CFL[i]; } } if (!locked && (tb->rnum[i])) { if (tb->R[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->R[i], "R", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->R[i])) locked = tb->R[i]; } if (!locked && tb->FR[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->FR[i], "FR", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->FR[i])) locked = tb->FR[i]; } if (!locked && tb->CFR[i]) { tb_buffer_sanity_check(tb->tb_sb, tb->CFR[i], "CFR", i); if (!clear_all_dirty_bits (tb->tb_sb, tb->CFR[i])) locked = tb->CFR[i]; } } } /* as far as I can tell, this is not required. The FEB list seems ** to be full of newly allocated nodes, which will never be locked, ** dirty, or anything else. ** To be safe, I'm putting in the checks and waits in. For the moment, ** they are needed to keep the code in journal.c from complaining ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well. 
** --clm */ for (i = 0; !locked && i < MAX_FEB_SIZE; i++) { if (tb->FEB[i]) { if (!clear_all_dirty_bits (tb->tb_sb, tb->FEB[i])) locked = tb->FEB[i]; } } if (locked) { #ifdef CONFIG_REISERFS_CHECK repeat_counter++; if ((repeat_counter % 10000) == 0) { reiserfs_warning(tb->tb_sb, "reiserfs-8200", "too many iterations waiting " "for buffer to unlock " "(%b)", locked); /* Don't loop forever. Try to recover from possible error. */ return (FILESYSTEM_CHANGED_TB(tb)) ? REPEAT_SEARCH : CARRY_ON; } #endif reiserfs_write_unlock(tb->tb_sb); __wait_on_buffer(locked); reiserfs_write_lock(tb->tb_sb); if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; } } while (locked); return CARRY_ON; } /* Prepare for balancing, that is * get all necessary parents, and neighbors; * analyze what and where should be moved; * get sufficient number of new nodes; * Balancing will start only after all resources will be collected at a time. * * When ported to SMP kernels, only at the last moment after all needed nodes * are collected in cache, will the resources be locked using the usual * textbook ordered lock acquisition algorithms. Note that ensuring that * this code neither write locks what it does not need to write lock nor locks out of order * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans * * fix is meant in the sense of render unchanging * * Latency might be improved by first gathering a list of what buffers are needed * and then getting as many of them in parallel as possible? 
-Hans * * Parameters: * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append) * tb tree_balance structure; * inum item number in S[h]; * pos_in_item - comment this if you can * ins_ih item head of item being inserted * data inserted item or data to be pasted * Returns: 1 - schedule occurred while the function worked; * 0 - schedule didn't occur while the function worked; * -1 - if no_disk_space */ int fix_nodes(int op_mode, struct tree_balance *tb, struct item_head *ins_ih, const void *data) { int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); int pos_in_item; /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared ** during wait_tb_buffers_run */ int wait_tb_buffers_run = 0; struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); ++REISERFS_SB(tb->tb_sb)->s_fix_nodes; pos_in_item = tb->tb_path->pos_in_item; tb->fs_gen = get_generation(tb->tb_sb); /* we prepare and log the super here so it will already be in the ** transaction when do_balance needs to change it. ** This way do_balance won't have to schedule when trying to prepare ** the super for logging */ reiserfs_prepare_for_journal(tb->tb_sb, SB_BUFFER_WITH_SB(tb->tb_sb), 1); journal_mark_dirty(tb->transaction_handle, tb->tb_sb, SB_BUFFER_WITH_SB(tb->tb_sb)); if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; /* if it possible in indirect_to_direct conversion */ if (buffer_locked(tbS0)) { reiserfs_write_unlock(tb->tb_sb); __wait_on_buffer(tbS0); reiserfs_write_lock(tb->tb_sb); if (FILESYSTEM_CHANGED_TB(tb)) return REPEAT_SEARCH; } #ifdef CONFIG_REISERFS_CHECK if (REISERFS_SB(tb->tb_sb)->cur_tb) { print_cur_tb("fix_nodes"); reiserfs_panic(tb->tb_sb, "PAP-8305", "there is pending do_balance"); } if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0)) reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " "not uptodate at the beginning of fix_nodes " "or not in tree (mode %c)", tbS0, tbS0, op_mode); /* Check parameters. 
*/ switch (op_mode) { case M_INSERT: if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0)) reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect " "item number %d (in S0 - %d) in case " "of insert", item_num, B_NR_ITEMS(tbS0)); break; case M_PASTE: case M_DELETE: case M_CUT: if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) { print_block(tbS0, 0, -1, -1); reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect " "item number(%d); mode = %c " "insert_size = %d", item_num, op_mode, tb->insert_size[0]); } break; default: reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode " "of operation"); } #endif if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH) // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat return REPEAT_SEARCH; /* Starting from the leaf level; for all levels h of the tree. */ for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) { ret = get_direct_parent(tb, h); if (ret != CARRY_ON) goto repeat; ret = check_balance(op_mode, tb, h, item_num, pos_in_item, ins_ih, data); if (ret != CARRY_ON) { if (ret == NO_BALANCING_NEEDED) { /* No balancing for higher levels needed. */ ret = get_neighbors(tb, h); if (ret != CARRY_ON) goto repeat; if (h != MAX_HEIGHT - 1) tb->insert_size[h + 1] = 0; /* ok, analysis and resource gathering are complete */ break; } goto repeat; } ret = get_neighbors(tb, h); if (ret != CARRY_ON) goto repeat; /* No disk space, or schedule occurred and analysis may be * invalid and needs to be redone. */ ret = get_empty_nodes(tb, h); if (ret != CARRY_ON) goto repeat; if (!PATH_H_PBUFFER(tb->tb_path, h)) { /* We have a positive insert size but no nodes exist on this level, this means that we are creating a new root. 
*/ RFALSE(tb->blknum[h] != 1, "PAP-8350: creating new empty root"); if (h < MAX_HEIGHT - 1) tb->insert_size[h + 1] = 0; } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { if (tb->blknum[h] > 1) { /* The tree needs to be grown, so this node S[h] which is the root node is split into two nodes, and a new node (S[h+1]) will be created to become the root node. */ RFALSE(h == MAX_HEIGHT - 1, "PAP-8355: attempt to create too high of a tree"); tb->insert_size[h + 1] = (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1) + DC_SIZE; } else if (h < MAX_HEIGHT - 1) tb->insert_size[h + 1] = 0; } else tb->insert_size[h + 1] = (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1); } ret = wait_tb_buffers_until_unlocked(tb); if (ret == CARRY_ON) { if (FILESYSTEM_CHANGED_TB(tb)) { wait_tb_buffers_run = 1; ret = REPEAT_SEARCH; goto repeat; } else { return CARRY_ON; } } else { wait_tb_buffers_run = 1; goto repeat; } repeat: // fix_nodes was unable to perform its calculation due to // filesystem got changed under us, lack of free disk space or i/o // failure. If the first is the case - the search will be // repeated. For now - free all resources acquired so far except // for the new allocated nodes { int i; /* Release path buffers. 
*/ if (wait_tb_buffers_run) { pathrelse_and_restore(tb->tb_sb, tb->tb_path); } else { pathrelse(tb->tb_path); } /* brelse all resources collected for balancing */ for (i = 0; i < MAX_HEIGHT; i++) { if (wait_tb_buffers_run) { reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb-> CFL[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb-> CFR[i]); } brelse(tb->L[i]); brelse(tb->R[i]); brelse(tb->FL[i]); brelse(tb->FR[i]); brelse(tb->CFL[i]); brelse(tb->CFR[i]); tb->L[i] = NULL; tb->R[i] = NULL; tb->FL[i] = NULL; tb->FR[i] = NULL; tb->CFL[i] = NULL; tb->CFR[i] = NULL; } if (wait_tb_buffers_run) { for (i = 0; i < MAX_FEB_SIZE; i++) { if (tb->FEB[i]) reiserfs_restore_prepared_buffer (tb->tb_sb, tb->FEB[i]); } } return ret; } } /* Anatoly will probably forgive me renaming tb to tb. I just wanted to make lines shorter */ void unfix_nodes(struct tree_balance *tb) { int i; /* Release path buffers. 
*/ pathrelse_and_restore(tb->tb_sb, tb->tb_path); /* brelse all resources collected for balancing */ for (i = 0; i < MAX_HEIGHT; i++) { reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]); reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]); brelse(tb->L[i]); brelse(tb->R[i]); brelse(tb->FL[i]); brelse(tb->FR[i]); brelse(tb->CFL[i]); brelse(tb->CFR[i]); } /* deal with list of allocated (used and unused) nodes */ for (i = 0; i < MAX_FEB_SIZE; i++) { if (tb->FEB[i]) { b_blocknr_t blocknr = tb->FEB[i]->b_blocknr; /* de-allocated block which was not used by balancing and bforget about buffer for it */ brelse(tb->FEB[i]); reiserfs_free_block(tb->transaction_handle, NULL, blocknr, 0); } if (tb->used[i]) { /* release used as new nodes including a new root */ brelse(tb->used[i]); } } kfree(tb->vn_buf); }
gpl-2.0
lordaragorn/infinity_335
src/game/BattleGroundRV.cpp
1
5139
/*
 * Copyright (C) 2005-2011 MaNGOS <http://getmangos.com/>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "Player.h"
#include "BattleGround.h"
#include "BattleGroundRV.h"
#include "ObjectMgr.h"
#include "WorldPacket.h"
#include "GameObject.h"
#include "Language.h"

// Ring of Valor arena battleground.
// Sets up the standard arena countdown delays and announcement messages.
BattleGroundRV::BattleGroundRV()
{
    m_StartDelayTimes[BG_STARTING_EVENT_FIRST]  = BG_START_DELAY_1M;
    m_StartDelayTimes[BG_STARTING_EVENT_SECOND] = BG_START_DELAY_30S;
    m_StartDelayTimes[BG_STARTING_EVENT_THIRD]  = BG_START_DELAY_15S;
    m_StartDelayTimes[BG_STARTING_EVENT_FOURTH] = BG_START_DELAY_NONE;
    //we must set messageIds
    m_StartMessageIds[BG_STARTING_EVENT_FIRST]  = LANG_ARENA_ONE_MINUTE;
    m_StartMessageIds[BG_STARTING_EVENT_SECOND] = LANG_ARENA_THIRTY_SECONDS;
    m_StartMessageIds[BG_STARTING_EVENT_THIRD]  = LANG_ARENA_FIFTEEN_SECONDS;
    m_StartMessageIds[BG_STARTING_EVENT_FOURTH] = LANG_ARENA_HAS_BEGUN;
}

BattleGroundRV::~BattleGroundRV()
{
}

// Per-tick update: enforces the 47-minute draw rule and periodically
// teleports players who fell below the arena floor back on top of it.
// diff: elapsed milliseconds since the previous update tick.
void BattleGroundRV::Update(uint32 diff)
{
    BattleGround::Update(diff);

    if (GetStatus() == STATUS_IN_PROGRESS)
    {
        // after 47 minutes without one team losing, the arena closes with no winner and no rating change
        if (GetStartTime() >= 47*MINUTE*IN_MILLISECONDS)
            EndBattleGround(TEAM_NONE);

        // teleport buggers: once a second, move anyone below the floor (z < 27) back up to z = 29
        if (m_uiTeleport < diff)
        {
            for (BattleGroundPlayerMap::const_iterator itr = GetPlayers().begin(); itr != GetPlayers().end(); ++itr)
            {
                Player* plr = sObjectMgr.GetPlayer(itr->first);
                // NOTE: the original code repeated this check/teleport twice in a row
                // (copy-paste duplication); a single teleport per tick is sufficient.
                if (plr && plr->GetPositionZ() < 27)
                    plr->TeleportTo(618, plr->GetPositionX(), plr->GetPositionY(), 29, plr->GetOrientation(), false);
            }
            m_uiTeleport = 1000;            // re-arm the 1 second floor-check timer
        }
        else
            m_uiTeleport -= diff;
    }
}

// Ring of Valor has no doors to close before the match; intentionally empty.
void BattleGroundRV::StartingEventCloseDoors()
{
}

// Fires the door-opening event when the countdown finishes.
void BattleGroundRV::StartingEventOpenDoors()
{
    OpenDoorEvent(BG_EVENT_DOOR);
}

// Registers a joining player: creates his score entry and refreshes the
// alive-player world states shown on the arena scoreboard.
void BattleGroundRV::AddPlayer(Player *plr)
{
    BattleGround::AddPlayer(plr);
    //create score and add it to map, default values are set in constructor
    BattleGroundRVScore* sc = new BattleGroundRVScore;
    m_PlayerScores[plr->GetObjectGuid()] = sc;

    UpdateWorldState(0xe11, GetAlivePlayersCountByTeam(ALLIANCE));
    UpdateWorldState(0xe10, GetAlivePlayersCountByTeam(HORDE));
}

// Handles a player leaving: refreshes alive counts and re-checks whether
// the remaining team has won. Skipped while the arena is closing down.
void BattleGroundRV::RemovePlayer(Player * /*plr*/, ObjectGuid /*guid*/)
{
    if (GetStatus() == STATUS_WAIT_LEAVE)
        return;

    UpdateWorldState(0xe11, GetAlivePlayersCountByTeam(ALLIANCE));
    UpdateWorldState(0xe10, GetAlivePlayersCountByTeam(HORDE));

    CheckArenaWinConditions();
}

// Processes a player kill: updates scores via the base class, refreshes
// alive counts and checks the arena win condition.
void BattleGroundRV::HandleKillPlayer(Player* player, Player* killer)
{
    if (GetStatus() != STATUS_IN_PROGRESS)
        return;

    if (!killer)
    {
        sLog.outError("BattleGroundRV: Killer player not found");
        return;
    }

    BattleGround::HandleKillPlayer(player, killer);

    UpdateWorldState(0xe11, GetAlivePlayersCountByTeam(ALLIANCE));
    UpdateWorldState(0xe10, GetAlivePlayersCountByTeam(HORDE));

    CheckArenaWinConditions();
}

// Rescues a player who fell through the world geometry by teleporting him
// to a fixed safe spot inside the arena. Always reports the fall as handled.
bool BattleGroundRV::HandlePlayerUnderMap(Player *player)
{
    player->TeleportTo(GetMapId(), 763.5f, -284, 28.276f, player->GetOrientation(), false);
    return true;
}

// Area-trigger dispatcher: the four known RV triggers are intentionally
// ignored; anything else is logged and echoed to the triggering player.
void BattleGroundRV::HandleAreaTrigger(Player * Source, uint32 Trigger)
{
    if (GetStatus() != STATUS_IN_PROGRESS)
        return;

    switch(Trigger)
    {
        case 5224:
        case 5226:
        case 5473:
        case 5474:
            break;
        default:
            sLog.outError("WARNING: Unhandled AreaTrigger in Battleground: %u", Trigger);
            Source->GetSession()->SendAreaTriggerMessage("Warning: Unhandled AreaTrigger in Battleground: %u", Trigger);
            break;
    }
}

// Sends the initial scoreboard world states (alive counts per team plus
// the arena-enabled flag 0xe1a) to a joining client.
void BattleGroundRV::FillInitialWorldStates(WorldPacket &data, uint32& count)
{
    FillInitialWorldState(data, count, 0xe11, GetAlivePlayersCountByTeam(ALLIANCE));
    FillInitialWorldState(data, count, 0xe10, GetAlivePlayersCountByTeam(HORDE));
    FillInitialWorldState(data, count, 0xe1a, 1);
}

// Resets the battleground for reuse and re-arms the floor-check timer.
void BattleGroundRV::Reset()
{
    //call parent's class reset
    BattleGround::Reset();
    m_uiTeleport = 22000;
}

// No objects/creatures need spawning for RV in this implementation.
bool BattleGroundRV::SetupBattleGround()
{
    return true;
}
gpl-2.0
martinvanzijl/pencil
core_lib/src/interface/timecontrols.cpp
1
8221
/*

Pencil - Traditional Animation Software
Copyright (C) 2005-2007 Patrick Corrieri & Pascal Naidon
Copyright (C) 2012-2018 Matthew Chiawen Chang

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

*/

// TimeControls: the playback toolbar on the timeline — play/stop, jump to
// start/end, loop toggle, ranged-playback checkbox with start/end frame
// spinboxes, sound toggle and an fps spinbox.

#include "timecontrols.h"

#include <QtGui>
#include <QLabel>

#include "editor.h"
#include "playbackmanager.h"
#include "layermanager.h"
#include "pencildef.h"
#include "util.h"
#include "preferencemanager.h"
#include "timeline.h"

// Keeps a pointer to the owning timeline so range changes can refresh its length display.
TimeControls::TimeControls(TimeLine* parent) : QToolBar(parent)
{
    mTimeline = parent;
}

// Builds all toolbar widgets, restores persisted values (fps, loop range)
// from QSettings, wires signals and syncs the widgets with PlaybackManager.
void TimeControls::initUI()
{
    QSettings settings(PENCIL2D, PENCIL2D);

    mFpsBox = new QSpinBox(this);
    mFpsBox->setFixedHeight(24);
    mFpsBox->setValue(settings.value("fps").toInt());
    mFpsBox->setMinimum(1);
    mFpsBox->setMaximum(90);
    mFpsBox->setSuffix(" fps");
    mFpsBox->setToolTip(tr("Frames per second"));
    mFpsBox->setFocusPolicy(Qt::WheelFocus);

    mLoopStartSpinBox = new QSpinBox(this);
    mLoopStartSpinBox->setFixedHeight(24);
    mLoopStartSpinBox->setValue(settings.value("loopStart").toInt());
    mLoopStartSpinBox->setMinimum(1);
    mLoopStartSpinBox->setToolTip(tr("Start of playback loop"));
    mLoopStartSpinBox->setFocusPolicy(Qt::WheelFocus);

    mLoopEndSpinBox = new QSpinBox(this);
    mLoopEndSpinBox->setFixedHeight(24);
    mLoopEndSpinBox->setValue(settings.value("loopEnd").toInt());
    mLoopEndSpinBox->setMinimum(2);         // end must always be able to exceed the minimum start frame (1)
    mLoopEndSpinBox->setToolTip(tr("End of playback loop"));
    mLoopEndSpinBox->setFocusPolicy(Qt::WheelFocus);

    mPlaybackRangeCheckBox = new QCheckBox(tr("Range"));
    mPlaybackRangeCheckBox->setFixedHeight(24);
    mPlaybackRangeCheckBox->setToolTip(tr("Playback range"));

    mPlayButton = new QPushButton(this);
    mLoopButton = new QPushButton(this);
    mSoundButton = new QPushButton(this);
    mJumpToEndButton = new QPushButton(this);
    mJumpToStartButton = new QPushButton(this);

    mLoopIcon = QIcon(":icons/controls/loop.png");
    mSoundIcon = QIcon(":icons/controls/sound.png");
    mJumpToEndIcon = QIcon(":icons/controls/endplay.png");
    mJumpToStartIcon = QIcon(":icons/controls/startplay.png");
    mStartIcon = QIcon(":icons/controls/play.png");
    mStopIcon = QIcon(":icons/controls/stop.png");
    mPlayButton->setIcon(mStartIcon);
    mLoopButton->setIcon(mLoopIcon);
    mSoundButton->setIcon(mSoundIcon);
    mJumpToEndButton->setIcon(mJumpToEndIcon);
    mJumpToStartButton->setIcon(mJumpToStartIcon);

    mPlayButton->setToolTip(tr("Play"));
    mLoopButton->setToolTip(tr("Loop"));
    mSoundButton->setToolTip(tr("Sound on/off"));
    mJumpToEndButton->setToolTip(tr("End"));
    mJumpToStartButton->setToolTip(tr("Start"));

    mLoopButton->setCheckable(true);
    mSoundButton->setCheckable(true);
    mSoundButton->setChecked(true);         // sound defaults to on

    addWidget(mJumpToStartButton);
    addWidget(mPlayButton);
    addWidget(mJumpToEndButton);
    addWidget(mLoopButton);
    addWidget(mPlaybackRangeCheckBox);
    addWidget(mLoopStartSpinBox);
    addWidget(mLoopEndSpinBox);
    addWidget(mSoundButton);
    addWidget(mFpsBox);

    makeConnections();

    updateUI();
}

// Pulls the current playback state (range, loop marks, fps, looping flag)
// from PlaybackManager into the widgets. Signals are blocked on the value
// widgets so this sync does not feed back into the manager.
void TimeControls::updateUI()
{
    PlaybackManager* playback = mEditor->playback();

    mPlaybackRangeCheckBox->setChecked(playback->isRangedPlaybackOn()); // don't block this signal since it enables start/end range spinboxes.

    SignalBlocker b1(mLoopStartSpinBox);
    mLoopStartSpinBox->setValue(playback->markInFrame());

    SignalBlocker b2(mLoopEndSpinBox);
    mLoopEndSpinBox->setValue(playback->markOutFrame());

    SignalBlocker b3(mFpsBox);
    mFpsBox->setValue(playback->fps());

    SignalBlocker b4(mLoopButton);
    mLoopButton->setChecked(playback->isLooping());
}

// Sets the fps spinbox programmatically without emitting valueChanged.
void TimeControls::setFps(int value)
{
    SignalBlocker blocker(mFpsBox);
    mFpsBox->setValue(value);
}

// Reflects an externally-driven change of the loop flag in the loop button.
void TimeControls::toggleLoop(bool checked)
{
    mLoopButton->setChecked(checked);
}

// Reflects an externally-driven change of ranged playback in the checkbox.
void TimeControls::toggleLoopControl(bool checked)
{
    mPlaybackRangeCheckBox->setChecked(checked);
}

// Injects the editor facade; must be called before updateUI()/playback use.
void TimeControls::setEditor(Editor* editor)
{
    Q_ASSERT(editor != nullptr);
    mEditor = editor;
}

// Wires every widget to its handler. The QSpinBox::valueChanged(int)
// overload has to be disambiguated via a static_cast because valueChanged
// is overloaded (int and QString) in older Qt versions.
void TimeControls::makeConnections()
{
    connect(mPlayButton, &QPushButton::clicked, this, &TimeControls::playButtonClicked);
    connect(mJumpToEndButton, &QPushButton::clicked, this, &TimeControls::jumpToEndButtonClicked);
    connect(mJumpToStartButton, &QPushButton::clicked, this, &TimeControls::jumpToStartButtonClicked);
    connect(mLoopButton, &QPushButton::clicked, this, &TimeControls::loopButtonClicked);
    connect(mPlaybackRangeCheckBox, &QCheckBox::clicked, this, &TimeControls::playbackRangeClicked);

    auto spinBoxValueChanged = static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged);
    connect(mLoopStartSpinBox, spinBoxValueChanged, this, &TimeControls::loopStartValueChanged);
    connect(mLoopEndSpinBox, spinBoxValueChanged, this, &TimeControls::loopEndValueChanged);

    // Range checkbox enables/disables the loop start/end spinboxes directly.
    connect(mPlaybackRangeCheckBox, &QCheckBox::toggled, mLoopStartSpinBox, &QSpinBox::setEnabled);
    connect(mPlaybackRangeCheckBox, &QCheckBox::toggled, mLoopEndSpinBox, &QSpinBox::setEnabled);

    connect(mSoundButton, &QPushButton::clicked, this, &TimeControls::soundClick);
    connect(mSoundButton, &QPushButton::clicked, this, &TimeControls::updateSoundIcon);

    auto connection = connect(mFpsBox, spinBoxValueChanged, this, &TimeControls::fpsClick);
    if(!connection)
    {
        // Use "editingFinished" if the "spinBoxValueChanged" signal doesn't work...
        connect(mFpsBox, &QSpinBox::editingFinished, this, &TimeControls::onFpsEditingFinished);
    }
}

// Toggles playback: stops if playing, plays if stopped, then refreshes the icon.
void TimeControls::playButtonClicked()
{
    if (mEditor->playback()->isPlaying())
    {
        mEditor->playback()->stop();
    }
    else
    {
        mEditor->playback()->play();
    }
    updatePlayState();
}

// Swaps the play button between start/stop icon and tooltip based on state.
void TimeControls::updatePlayState()
{
    if (mEditor->playback()->isPlaying())
    {
        mPlayButton->setIcon(mStopIcon);
        mPlayButton->setToolTip(tr("Stop"));
    }
    else
    {
        mPlayButton->setIcon(mStartIcon);
        mPlayButton->setToolTip(tr("Start"));
    }
}

// Scrubs to the loop start (in ranged mode) or the first keyframe,
// and stops any currently playing sounds.
void TimeControls::jumpToStartButtonClicked()
{
    if (mPlaybackRangeCheckBox->isChecked())
    {
        mEditor->scrubTo(mLoopStartSpinBox->value());
        mEditor->playback()->setCheckForSoundsHalfway(true);
    }
    else
    {
        mEditor->scrubTo(mEditor->layers()->firstKeyFrameIndex());
    }
    mEditor->playback()->stopSounds();
}

// Scrubs to the loop end (in ranged mode) or the last keyframe.
void TimeControls::jumpToEndButtonClicked()
{
    if (mPlaybackRangeCheckBox->isChecked())
    {
        mEditor->scrubTo(mLoopEndSpinBox->value());
    }
    else
    {
        mEditor->scrubTo(mEditor->layers()->lastKeyFrameIndex());
    }
}

// Forwards the loop toggle to the playback manager.
void TimeControls::loopButtonClicked(bool bChecked)
{
    mEditor->playback()->setLooping(bChecked);
}

// Enables/disables ranged playback and refreshes the timeline length display.
void TimeControls::playbackRangeClicked(bool bChecked)
{
    mEditor->playback()->enableRangedPlayback(bChecked);
    mTimeline->updateLength();
}

// Loop-start spinbox changed: keeps end strictly greater than start by
// pushing the end value/minimum up, then propagates the new start frame.
void TimeControls::loopStartValueChanged(int i)
{
    if (i >= mLoopEndSpinBox->value())
    {
        mLoopEndSpinBox->setValue(i + 1);
    }
    mLoopEndSpinBox->setMinimum(i + 1);

    mEditor->playback()->setRangedStartFrame(i);
    mTimeline->updateLength();
}

// Loop-end spinbox changed: propagates the new end frame.
void TimeControls::loopEndValueChanged(int i)
{
    mEditor->playback()->setRangedEndFrame(i);
    mTimeline->updateLength();
}

// Switches the sound button icon between enabled and disabled artwork.
void TimeControls::updateSoundIcon(bool soundEnabled)
{
    if (soundEnabled)
    {
        mSoundButton->setIcon(QIcon(":icons/controls/sound.png"));
    }
    else
    {
        mSoundButton->setIcon(QIcon(":icons/controls/sound-disabled.png"));
    }
}

// Fallback fps handler (used when valueChanged could not be connected):
// emits fpsClick once editing of the spinbox finishes.
void TimeControls::onFpsEditingFinished()
{
    emit fpsClick(mFpsBox->value());
}

// Clamps the loop spinboxes to the animation length: start may reach at
// most frameLength - 1 so the end (start + 1) can still fit.
void TimeControls::updateLength(int frameLength)
{
    mLoopStartSpinBox->setMaximum(frameLength - 1);
    mLoopEndSpinBox->setMaximum(frameLength);
}
gpl-2.0
fschaper/netcell
core/extensions/function/src/function_nper.cpp
1
3968
/** * \file * Copyright (C) 2006-2010 Jedox AG * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as published * by the Free Software Foundation at http://www.gnu.org/copyleft/gpl.html. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place, Suite 330, Boston, MA 02111-1307 USA * * You may obtain a copy of the License at * * <a href="http://www.jedox.com/license_palo_bi_suite.txt"> * http://www.jedox.com/license_palo_bi_suite.txt * </a> * * If you are developing and distributing open source applications under the * GPL License, then you are free to use Worksheetserver under the GPL License. * For OEMs, ISVs, and VARs who distribute Worksheetserver with their products, * and do not license and distribute their source code under the GPL, Jedox provides * a flexible OEM Commercial License. * * \author * Frieder Hofmann <frieder.hofmann@jedox.com> * Radu Ialovoi <ialovoi@yalos-solutions.com> */ #include "precompiled_header.hpp" /*! * \brief * Returns the number of periods for an investment based on periodic, constant payments and a constant interest rate. * * \param function_parameter& parameters * container of parameters and context which holds information about the current calculation state * * \param base_node& n * AST presentation of the formula currently being calculated. 
* * \returns * the number of periods * * \details * pv*(1+rate)^nper + pmt*(1+rate*type)*((1+rate)^nper -1)/rate + fv = 0 * If rate is 0, then: (pmt*nper) + pv + fv = 0 * * \see * function_fv | function_ipmt | function_pmt | function_pv | function_rate * * \author * Matthias Roy <matthias.roy@jedox.com> */ interpreter_variant function_nper( function_parameter& parameters ) { if ( parameters.count() < 3 || parameters.count() > 5 ) { // formula-error! We give #N/A until we can give a message-window. return interpreter_variant( variant::error_n_a ); } double rate = parameters[0].numeric(); // the interest rate per period. double pmt = parameters[1].numeric(); // the payment made each period; it cannot change over the life of the annuity. // Typically, pmt contains principal and interest but no other fees or taxes. double pv = parameters[2].numeric(); // the present value, or the lump-sum amount that a series of future payments is worth right now. double fv = 0.0; // the future value, or cash balance you want to attain after the last payment is made. if ( parameters.count() > 3 && parameters[3].type() != variant::type_empty ) { fv = parameters[3].numeric(); } double type = 0.0; // indicates when payments are due. // 0 or omitted: payments are due at the end of the period. // 1: payments are due at the beginning of the period. if ( parameters.count() > 4 ) { type = parameters[4].numeric(); } double result; if ( rate == 0.0 ) { return interpreter_variant( variant( -( pv + fv ) / pmt ) ); } if ( type == 0.0 ) { result = log( ( pmt - fv * rate ) / ( pmt + pv * rate ) ) / log( 1.0 + rate ); } else { result = log( ( -fv * rate + pmt * ( 1.0 + rate ) ) / ( pv * rate + pmt * ( 1.0 + rate ) ) ) / log( 1.0 + rate ); } return interpreter_variant( variant( result ) ); // Returns the number of periods for an investment based on periodic, constant payments and a constant interest rate. }
gpl-2.0
n-soda/linux
fs/binfmt_elf.c
1
38414
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>

/* Number of fixed AT_* entries written by create_elf_tables() below. */
#define DLINFO_ITEMS 13

#include <linux/elf.h>

static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
extern void dump_thread(struct pt_regs *, struct user *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#define elf_caddr_t char *
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif

/* ELF pages may be larger than hardware pages; align to the bigger one. */
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

/* Registration record handed to the binfmt layer (see set_binfmt()). */
static struct linux_binfmt elf_format = {
	NULL, THIS_MODULE, load_elf_binary, load_elf_library, elf_core_dump, ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)

/* Page-align [start, end), map anonymous pages over the gap with do_brk()
 * and record the new brk in the mm.  Returns 0 or a BAD_ADDR error value. */
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static void padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	/* Zero from elf_bss up to the next ELF page boundary (user space). */
	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		clear_user((void *) elf_bss, nbyte);
	}
}

/* Build the initial user stack below p: auxiliary vector, envp[], argv[]
 * and argc.  Returns the final stack pointer, or NULL on bad strings. */
static elf_addr_t *
create_elf_tables(char *p, int argc, int envc,
		  struct elfhdr * exec,
		  unsigned long load_addr,
		  unsigned long load_bias,
		  unsigned long interp_load_addr, int ibcs)
{
	elf_caddr_t *argv;
	elf_caddr_t *envp;
	elf_addr_t *sp, *csp;
	char *k_platform, *u_platform;
	long hwcap;
	size_t platform_len = 0;
	size_t len;

	/*
	 * Get hold of platform and hardware capabilities masks for
	 * the machine we are running on.  In some cases (Sparc),
	 * this info is impossible to get, in others (i386) it is
	 * merely difficult.
	 */
	hwcap = ELF_HWCAP;
	k_platform = ELF_PLATFORM;
	/* If a platform string exists, copy it onto the user stack below p. */
	if (k_platform) {
		platform_len = strlen(k_platform) + 1;
		u_platform = p - platform_len;
		__copy_to_user(u_platform, k_platform, platform_len);
	} else
		u_platform = p;

#if defined(__i386__) && defined(CONFIG_SMP)
	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
	 * by the processes running on the same package. One thing we can do
	 * is to shuffle the initial stack for them.
	 *
	 * The conditionals here are unneeded, but kept in to make the
	 * code behaviour the same as pre change unless we have hyperthreaded
	 * processors. This keeps Mr Marcelo Person happier but should be
	 * removed for 2.5
	 */
	if(smp_num_siblings > 1)
		u_platform = u_platform - ((current->pid % 64) << 7);
#endif

	/*
	 * Force 16 byte _final_ alignment here for generality.
	 */
	sp = (elf_addr_t *)(~15UL & (unsigned long)(u_platform));

	/* Dry-run the layout on csp so sp can be pre-adjusted to keep the
	 * final stack pointer 16-byte aligned after everything is pushed. */
	csp = sp;
	csp -= (1+DLINFO_ITEMS)*2 + (k_platform ? 2 : 0);
#ifdef DLINFO_ARCH_ITEMS
	csp -= DLINFO_ARCH_ITEMS*2;
#endif
	csp -= envc+1;
	csp -= argc+1;
	csp -= (!ibcs ? 3 : 1);	/* argc itself */
	if ((unsigned long)csp & 15UL)
		sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);

	/*
	 * Put the ELF interpreter info on the stack
	 */
	/* NEW_AUX_ENT writes one (id, value) pair at sp + nr*2; the callers
	 * pre-decrement sp by the number of slots each group needs. */
#define NEW_AUX_ENT(nr, id, val) \
	  __put_user ((id), sp+(nr*2)); \
	  __put_user ((val), sp+(nr*2+1)); \

	sp -= 2;
	NEW_AUX_ENT(0, AT_NULL, 0);
	if (k_platform) {
		sp -= 2;
		NEW_AUX_ENT(0, AT_PLATFORM, (elf_addr_t)(unsigned long) u_platform);
	}
	sp -= DLINFO_ITEMS*2;
	NEW_AUX_ENT( 0, AT_HWCAP, hwcap);
	NEW_AUX_ENT( 1, AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT( 3, AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT( 4, AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT( 5, AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT( 6, AT_BASE, interp_load_addr);
	NEW_AUX_ENT( 7, AT_FLAGS, 0);
	NEW_AUX_ENT( 8, AT_ENTRY, load_bias + exec->e_entry);
	NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid);
	NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid);
	NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid);
	NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid);
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come last so platform specific code can enforce
	 * special alignment requirements on the AUXV if necessary (eg. PPC).
	 */
	ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

	/* Reserve the envp[] and argv[] pointer arrays, then (for native,
	 * non-iBCS images) push their addresses, and finally argc. */
	sp -= envc+1;
	envp = (elf_caddr_t *) sp;
	sp -= argc+1;
	argv = (elf_caddr_t *) sp;
	if (!ibcs) {
		__put_user((elf_addr_t)(unsigned long) envp,--sp);
		__put_user((elf_addr_t)(unsigned long) argv,--sp);
	}
	__put_user((elf_addr_t)argc,--sp);

	/* Fill argv[]/envp[] with pointers into the argument strings already
	 * copied above p; strnlen_user() bounds each walk. */
	current->mm->arg_start = current->mm->arg_end = (unsigned long) p;
	while (argc-->0) {
		__put_user((elf_caddr_t)(unsigned long)p,argv++);
		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return NULL;
		p += len;
	}
	__put_user(NULL, argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-->0) {
		__put_user((elf_caddr_t)(unsigned long)p,envp++);
		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return NULL;
		p += len;
	}
	__put_user(NULL, envp);
	current->mm->env_end = (unsigned long) p;
	return sp;
}

#ifndef elf_map

/* Map one PT_LOAD segment with do_mmap(), page-aligning the requested
 * address and file offset to ELF_MIN_ALIGN. */
static inline unsigned long
elf_map (struct file *filep, unsigned long addr,
	 struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;

	down_write(&current->mm->mmap_sem);
	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr),
			   prot, type,
			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */

/* This is much more generalized than the library routine read function,
   so we keep this separate.
   Technically the library read function is only provided so that we can
   read a.out libraries that have an ELF header */

/* Map the ELF program interpreter (dynamic linker) into the current mm.
 * On success returns its entry point (with load bias applied) and stores
 * the base address in *interp_load_addr; on failure returns a BAD_ADDR
 * value or negative errno disguised as unsigned long (see XXX below). */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
		interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			/* Only the first ET_DYN segment may float; everything
			 * after (and all of ET_EXEC) is mapped at a fixed
			 * address. */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/* Now use mmap to map the library into memory. */

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	/*
	 * XXX: is everything deallocated properly if this happens
	 * to be ~0UL (that is, we succeeded, but the header is broken
	 * and thus the caller will think that we failed)? We'd better
	 * switch to out-of-band error reporting.
	 */
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

/* Load an a.out program interpreter (legacy iBCS2 support): set up the
 * mm segment limits, read text+data in with the file's read op, then map
 * the bss.  Returns the interpreter's entry point or ~0UL on failure. */
static unsigned long load_aout_interp(struct exec * interp_ex,
			     struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	/* Where text starts in the file and in memory depends on the
	 * a.out flavour. */
	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
	                   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

/*
 * Load an ELF executable: validate the headers, map the PT_LOAD segments,
 * load the program interpreter (ELF or legacy a.out) named by PT_INTERP if
 * any, build the initial stack via create_elf_tables() and start the
 * thread.  After flush_old_exec() there is no way back to the old image:
 * later failures kill the process (SIGKILL/SIGSEGV).
 */
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, k, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];
	struct files_struct *files;

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (elf_ex.e_phnum < 1 ||
		elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files;		/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Scan the program headers for PT_INTERP and read the interpreter
	 * path plus its exec header(s). */
	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
#if 0
			printk("Using ELF interpreter %s\n", elf_interpreter);
#endif

			SET_PERSONALITY(elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if (!bprm->sh_bang) {
		char * passed_p;

		/* a.out interpreters are handed the exec fd number as an
		 * extra leading argv string. */
		if (interpreter_type == INTERPRETER_AOUT) {
			sprintf(passed_fileno, "%d", elf_exec_fileno);
			passed_p = passed_fileno;

			if (elf_interpreter) {
				retval = copy_strings_kernel(1,&passed_p,bprm);
				if (retval)
					goto out_free_dentry;
				bprm->argc++;
			}
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned long) elf_ex.e_entry;

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	retval = setup_arg_pages(bprm);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		return retval;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void *) elf_bss + load_bias, nbyte);
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_addr;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	/* Apply the ET_DYN load bias (0 for ET_EXEC) to all recorded
	 * addresses. */
	elf_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	padzero(elf_bss);

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter %.128s\n",
				elf_interpreter);
			force_sig(SIGSEGV, current);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
	  create_elf_tables((char *)bprm->p,
			bprm->argc,
			bprm->envc,
			&elf_ex,
			load_addr, load_bias,
			interp_load_addr,
			(interpreter_type == INTERPRETER_AOUT ? 0 : 1));
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#if 0
	printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
	printk("(end_code) %lx\n" , (long) current->mm->end_code);
	printk("(start_code) %lx\n" , (long) current->mm->start_code);
	printk("(start_data) %lx\n" , (long) current->mm->start_data);
	printk("(end_data) %lx\n" , (long) current->mm->end_data);
	printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
	printk("(brk) %lx\n" , (long) current->mm->brk);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

/* uselib() back end: maps the single PT_LOAD segment of an ELF-wrapped
 * library at its fixed p_vaddr and fills out its bss.  Exactly one
 * PT_LOAD header is accepted. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD) j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD) eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	padzero(elf_bss);

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */

/* Write nr bytes to the core file; 1 on full write, 0 otherwise. */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

/* Seek the core file to off (emulated via f_pos when no llseek op);
 * returns 1 on success, 0 on failure. */
static int dump_seek(struct file *file, off_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}

/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static inline int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;
#if 1
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}

#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

/* Size of a note on disk: header plus 4-byte-padded name and payload. */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name), 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

/* #define DEBUG */

#ifdef DEBUG
static void dump_regs(const char *str, elf_greg_t *r)
{
	int i;
	static const char *regs[] = { "ebx", "ecx", "edx", "esi", "edi", "ebp",
					      "eax", "ds", "es", "fs", "gs",
					      "orig_eax", "eip", "cs",
					      "efl", "uesp", "ss"};
	printk("Registers: %s\n", str);

	for(i = 0; i < ELF_NGREG; i++)
	{
		unsigned long val = r[i];
		printk("   %-2d %-5s=%08lx %lu\n", i, regs[i], val, val);
	}
}
#endif

#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

/* Emit one note (header, padded name, padded payload) to the core file. */
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name);
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

/* Re-defined for elf_core_dump(): these variants also enforce the core
 * rlimit via `size`/`limit` and bail to end_coredump on failure. */
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
*/ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file) { int has_dumped = 0; mm_segment_t fs; int segs; size_t size = 0; int i; struct vm_area_struct *vma; struct elfhdr elf; off_t offset = 0, dataoff; unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur; int numnote = 4; struct memelfnote notes[4]; struct elf_prstatus prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ /* first copy the parameters from user space */ memset(&psinfo, 0, sizeof(psinfo)); { unsigned int i, len; len = current->mm->arg_end - current->mm->arg_start; if (len >= ELF_PRARGSZ) len = ELF_PRARGSZ-1; copy_from_user(&psinfo.pr_psargs, (const char *)current->mm->arg_start, len); for(i = 0; i < len; i++) if (psinfo.pr_psargs[i] == 0) psinfo.pr_psargs[i] = ' '; psinfo.pr_psargs[len] = 0; } memset(&prstatus, 0, sizeof(prstatus)); /* * This transfers the registers from regs into the standard * coredump arrangement, whatever that is. 
*/ #ifdef ELF_CORE_COPY_REGS ELF_CORE_COPY_REGS(prstatus.pr_reg, regs) #else if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) { printk("sizeof(elf_gregset_t) (%ld) != sizeof(struct pt_regs) (%ld)\n", (long)sizeof(elf_gregset_t), (long)sizeof(struct pt_regs)); } else *(struct pt_regs *)&prstatus.pr_reg = *regs; #endif /* now stop all vm operations */ down_write(&current->mm->mmap_sem); segs = current->mm->map_count; #ifdef DEBUG printk("elf_core_dump: %d segs %lu limit\n", segs, limit); #endif /* Set up header */ memcpy(elf.e_ident, ELFMAG, SELFMAG); elf.e_ident[EI_CLASS] = ELF_CLASS; elf.e_ident[EI_DATA] = ELF_DATA; elf.e_ident[EI_VERSION] = EV_CURRENT; memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); elf.e_type = ET_CORE; elf.e_machine = ELF_ARCH; elf.e_version = EV_CURRENT; elf.e_entry = 0; elf.e_phoff = sizeof(elf); elf.e_shoff = 0; elf.e_flags = 0; elf.e_ehsize = sizeof(elf); elf.e_phentsize = sizeof(struct elf_phdr); elf.e_phnum = segs+1; /* Include notes */ elf.e_shentsize = 0; elf.e_shnum = 0; elf.e_shstrndx = 0; fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; current->flags |= PF_DUMPCORE; DUMP_WRITE(&elf, sizeof(elf)); offset += sizeof(elf); /* Elf header */ offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */ /* * Set up the notes in similar form to SVR4 core dumps made * with info from their /proc. 
*/ notes[0].name = "CORE"; notes[0].type = NT_PRSTATUS; notes[0].datasz = sizeof(prstatus); notes[0].data = &prstatus; prstatus.pr_info.si_signo = prstatus.pr_cursig = signr; prstatus.pr_sigpend = current->pending.signal.sig[0]; prstatus.pr_sighold = current->blocked.sig[0]; psinfo.pr_pid = prstatus.pr_pid = current->pid; psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid; psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp; psinfo.pr_sid = prstatus.pr_sid = current->session; prstatus.pr_utime.tv_sec = CT_TO_SECS(current->times.tms_utime); prstatus.pr_utime.tv_usec = CT_TO_USECS(current->times.tms_utime); prstatus.pr_stime.tv_sec = CT_TO_SECS(current->times.tms_stime); prstatus.pr_stime.tv_usec = CT_TO_USECS(current->times.tms_stime); prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->times.tms_cutime); prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->times.tms_cutime); prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->times.tms_cstime); prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->times.tms_cstime); #ifdef DEBUG dump_regs("Passed in regs", (elf_greg_t *)regs); dump_regs("prstatus regs", (elf_greg_t *)&prstatus.pr_reg); #endif notes[1].name = "CORE"; notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(psinfo); notes[1].data = &psinfo; i = current->state ? ffz(~current->state) + 1 : 0; psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; psinfo.pr_nice = current->nice; psinfo.pr_flag = current->flags; psinfo.pr_uid = NEW_TO_OLD_UID(current->uid); psinfo.pr_gid = NEW_TO_OLD_GID(current->gid); strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); notes[2].name = "CORE"; notes[2].type = NT_TASKSTRUCT; notes[2].datasz = sizeof(*current); notes[2].data = current; /* Try to dump the FPU. 
*/ prstatus.pr_fpvalid = dump_fpu (regs, &fpu); if (!prstatus.pr_fpvalid) { numnote--; } else { notes[3].name = "CORE"; notes[3].type = NT_PRFPREG; notes[3].datasz = sizeof(fpu); notes[3].data = &fpu; } /* Write notes phdr entry */ { struct elf_phdr phdr; int sz = 0; for(i = 0; i < numnote; i++) sz += notesize(&notes[i]); phdr.p_type = PT_NOTE; phdr.p_offset = offset; phdr.p_vaddr = 0; phdr.p_paddr = 0; phdr.p_filesz = sz; phdr.p_memsz = 0; phdr.p_flags = 0; phdr.p_align = 0; offset += phdr.p_filesz; DUMP_WRITE(&phdr, sizeof(phdr)); } /* Page-align dumped data */ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); /* Write program headers for segments dump */ for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { struct elf_phdr phdr; size_t sz; sz = vma->vm_end - vma->vm_start; phdr.p_type = PT_LOAD; phdr.p_offset = offset; phdr.p_vaddr = vma->vm_start; phdr.p_paddr = 0; phdr.p_filesz = maydump(vma) ? sz : 0; phdr.p_memsz = sz; offset += phdr.p_filesz; phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W; if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X; phdr.p_align = ELF_EXEC_PAGESIZE; DUMP_WRITE(&phdr, sizeof(phdr)); } for(i = 0; i < numnote; i++) if (!writenote(&notes[i], file)) goto end_coredump; DUMP_SEEK(dataoff); for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { unsigned long addr; if (!maydump(vma)) continue; #ifdef DEBUG printk("elf_core_dump: writing %08lx-%08lx\n", vma->vm_start, vma->vm_end); #endif for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { struct page* page; struct vm_area_struct *vma; if (get_user_pages(current, current->mm, addr, 1, 0, 1, &page, &vma) <= 0) { DUMP_SEEK (file->f_pos + PAGE_SIZE); } else { if (page == ZERO_PAGE(addr)) { DUMP_SEEK (file->f_pos + PAGE_SIZE); } else { void *kaddr; flush_cache_page(vma, addr); kaddr = kmap(page); DUMP_WRITE(kaddr, PAGE_SIZE); flush_page_to_ram(page); kunmap(page); } put_page(page); } } } if ((off_t) file->f_pos != offset) { /* Sanity check */ printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n", (off_t) file->f_pos, offset); } end_coredump: set_fs(fs); up_write(&current->mm->mmap_sem); return has_dumped; } #endif /* USE_ELF_CORE_DUMP */ static int __init init_elf_binfmt(void) { return register_binfmt(&elf_format); } static void __exit exit_elf_binfmt(void) { /* Remove the COFF and ELF loaders. */ unregister_binfmt(&elf_format); } module_init(init_elf_binfmt) module_exit(exit_elf_binfmt) MODULE_LICENSE("GPL");
gpl-2.0
kevinzhang1986/xvisor
arch/arm/cpu/arm32/cpu_vcpu_helper.c
1
27117
/** * Copyright (c) 2011 Anup Patel. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * @file cpu_vcpu_helper.c * @author Anup Patel (anup@brainfault.org) * @brief source of VCPU helper functions */ #include <vmm_error.h> #include <vmm_heap.h> #include <vmm_stdio.h> #include <vmm_host_aspace.h> #include <vmm_manager.h> #include <libs/stringlib.h> #include <libs/mathlib.h> #include <cpu_defines.h> #include <cpu_inline_asm.h> #include <cpu_vcpu_vfp.h> #include <cpu_vcpu_cp14.h> #include <cpu_vcpu_cp15.h> #include <cpu_vcpu_helper.h> #include <arm_features.h> void cpu_vcpu_halt(struct vmm_vcpu *vcpu, arch_regs_t *regs) { if (!vcpu || !regs) { return; } if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) { vmm_printf("\n"); cpu_vcpu_dump_user_reg(vcpu, regs); vmm_manager_vcpu_halt(vcpu); } } u32 cpu_vcpu_cpsr_retrieve(struct vmm_vcpu *vcpu, arch_regs_t *regs) { if (!vcpu || !regs) { return 0; } if (vcpu->is_normal) { return (regs->cpsr & CPSR_USERBITS_MASK) | (arm_priv(vcpu)->cpsr & ~CPSR_USERBITS_MASK); } else { return regs->cpsr; } } static void cpu_vcpu_banked_regs_save(struct arm_priv *p, arch_regs_t *src) { switch (p->cpsr & CPSR_MODE_MASK) { case CPSR_MODE_USER: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_usr = 
src->sp; p->lr_usr = src->lr; break; case CPSR_MODE_SYSTEM: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_usr = src->sp; p->lr_usr = src->lr; break; case CPSR_MODE_ABORT: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_abt = src->sp; p->lr_abt = src->lr; break; case CPSR_MODE_UNDEFINED: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_und = src->sp; p->lr_und = src->lr; break; case CPSR_MODE_MONITOR: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_mon = src->sp; p->lr_mon = src->lr; break; case CPSR_MODE_SUPERVISOR: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_svc = src->sp; p->lr_svc = src->lr; break; case CPSR_MODE_IRQ: p->gpr_usr[0] = src->gpr[8]; p->gpr_usr[1] = src->gpr[9]; p->gpr_usr[2] = src->gpr[10]; p->gpr_usr[3] = src->gpr[11]; p->gpr_usr[4] = src->gpr[12]; p->sp_irq = src->sp; p->lr_irq = src->lr; break; case CPSR_MODE_FIQ: p->gpr_fiq[0] = src->gpr[8]; p->gpr_fiq[1] = src->gpr[9]; p->gpr_fiq[2] = src->gpr[10]; p->gpr_fiq[3] = src->gpr[11]; p->gpr_fiq[4] = src->gpr[12]; p->sp_fiq = src->sp; p->lr_fiq = src->lr; break; default: break; }; } static void cpu_vcpu_banked_regs_restore(struct arm_priv *p, arch_regs_t *dst) { switch (p->cpsr & CPSR_MODE_MASK) { case CPSR_MODE_USER: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_usr; dst->lr = p->lr_usr; break; case CPSR_MODE_SYSTEM: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = 
p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_usr; dst->lr = p->lr_usr; break; case CPSR_MODE_ABORT: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_abt; dst->lr = p->lr_abt; break; case CPSR_MODE_UNDEFINED: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_und; dst->lr = p->lr_und; break; case CPSR_MODE_MONITOR: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_mon; dst->lr = p->lr_mon; break; case CPSR_MODE_SUPERVISOR: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_svc; dst->lr = p->lr_svc; break; case CPSR_MODE_IRQ: dst->gpr[8] = p->gpr_usr[0]; dst->gpr[9] = p->gpr_usr[1]; dst->gpr[10] = p->gpr_usr[2]; dst->gpr[11] = p->gpr_usr[3]; dst->gpr[12] = p->gpr_usr[4]; dst->sp = p->sp_irq; dst->lr = p->lr_irq; break; case CPSR_MODE_FIQ: dst->gpr[8] = p->gpr_fiq[0]; dst->gpr[9] = p->gpr_fiq[1]; dst->gpr[10] = p->gpr_fiq[2]; dst->gpr[11] = p->gpr_fiq[3]; dst->gpr[12] = p->gpr_fiq[4]; dst->sp = p->sp_fiq; dst->lr = p->lr_fiq; break; default: break; }; } void cpu_vcpu_cpsr_update(struct vmm_vcpu *vcpu, arch_regs_t *regs, u32 new_cpsr, u32 new_cpsr_mask) { bool mode_change; struct arm_priv *p; /* Sanity check */ if (!vcpu || !vcpu->is_normal || !regs) { return; } p = arm_priv(vcpu); new_cpsr &= new_cpsr_mask; /* Determine if mode is changing */ mode_change = FALSE; if ((new_cpsr_mask & CPSR_MODE_MASK) && ((p->cpsr & CPSR_MODE_MASK) != (new_cpsr & CPSR_MODE_MASK))) { mode_change = TRUE; /* Save banked registers for old CPSR */ 
cpu_vcpu_banked_regs_save(p, regs); } /* Set the new priviledged bits of CPSR */ p->cpsr &= (~CPSR_PRIVBITS_MASK | ~new_cpsr_mask); p->cpsr |= new_cpsr & CPSR_PRIVBITS_MASK & new_cpsr_mask; /* Set the new user bits of CPSR */ regs->cpsr &= (~CPSR_USERBITS_MASK | ~new_cpsr_mask); regs->cpsr |= new_cpsr & CPSR_USERBITS_MASK & new_cpsr_mask; /* If mode is changing then */ if (mode_change) { /* Restore values of banked registers for new CPSR */ cpu_vcpu_banked_regs_restore(p, regs); /* Synchronize CP15 state to change in mode */ cpu_vcpu_cp15_sync_cpsr(vcpu); } return; } u32 cpu_vcpu_spsr_retrieve(struct vmm_vcpu *vcpu) { struct arm_priv *p = arm_priv(vcpu); /* Find out correct SPSR */ switch (p->cpsr & CPSR_MODE_MASK) { case CPSR_MODE_ABORT: return p->spsr_abt; case CPSR_MODE_UNDEFINED: return p->spsr_und; case CPSR_MODE_MONITOR: return p->spsr_mon; case CPSR_MODE_SUPERVISOR: return p->spsr_svc; case CPSR_MODE_IRQ: return p->spsr_irq; case CPSR_MODE_FIQ: return p->spsr_fiq; default: break; }; return 0; } int cpu_vcpu_spsr_update(struct vmm_vcpu *vcpu, u32 new_spsr, u32 new_spsr_mask) { struct arm_priv *p; /* Sanity check */ if (!vcpu || !vcpu->is_normal) { return VMM_EFAIL; } p = arm_priv(vcpu); /* VCPU cannot be in user mode */ if ((p->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) { return VMM_EFAIL; } new_spsr &= new_spsr_mask; /* Update appropriate SPSR */ switch (p->cpsr & CPSR_MODE_MASK) { case CPSR_MODE_ABORT: p->spsr_abt &= ~new_spsr_mask; p->spsr_abt |= new_spsr; break; case CPSR_MODE_UNDEFINED: p->spsr_und &= ~new_spsr_mask; p->spsr_und |= new_spsr; break; case CPSR_MODE_MONITOR: p->spsr_mon &= ~new_spsr_mask; p->spsr_mon |= new_spsr; break; case CPSR_MODE_SUPERVISOR: p->spsr_svc &= ~new_spsr_mask; p->spsr_svc |= new_spsr; break; case CPSR_MODE_IRQ: p->spsr_irq &= ~new_spsr_mask; p->spsr_irq |= new_spsr; break; case CPSR_MODE_FIQ: p->spsr_fiq &= ~new_spsr_mask; p->spsr_fiq |= new_spsr; break; default: break; }; /* Return success */ return VMM_OK; } u32 
cpu_vcpu_reg_read(struct vmm_vcpu *vcpu, arch_regs_t *regs, u32 reg_num) { switch (reg_num) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: return regs->gpr[reg_num]; break; case 13: return regs->sp; break; case 14: return regs->lr; break; case 15: return regs->pc; break; default: break; }; return 0x0; } void cpu_vcpu_reg_write(struct vmm_vcpu *vcpu, arch_regs_t *regs, u32 reg_num, u32 reg_val) { struct arm_priv *p = arm_priv(vcpu); u32 curmode = p->cpsr & CPSR_MODE_MASK; switch (reg_num) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: regs->gpr[reg_num] = reg_val; break; case 8: case 9: case 10: case 11: case 12: regs->gpr[reg_num] = reg_val; if (curmode == CPSR_MODE_FIQ) { p->gpr_fiq[reg_num - 8] = reg_val; } else { p->gpr_usr[reg_num - 8] = reg_val; } break; case 13: regs->sp = reg_val; switch (curmode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: p->sp_usr = reg_val; break; case CPSR_MODE_FIQ: p->sp_fiq = reg_val; break; case CPSR_MODE_IRQ: p->sp_irq = reg_val; break; case CPSR_MODE_SUPERVISOR: p->sp_svc = reg_val; break; case CPSR_MODE_ABORT: p->sp_abt = reg_val; break; case CPSR_MODE_UNDEFINED: p->sp_und = reg_val; break; case CPSR_MODE_MONITOR: p->sp_mon = reg_val; break; default: break; }; break; case 14: regs->lr = reg_val; switch (curmode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: p->lr_usr = reg_val; break; case CPSR_MODE_FIQ: p->lr_fiq = reg_val; break; case CPSR_MODE_IRQ: p->lr_irq = reg_val; break; case CPSR_MODE_SUPERVISOR: p->lr_svc = reg_val; break; case CPSR_MODE_ABORT: p->lr_abt = reg_val; break; case CPSR_MODE_UNDEFINED: p->lr_und = reg_val; break; case CPSR_MODE_MONITOR: p->lr_mon = reg_val; break; default: break; }; break; case 15: regs->pc = reg_val; break; default: break; }; } u32 cpu_vcpu_regmode_read(struct vmm_vcpu *vcpu, arch_regs_t *regs, u32 mode, u32 reg_num) { struct arm_priv *p = arm_priv(vcpu); u32 curmode = p->cpsr & CPSR_MODE_MASK; if (mode == 
curmode) { return cpu_vcpu_reg_read(vcpu, regs, reg_num); } else { switch (reg_num) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: return regs->gpr[reg_num]; break; case 8: case 9: case 10: case 11: case 12: if (curmode == CPSR_MODE_FIQ) { return p->gpr_usr[reg_num - 8]; } else { if (mode == CPSR_MODE_FIQ) { return p->gpr_fiq[reg_num - 8]; } else { return regs->gpr[reg_num]; } } break; case 13: switch (mode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: return p->sp_usr; break; case CPSR_MODE_FIQ: return p->sp_fiq; break; case CPSR_MODE_IRQ: return p->sp_irq; break; case CPSR_MODE_SUPERVISOR: return p->sp_svc; break; case CPSR_MODE_ABORT: return p->sp_abt; break; case CPSR_MODE_UNDEFINED: return p->sp_und; break; case CPSR_MODE_MONITOR: return p->sp_mon; break; default: break; }; break; case 14: switch (mode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: return p->lr_usr; break; case CPSR_MODE_FIQ: return p->lr_fiq; break; case CPSR_MODE_IRQ: return p->lr_irq; break; case CPSR_MODE_SUPERVISOR: return p->lr_svc; break; case CPSR_MODE_ABORT: return p->lr_abt; break; case CPSR_MODE_UNDEFINED: return p->lr_und; break; case CPSR_MODE_MONITOR: return p->lr_mon; break; default: break; }; break; case 15: return regs->pc; break; default: break; }; } return 0x0; } void cpu_vcpu_regmode_write(struct vmm_vcpu *vcpu, arch_regs_t *regs, u32 mode, u32 reg_num, u32 reg_val) { struct arm_priv *p = arm_priv(vcpu); u32 curmode = p->cpsr & CPSR_MODE_MASK; if (mode == curmode) { cpu_vcpu_reg_write(vcpu, regs, reg_num, reg_val); } else { switch (reg_num) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: regs->gpr[reg_num] = reg_val; break; case 8: case 9: case 10: case 11: case 12: if (curmode == CPSR_MODE_FIQ) { p->gpr_usr[reg_num - 8] = reg_val; } else { if (mode == CPSR_MODE_FIQ) { p->gpr_fiq[reg_num - 8] = reg_val; } else { regs->gpr[reg_num] = reg_val; } } break; case 13: switch (mode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: p->sp_usr = 
reg_val; break; case CPSR_MODE_FIQ: p->sp_fiq = reg_val; break; case CPSR_MODE_IRQ: p->sp_irq = reg_val; break; case CPSR_MODE_SUPERVISOR: p->sp_svc = reg_val; break; case CPSR_MODE_ABORT: p->sp_abt = reg_val; break; case CPSR_MODE_UNDEFINED: p->sp_und = reg_val; break; case CPSR_MODE_MONITOR: p->sp_mon = reg_val; break; default: break; }; break; case 14: switch (mode) { case CPSR_MODE_USER: case CPSR_MODE_SYSTEM: p->lr_usr = reg_val; break; case CPSR_MODE_FIQ: p->lr_fiq = reg_val; break; case CPSR_MODE_IRQ: p->lr_irq = reg_val; break; case CPSR_MODE_SUPERVISOR: p->lr_svc = reg_val; break; case CPSR_MODE_ABORT: p->lr_abt = reg_val; break; case CPSR_MODE_UNDEFINED: p->lr_und = reg_val; break; case CPSR_MODE_MONITOR: p->lr_mon = reg_val; break; default: break; }; break; case 15: regs->pc = reg_val; break; default: break; }; } } int arch_guest_init(struct vmm_guest *guest) { int rc; u32 ovect_flags; virtual_addr_t ovect_va; struct cpu_page pg; if (!guest->reset_count) { guest->arch_priv = vmm_zalloc(sizeof(struct arm_guest_priv)); if (!guest->arch_priv) { rc = VMM_EFAIL; goto fail; } ovect_flags = 0x0; ovect_flags |= VMM_MEMORY_READABLE; ovect_flags |= VMM_MEMORY_WRITEABLE; ovect_flags |= VMM_MEMORY_CACHEABLE; ovect_flags |= VMM_MEMORY_EXECUTABLE; ovect_va = vmm_host_alloc_pages(1, ovect_flags); if (!ovect_va) { rc = VMM_EFAIL; goto fail; } if ((rc = cpu_mmu_get_reserved_page(ovect_va, &pg))) { goto fail_freepages; } if ((rc = cpu_mmu_unmap_reserved_page(&pg))) { goto fail_freepages; } #if defined(CONFIG_ARMV5) pg.ap = TTBL_AP_SRW_UR; #else if (pg.ap == TTBL_AP_SR_U) { pg.ap = TTBL_AP_SR_UR; } else { pg.ap = TTBL_AP_SRW_UR; } #endif if ((rc = cpu_mmu_map_reserved_page(&pg))) { goto fail_freepages; } arm_guest_priv(guest)->ovect = (u32 *)ovect_va; if (vmm_devtree_read_u32(guest->node, "psci_version", &arm_guest_priv(guest)->psci_version)) { /* By default, assume PSCI v0.1 */ arm_guest_priv(guest)->psci_version = 1; } } return VMM_OK; fail_freepages: if 
(arm_guest_priv(guest)->ovect) { vmm_host_free_pages( (virtual_addr_t)arm_guest_priv(guest)->ovect, 1); } fail: return rc; } int arch_guest_deinit(struct vmm_guest *guest) { int rc; if (guest->arch_priv) { if (arm_guest_priv(guest)->ovect) { rc = vmm_host_free_pages( (virtual_addr_t)arm_guest_priv(guest)->ovect, 1); if (rc) { return rc; } } vmm_free(guest->arch_priv); } return VMM_OK; } int arch_guest_add_region(struct vmm_guest *guest, struct vmm_region *region) { return VMM_OK; } int arch_guest_del_region(struct vmm_guest *guest, struct vmm_region *region) { return VMM_OK; } int arch_vcpu_init(struct vmm_vcpu *vcpu) { int rc; u32 ite, cpuid; const char *attr; /* Initialize User Mode Registers */ /* For both Orphan & Normal VCPUs */ memset(arm_regs(vcpu), 0, sizeof(arch_regs_t)); arm_regs(vcpu)->pc = vcpu->start_pc; arm_regs(vcpu)->sp_excp = vcpu->stack_va + vcpu->stack_sz - 4; if (vcpu->is_normal) { arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK; arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED; arm_regs(vcpu)->cpsr |= CPSR_MODE_USER; arm_regs(vcpu)->sp = 0; } else { arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK; arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED; arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR; arm_regs(vcpu)->sp = arm_regs(vcpu)->sp_excp; } /* Initialize Supervisor Mode Registers */ /* For only Normal VCPUs */ if (!vcpu->is_normal) { return VMM_OK; } rc = vmm_devtree_read_string(vcpu->node, VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr); if (rc) { goto fail; } if (strcmp(attr, "armv5te,arm926ej") == 0) { cpuid = ARM_CPUID_ARM926; } else if (strcmp(attr, "armv6,arm11mp") == 0) { cpuid = ARM_CPUID_ARM11MPCORE; } else if (strcmp(attr, "armv7a,cortex-a8") == 0) { cpuid = ARM_CPUID_CORTEXA8; } else if (strcmp(attr, "armv7a,cortex-a9") == 0) { cpuid = ARM_CPUID_CORTEXA9; } else { rc = VMM_EINVALID; goto fail; } if (!vcpu->reset_count) { vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv)); arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED | CPSR_IRQ_DISABLED | 
CPSR_FIQ_DISABLED | CPSR_MODE_SUPERVISOR; } else { for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) { arm_priv(vcpu)->gpr_usr[ite] = 0x0; arm_priv(vcpu)->gpr_fiq[ite] = 0x0; } arm_priv(vcpu)->sp_usr = 0x0; arm_priv(vcpu)->lr_usr = 0x0; arm_priv(vcpu)->sp_svc = 0x0; arm_priv(vcpu)->lr_svc = 0x0; arm_priv(vcpu)->spsr_svc = 0x0; arm_priv(vcpu)->sp_mon = 0x0; arm_priv(vcpu)->lr_mon = 0x0; arm_priv(vcpu)->spsr_mon = 0x0; arm_priv(vcpu)->sp_abt = 0x0; arm_priv(vcpu)->lr_abt = 0x0; arm_priv(vcpu)->spsr_abt = 0x0; arm_priv(vcpu)->sp_und = 0x0; arm_priv(vcpu)->lr_und = 0x0; arm_priv(vcpu)->spsr_und = 0x0; arm_priv(vcpu)->sp_irq = 0x0; arm_priv(vcpu)->lr_irq = 0x0; arm_priv(vcpu)->spsr_irq = 0x0; arm_priv(vcpu)->sp_fiq = 0x0; arm_priv(vcpu)->lr_fiq = 0x0; arm_priv(vcpu)->spsr_fiq = 0x0; cpu_vcpu_cpsr_update(vcpu, arm_regs(vcpu), (CPSR_ZERO_MASK | CPSR_ASYNC_ABORT_DISABLED | CPSR_IRQ_DISABLED | CPSR_FIQ_DISABLED | CPSR_MODE_SUPERVISOR), CPSR_ALLBITS_MASK); } if (!vcpu->reset_count) { arm_priv(vcpu)->features = 0; switch (cpuid) { case ARM_CPUID_ARM926: arm_set_feature(vcpu, ARM_FEATURE_V5); arm_set_feature(vcpu, ARM_FEATURE_VFP); arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS); arm_set_feature(vcpu, ARM_FEATURE_CACHE_TEST_CLEAN); break; case ARM_CPUID_ARM11MPCORE: arm_set_feature(vcpu, ARM_FEATURE_V6); arm_set_feature(vcpu, ARM_FEATURE_V6K); arm_set_feature(vcpu, ARM_FEATURE_VFP); arm_set_feature(vcpu, ARM_FEATURE_VAPA); arm_set_feature(vcpu, ARM_FEATURE_MPIDR); arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS); break; case ARM_CPUID_CORTEXA8: arm_set_feature(vcpu, ARM_FEATURE_V7); arm_set_feature(vcpu, ARM_FEATURE_VFP3); arm_set_feature(vcpu, ARM_FEATURE_NEON); arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE); arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS); arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE); break; case ARM_CPUID_CORTEXA9: arm_set_feature(vcpu, ARM_FEATURE_V7); arm_set_feature(vcpu, ARM_FEATURE_VFP3); arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16); arm_set_feature(vcpu, 
ARM_FEATURE_NEON); arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE); arm_set_feature(vcpu, ARM_FEATURE_V7MP); arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE); break; default: break; }; /* Some features automatically imply others: */ if (arm_feature(vcpu, ARM_FEATURE_V7)) { arm_set_feature(vcpu, ARM_FEATURE_VAPA); arm_set_feature(vcpu, ARM_FEATURE_THUMB2); arm_set_feature(vcpu, ARM_FEATURE_MPIDR); if (!arm_feature(vcpu, ARM_FEATURE_M)) { arm_set_feature(vcpu, ARM_FEATURE_V6K); } else { arm_set_feature(vcpu, ARM_FEATURE_V6); } } if (arm_feature(vcpu, ARM_FEATURE_V6K)) { arm_set_feature(vcpu, ARM_FEATURE_V6); arm_set_feature(vcpu, ARM_FEATURE_MVFR); } if (arm_feature(vcpu, ARM_FEATURE_V6)) { arm_set_feature(vcpu, ARM_FEATURE_V5); if (!arm_feature(vcpu, ARM_FEATURE_M)) { arm_set_feature(vcpu, ARM_FEATURE_AUXCR); } } if (arm_feature(vcpu, ARM_FEATURE_V5)) { arm_set_feature(vcpu, ARM_FEATURE_V4T); } if (arm_feature(vcpu, ARM_FEATURE_M)) { arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV); } if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) { arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV); } if (arm_feature(vcpu, ARM_FEATURE_VFP4)) { arm_set_feature(vcpu, ARM_FEATURE_VFP3); } if (arm_feature(vcpu, ARM_FEATURE_VFP3)) { arm_set_feature(vcpu, ARM_FEATURE_VFP); } if (arm_feature(vcpu, ARM_FEATURE_LPAE)) { arm_set_feature(vcpu, ARM_FEATURE_PXN); } } rc = cpu_vcpu_vfp_init(vcpu); if (rc) { goto fail_vfp_init; } rc = cpu_vcpu_cp14_init(vcpu); if (rc) { goto fail_cp14_init; } rc = cpu_vcpu_cp15_init(vcpu, cpuid); if (rc) { goto fail_cp15_init; } return VMM_OK; fail_cp15_init: if (!vcpu->reset_count) { cpu_vcpu_cp14_deinit(vcpu); } fail_cp14_init: if (!vcpu->reset_count) { cpu_vcpu_vfp_deinit(vcpu); } fail_vfp_init: if (!vcpu->reset_count) { vmm_free(vcpu->arch_priv); vcpu->arch_priv = NULL; } fail: return rc; } int arch_vcpu_deinit(struct vmm_vcpu *vcpu) { int rc; /* For both Orphan & Normal VCPUs */ memset(arm_regs(vcpu), 0, sizeof(arch_regs_t)); /* For Orphan VCPUs do nothing else */ if 
(!vcpu->is_normal) { return VMM_OK; } /* Cleanup CP15 */ if ((rc = cpu_vcpu_cp15_deinit(vcpu))) { return rc; } /* Cleanup CP14 */ if ((rc = cpu_vcpu_cp14_deinit(vcpu))) { return rc; } /* Cleanup VFP */ if ((rc = cpu_vcpu_vfp_deinit(vcpu))) { return rc; } /* Free super regs */ vmm_free(vcpu->arch_priv); return VMM_OK; } void arch_vcpu_switch(struct vmm_vcpu *tvcpu, struct vmm_vcpu *vcpu, arch_regs_t *regs) { u32 ite; /* Save user registers & banked registers */ if (tvcpu) { arm_regs(tvcpu)->pc = regs->pc; arm_regs(tvcpu)->lr = regs->lr; arm_regs(tvcpu)->sp = regs->sp; for (ite = 0; ite < CPU_GPR_COUNT; ite++) { arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite]; } arm_regs(tvcpu)->cpsr = regs->cpsr; arm_regs(tvcpu)->sp_excp = regs->sp_excp; if (tvcpu->is_normal) { cpu_vcpu_banked_regs_save(arm_priv(tvcpu), regs); /* Save VFP regs */ cpu_vcpu_vfp_regs_save(tvcpu); /* Save CP14 regs */ cpu_vcpu_cp14_regs_save(tvcpu); /* Save CP15 regs */ cpu_vcpu_cp15_regs_save(tvcpu); } } /* Restore user registers & banked registers */ regs->pc = arm_regs(vcpu)->pc; regs->lr = arm_regs(vcpu)->lr; regs->sp = arm_regs(vcpu)->sp; for (ite = 0; ite < CPU_GPR_COUNT; ite++) { regs->gpr[ite] = arm_regs(vcpu)->gpr[ite]; } regs->cpsr = arm_regs(vcpu)->cpsr; regs->sp_excp = arm_regs(vcpu)->sp_excp; if (vcpu->is_normal) { /* Restore VFP regs */ cpu_vcpu_vfp_regs_restore(vcpu); /* Restore CP14 regs */ cpu_vcpu_cp14_regs_restore(vcpu); /* Restore CP15 regs */ cpu_vcpu_cp15_regs_restore(vcpu); /* Restore banked registers */ cpu_vcpu_banked_regs_restore(arm_priv(vcpu), regs); } else { /* Restore hypervisor TTBL for Orphan VCPUs */ if (tvcpu) { if (tvcpu->is_normal) { cpu_mmu_change_ttbr(cpu_mmu_l1tbl_default()); } } else { cpu_mmu_change_ttbr(cpu_mmu_l1tbl_default()); } } /* Clear exclusive monitor */ clrex(); } void arch_vcpu_preempt_orphan(void) { /* Trigger SVC call from supervisor mode. 
This will cause * do_soft_irq() function to call vmm_scheduler_preempt_orphan() */ asm volatile ("svc #0\t\n"); } static void __cpu_vcpu_dump_user_reg(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu, arch_regs_t *regs) { u32 i; vmm_cprintf(cdev, "Core Registers\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", regs->sp, "LR", regs->lr, "PC", regs->pc); vmm_cprintf(cdev, " %7s=0x%08x\n", "CPSR", cpu_vcpu_cpsr_retrieve(vcpu, regs)); vmm_cprintf(cdev, "General Purpose Registers"); for (i = 0; i < CPU_GPR_COUNT; i++) { if (i % 3 == 0) { vmm_cprintf(cdev, "\n"); } vmm_cprintf(cdev, " %5s%02d=0x%08x", "R", i, regs->gpr[i]); } vmm_cprintf(cdev, "\n"); } void cpu_vcpu_dump_user_reg(struct vmm_vcpu *vcpu, arch_regs_t *regs) { __cpu_vcpu_dump_user_reg(NULL, vcpu, regs); } void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu) { u32 i; /* For both Normal & Orphan VCPUs */ __cpu_vcpu_dump_user_reg(cdev, vcpu, arm_regs(vcpu)); /* For only Normal VCPUs */ if (!vcpu->is_normal) { return; } /* Print banked registers */ vmm_cprintf(cdev, "User Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_usr, "LR", arm_priv(vcpu)->lr_usr); vmm_cprintf(cdev, "Supervisor Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_svc, "LR", arm_priv(vcpu)->lr_svc, "SPSR", arm_priv(vcpu)->spsr_svc); vmm_cprintf(cdev, "Monitor Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_mon, "LR", arm_priv(vcpu)->lr_mon, "SPSR", arm_priv(vcpu)->spsr_mon); vmm_cprintf(cdev, "Abort Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_abt, "LR", arm_priv(vcpu)->lr_abt, "SPSR", arm_priv(vcpu)->spsr_abt); vmm_cprintf(cdev, "Undefined Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_und, "LR", 
arm_priv(vcpu)->lr_und, "SPSR", arm_priv(vcpu)->spsr_und); vmm_cprintf(cdev, "IRQ Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n", "SP", arm_priv(vcpu)->sp_irq, "LR", arm_priv(vcpu)->lr_irq, "SPSR", arm_priv(vcpu)->spsr_irq); vmm_cprintf(cdev, "FIQ Mode Registers (Banked)\n"); vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x", "SP", arm_priv(vcpu)->sp_fiq, "LR", arm_priv(vcpu)->lr_fiq, "SPSR", arm_priv(vcpu)->spsr_fiq); for (i = 0; i < 5; i++) { if (i % 3 == 0) { vmm_cprintf(cdev, "\n"); } vmm_cprintf(cdev, " %5s%02d=0x%08x", "R", (i + 8), arm_priv(vcpu)->gpr_fiq[i]); } vmm_cprintf(cdev, "\n"); /* Print VFP registers */ cpu_vcpu_vfp_regs_dump(cdev, vcpu); /* Print CP14 registers */ cpu_vcpu_cp14_regs_dump(cdev, vcpu); /* Print CP15 registers */ cpu_vcpu_cp15_regs_dump(cdev, vcpu); } void arch_vcpu_stat_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu) { /* For now no arch specific stats */ }
gpl-2.0
MinoaveDev/EccoDev
src/ChannelServer/PetHandler.cpp
1
7497
/* Copyright (C) 2008-2011 Vana Development Team This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "PetHandler.h" #include "GameConstants.h" #include "Inventory.h" #include "InventoryPacket.h" #include "ItemConstants.h" #include "ItemDataProvider.h" #include "MovementHandler.h" #include "PacketReader.h" #include "Pet.h" #include "PetsPacket.h" #include "Player.h" #include "Randomizer.h" #include "SkillConstants.h" void PetHandler::handleMovement(Player *player, PacketReader &packet) { int32_t petid = (int32_t)packet.get<int64_t>(); Pet *pet = player->getPets()->getPet(petid); if (pet == nullptr) { // Hacks return; } packet.skipBytes(4); MovementHandler::parseMovement(pet, packet); packet.reset(10); PetsPacket::showMovement(player, pet, packet.getBuffer(), packet.getBufferLength() - 9); } void PetHandler::handleChat(Player *player, PacketReader &packet) { int32_t petid = (int32_t)packet.get<int64_t>(); if (player->getPets()->getPet(petid) == nullptr) { // Hacks return; } packet.skipBytes(1); int8_t act = packet.get<int8_t>(); string message = packet.getString(); PetsPacket::showChat(player, player->getPets()->getPet(petid), message, act); } void PetHandler::handleSummon(Player *player, PacketReader &packet) { if (!player->updateTickCount(packet.get<int32_t>())) { // Tickcount was the same or less than 100 of the difference. 
return; } int16_t slot = packet.get<int16_t>(); bool master = packet.get<int8_t>() == 1; // Might possibly fit under getBool criteria bool multipet = player->getSkills()->getSkillLevel(Jobs::Beginner::FollowTheLead) > 0; Pet *pet = player->getPets()->getPet(player->getInventory()->getItem(Inventories::CashInventory, slot)->getPetId()); if (pet->isSummoned()) { // Removing a pet player->getPets()->setSummoned(pet->getIndex(), 0); if (pet->getIndex() == 0) { Timer::Id id(Timer::Types::PetTimer, pet->getIndex(), 0); player->getTimers()->removeTimer(id); } if (multipet) { for (int8_t i = pet->getIndex(); i < Inventories::MaxPetCount; i++) { // Shift around pets if using multipet if (Pet *move = player->getPets()->getSummoned(i)) { move->setIndex(i - 1); player->getPets()->setSummoned(move->getIndex(), move->getId()); player->getPets()->setSummoned(i, 0); if (move->getIndex() == 0) move->startTimer(); } } } int8_t index = pet->getIndex(); pet->setIndex(-1); PetsPacket::petSummoned(player, pet, false, false, index); } else { // Summoning a Pet pet->setPos(player->getPos()); if (!multipet || master) { pet->setIndex(0); if (multipet) { for (int8_t i = Inventories::MaxPetCount - 1; i > 0; i--) { if (player->getPets()->getSummoned(i - 1) && !player->getPets()->getSummoned(i)) { Pet *move = player->getPets()->getSummoned(i - 1); player->getPets()->setSummoned(i, move->getId()); player->getPets()->setSummoned(i - 1, 0); move->setIndex(i); } } PetsPacket::petSummoned(player, pet); } else if (Pet *kicked = player->getPets()->getSummoned(0)) { kicked->setIndex(-1); Timer::Id id(Timer::Types::PetTimer, kicked->getIndex(), 0); player->getTimers()->removeTimer(id); PetsPacket::petSummoned(player, pet, true); } else PetsPacket::petSummoned(player, pet); player->getPets()->setSummoned(0, pet->getId()); pet->startTimer(); } else { for (int8_t i = 0; i < Inventories::MaxPetCount; i++) { if (!player->getPets()->getSummoned(i)) { player->getPets()->setSummoned(i, pet->getId()); 
pet->setIndex(i); PetsPacket::petSummoned(player, pet); pet->startTimer(); break; } } } } PetsPacket::blankUpdate(player); } void PetHandler::handleFeed(Player *player, PacketReader &packet) { if (!player->updateTickCount(packet.get<int32_t>())) { // Tickcount was the same or less than 100 of the difference. return; } int16_t slot = packet.get<int16_t>(); int32_t itemid = packet.get<int32_t>(); Item *item = player->getInventory()->getItem(Inventories::UseInventory, slot); Pet *pet = player->getPets()->getSummoned(0); if (pet != nullptr && item != nullptr && item->getId() == itemid) { Inventory::takeItem(player, itemid, 1); bool success = (pet->getFullness() < Stats::MaxFullness); if (success) { PetsPacket::showAnimation(player, pet, 1); pet->modifyFullness(Stats::PetFeedFullness, false); if (Randomizer::Instance()->randInt(99) < 60) { // 60% chance for feed to add closeness pet->addCloseness(1); } } } else { InventoryPacket::blankUpdate(player); } } void PetHandler::handleCommand(Player *player, PacketReader &packet) { int32_t petid = (int32_t)packet.get<int64_t>(); Pet *pet = player->getPets()->getPet(petid); if (pet == nullptr) { // Hacks return; } packet.skipBytes(1); int8_t act = packet.get<int8_t>(); PetInteractInfo *action = ItemDataProvider::Instance()->getInteraction(pet->getItemId(), act); if (action == nullptr) { // Hacks or no action info available. return; } bool success = (Randomizer::Instance()->randInt(100) < action->prob); if (success) { pet->addCloseness(action->increase); } PetsPacket::showAnimation(player, pet, act); } void PetHandler::handleConsumePotion(Player *player, PacketReader &packet) { int32_t petid = (int32_t)packet.get<int64_t>(); Pet *pet = player->getPets()->getPet(petid); if (pet == nullptr || !pet->isSummoned() || player->getStats()->getHp() == 0) { // Hacking return; } packet.skipBytes(1); // It MIGHT be some flag for Meso/Power/Magic Guard...? 
if (!player->updateTickCount(packet.get<int32_t>())) { // Tickcount was the same or less than 100 of the difference. return; } int16_t slot = packet.get<int16_t>(); int32_t itemid = packet.get<int32_t>(); Item *item = player->getInventory()->getItem(Inventories::UseInventory, slot); ConsumeInfo *info = ItemDataProvider::Instance()->getConsumeInfo(itemid); if (item == nullptr || item->getId() != itemid) { // Hacking return; } // Check if the MP potion IS a MP potion set if ((info->mp != 0 || info->mpr != 0) && player->getInventory()->getAutoMpPot() != itemid) { // Hacking return; } // Check if the HP potion IS a HP potion set if ((info->hp != 0 || info->hpr != 0) && player->getInventory()->getAutoHpPot() != itemid) { // Hacking return; } Inventory::useItem(player, itemid); Inventory::takeItemSlot(player, Inventories::UseInventory, slot, 1); } void PetHandler::changeName(Player *player, const string &name) { if (Pet *pet = player->getPets()->getSummoned(0)) { pet->setName(name); } } void PetHandler::showPets(Player *player) { for (int8_t i = 0; i < Inventories::MaxPetCount; i++) { if (Pet *pet = player->getPets()->getSummoned(i)) { pet->setPos(player->getPos()); PetsPacket::petSummoned(player, pet, false, true); } } PetsPacket::updateSummonedPets(player); }
gpl-2.0
sergei/minigui
src/newgal/dummy/nullvideo.c
1
5571
/* ** $Id: nullvideo.c 7348 2007-08-16 04:53:34Z xgwang $ ** ** Copyright (C) 2003 ~ 2007 Feynman Software. ** Copyright (C) 2001 ~ 2002 Wei Yongming. */ /* Dummy GAL video driver implementation; this is just enough to make an * GAL-based application THINK it's got a working video driver, for * applications that call GAL_Init(GAL_INIT_VIDEO) when they don't need it, * and also for use as a collection of stubs when porting GAL to a new * platform for which you haven't yet written a valid video driver. * * This is also a great way to determine bottlenecks: if you think that GAL * is a performance problem for a given platform, enable this driver, and * then see if your application runs faster without video overhead. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "common.h" #include "newgal.h" #include "sysvideo.h" #include "pixels_c.h" #ifdef _NEWGAL_ENGINE_DUMMY #include "nullvideo.h" #define DUMMYVID_DRIVER_NAME "dummy" /* Initialization/Query functions */ static int DUMMY_VideoInit(_THIS, GAL_PixelFormat *vformat); static GAL_Rect **DUMMY_ListModes(_THIS, GAL_PixelFormat *format, Uint32 flags); static GAL_Surface *DUMMY_SetVideoMode(_THIS, GAL_Surface *current, int width, int height, int bpp, Uint32 flags); static int DUMMY_SetColors(_THIS, int firstcolor, int ncolors, GAL_Color *colors); static void DUMMY_VideoQuit(_THIS); /* Hardware surface functions */ static int DUMMY_AllocHWSurface(_THIS, GAL_Surface *surface); static void DUMMY_FreeHWSurface(_THIS, GAL_Surface *surface); /* DUMMY driver bootstrap functions */ static int DUMMY_Available(void) { return(1); } static void DUMMY_DeleteDevice(GAL_VideoDevice *device) { free(device->hidden); free(device); } static GAL_VideoDevice *DUMMY_CreateDevice(int devindex) { GAL_VideoDevice *device; /* Initialize all variables that we clean on shutdown */ device = (GAL_VideoDevice *)malloc(sizeof(GAL_VideoDevice)); if ( device ) { memset(device, 0, (sizeof *device)); device->hidden = (struct 
GAL_PrivateVideoData *) malloc((sizeof *device->hidden)); } if ( (device == NULL) || (device->hidden == NULL) ) { GAL_OutOfMemory(); if ( device ) { free(device); } return(0); } memset(device->hidden, 0, (sizeof *device->hidden)); /* Set the function pointers */ device->VideoInit = DUMMY_VideoInit; device->ListModes = DUMMY_ListModes; device->SetVideoMode = DUMMY_SetVideoMode; device->CreateYUVOverlay = NULL; device->SetColors = DUMMY_SetColors; device->VideoQuit = DUMMY_VideoQuit; #ifdef _LITE_VERSION device->RequestHWSurface = NULL; #endif device->AllocHWSurface = DUMMY_AllocHWSurface; device->CheckHWBlit = NULL; device->FillHWRect = NULL; device->SetHWColorKey = NULL; device->SetHWAlpha = NULL; device->FreeHWSurface = DUMMY_FreeHWSurface; device->free = DUMMY_DeleteDevice; return device; } VideoBootStrap DUMMY_bootstrap = { DUMMYVID_DRIVER_NAME, "Dummy video driver", DUMMY_Available, DUMMY_CreateDevice }; static int DUMMY_VideoInit(_THIS, GAL_PixelFormat *vformat) { fprintf (stderr, "NEWGAL>DUMMY: Calling init method!\n"); /* Determine the screen depth (use default 8-bit depth) */ /* we change this during the GAL_SetVideoMode implementation... */ vformat->BitsPerPixel = 8; vformat->BytesPerPixel = 1; /* We're done! 
*/ return(0); } static GAL_Rect **DUMMY_ListModes(_THIS, GAL_PixelFormat *format, Uint32 flags) { if (format->BitsPerPixel < 8) { return NULL; } return (GAL_Rect**) -1; } static GAL_Surface *DUMMY_SetVideoMode(_THIS, GAL_Surface *current, int width, int height, int bpp, Uint32 flags) { int pitch; if (this->hidden->buffer) { free (this->hidden->buffer); } pitch = width * ((bpp + 7) / 8); pitch = (pitch + 3) & ~3; this->hidden->buffer = malloc (pitch * height); if (!this->hidden->buffer) { fprintf (stderr, "NEWGAL>DUMMY: " "Couldn't allocate buffer for requested mode\n"); return NULL; } memset (this->hidden->buffer, 0, pitch * height); /* Allocate the new pixel format for the screen */ if (!GAL_ReallocFormat (current, bpp, 0, 0, 0, 0)) { free(this->hidden->buffer); this->hidden->buffer = NULL; fprintf (stderr, "NEWGAL>DUMMY: " "Couldn't allocate new pixel format for requested mode\n"); return(NULL); } /* Set up the new mode framebuffer */ current->flags = flags & GAL_FULLSCREEN; this->hidden->w = current->w = width; this->hidden->h = current->h = height; current->pitch = pitch; current->pixels = this->hidden->buffer; /* We're done */ return(current); } /* We don't actually allow hardware surfaces other than the main one */ static int DUMMY_AllocHWSurface(_THIS, GAL_Surface *surface) { return(-1); } static void DUMMY_FreeHWSurface(_THIS, GAL_Surface *surface) { surface->pixels = NULL; } static int DUMMY_SetColors(_THIS, int firstcolor, int ncolors, GAL_Color *colors) { /* do nothing of note. */ return(1); } /* Note: If we are terminated, this could be called in the middle of another video routine -- notably UpdateRects. */ static void DUMMY_VideoQuit(_THIS) { if (this->screen->pixels != NULL) { free(this->screen->pixels); this->screen->pixels = NULL; } } #endif /* _NEWGAL_ENGINE_DUMMY */
gpl-2.0
kh007im/ngawi-ics
drivers/spi/spi_txx9.c
513
12302
/* * spi_txx9.c - TXx9 SPI controller driver. * * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c * Copyright (C) 2000-2001 Toshiba Corporation * * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the * terms of the GNU General Public License version 2. This program is * licensed "as is" without any warranty of any kind, whether express * or implied. * * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com) * * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp) */ #include <linux/init.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/spi/spi.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/gpio.h> #define SPI_FIFO_SIZE 4 #define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */ #define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */ #define TXx9_SPMCR 0x00 #define TXx9_SPCR0 0x04 #define TXx9_SPCR1 0x08 #define TXx9_SPFS 0x0c #define TXx9_SPSR 0x14 #define TXx9_SPDR 0x18 /* SPMCR : SPI Master Control */ #define TXx9_SPMCR_OPMODE 0xc0 #define TXx9_SPMCR_CONFIG 0x40 #define TXx9_SPMCR_ACTIVE 0x80 #define TXx9_SPMCR_SPSTP 0x02 #define TXx9_SPMCR_BCLR 0x01 /* SPCR0 : SPI Control 0 */ #define TXx9_SPCR0_TXIFL_MASK 0xc000 #define TXx9_SPCR0_RXIFL_MASK 0x3000 #define TXx9_SPCR0_SIDIE 0x0800 #define TXx9_SPCR0_SOEIE 0x0400 #define TXx9_SPCR0_RBSIE 0x0200 #define TXx9_SPCR0_TBSIE 0x0100 #define TXx9_SPCR0_IFSPSE 0x0010 #define TXx9_SPCR0_SBOS 0x0004 #define TXx9_SPCR0_SPHA 0x0002 #define TXx9_SPCR0_SPOL 0x0001 /* SPSR : SPI Status */ #define TXx9_SPSR_TBSI 0x8000 #define TXx9_SPSR_RBSI 0x4000 #define TXx9_SPSR_TBS_MASK 0x3800 #define TXx9_SPSR_RBS_MASK 0x0700 #define TXx9_SPSR_SPOE 0x0080 #define TXx9_SPSR_IFSD 0x0008 #define TXx9_SPSR_SIDLE 0x0004 #define TXx9_SPSR_STRDY 0x0002 #define TXx9_SPSR_SRRDY 0x0001 
struct txx9spi { struct workqueue_struct *workqueue; struct work_struct work; spinlock_t lock; /* protect 'queue' */ struct list_head queue; wait_queue_head_t waitq; void __iomem *membase; int baseclk; struct clk *clk; u32 max_speed_hz, min_speed_hz; int last_chipselect; int last_chipselect_val; }; static u32 txx9spi_rd(struct txx9spi *c, int reg) { return __raw_readl(c->membase + reg); } static void txx9spi_wr(struct txx9spi *c, u32 val, int reg) { __raw_writel(val, c->membase + reg); } static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, int on, unsigned int cs_delay) { int val = (spi->mode & SPI_CS_HIGH) ? on : !on; if (on) { /* deselect the chip with cs_change hint in last transfer */ if (c->last_chipselect >= 0) gpio_set_value(c->last_chipselect, !c->last_chipselect_val); c->last_chipselect = spi->chip_select; c->last_chipselect_val = val; } else { c->last_chipselect = -1; ndelay(cs_delay); /* CS Hold Time */ } gpio_set_value(spi->chip_select, val); ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */ } static int txx9spi_setup(struct spi_device *spi) { struct txx9spi *c = spi_master_get_devdata(spi->master); u8 bits_per_word; if (!spi->max_speed_hz || spi->max_speed_hz > c->max_speed_hz || spi->max_speed_hz < c->min_speed_hz) return -EINVAL; bits_per_word = spi->bits_per_word; if (bits_per_word != 8 && bits_per_word != 16) return -EINVAL; if (gpio_direction_output(spi->chip_select, !(spi->mode & SPI_CS_HIGH))) { dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n"); return -EINVAL; } /* deselect chip */ spin_lock(&c->lock); txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz); spin_unlock(&c->lock); return 0; } static irqreturn_t txx9spi_interrupt(int irq, void *dev_id) { struct txx9spi *c = dev_id; /* disable rx intr */ txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE, TXx9_SPCR0); wake_up(&c->waitq); return IRQ_HANDLED; } static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) { struct 
spi_device *spi = m->spi; struct spi_transfer *t; unsigned int cs_delay; unsigned int cs_change = 1; int status = 0; u32 mcr; u32 prev_speed_hz = 0; u8 prev_bits_per_word = 0; /* CS setup/hold/recovery time in nsec */ cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz; mcr = txx9spi_rd(c, TXx9_SPMCR); if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) { dev_err(&spi->dev, "Bad mode.\n"); status = -EIO; goto exit; } mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); /* enter config mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); txx9spi_wr(c, TXx9_SPCR0_SBOS | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0) | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0) | 0x08, TXx9_SPCR0); list_for_each_entry (t, &m->transfers, transfer_list) { const void *txbuf = t->tx_buf; void *rxbuf = t->rx_buf; u32 data; unsigned int len = t->len; unsigned int wsize; u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; bits_per_word = bits_per_word ? : 8; wsize = bits_per_word >> 3; /* in bytes */ if (prev_speed_hz != speed_hz || prev_bits_per_word != bits_per_word) { int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); /* enter config mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1); /* enter active mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR); prev_speed_hz = speed_hz; prev_bits_per_word = bits_per_word; } if (cs_change) txx9spi_cs_func(spi, c, 1, cs_delay); cs_change = t->cs_change; while (len) { unsigned int count = SPI_FIFO_SIZE; int i; u32 cr0; if (len < count * wsize) count = len / wsize; /* now tx must be idle... 
*/ while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE)) cpu_relax(); cr0 = txx9spi_rd(c, TXx9_SPCR0); cr0 &= ~TXx9_SPCR0_RXIFL_MASK; cr0 |= (count - 1) << 12; /* enable rx intr */ cr0 |= TXx9_SPCR0_RBSIE; txx9spi_wr(c, cr0, TXx9_SPCR0); /* send */ for (i = 0; i < count; i++) { if (txbuf) { data = (wsize == 1) ? *(const u8 *)txbuf : *(const u16 *)txbuf; txx9spi_wr(c, data, TXx9_SPDR); txbuf += wsize; } else txx9spi_wr(c, 0, TXx9_SPDR); } /* wait all rx data */ wait_event(c->waitq, txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI); /* receive */ for (i = 0; i < count; i++) { data = txx9spi_rd(c, TXx9_SPDR); if (rxbuf) { if (wsize == 1) *(u8 *)rxbuf = data; else *(u16 *)rxbuf = data; rxbuf += wsize; } } len -= count * wsize; } m->actual_length += t->len; if (t->delay_usecs) udelay(t->delay_usecs); if (!cs_change) continue; if (t->transfer_list.next == &m->transfers) break; /* sometimes a short mid-message deselect of the chip * may be needed to terminate a mode or command */ txx9spi_cs_func(spi, c, 0, cs_delay); } exit: m->status = status; m->complete(m->context); /* normally deactivate chipselect ... unless no error and * cs_change has hinted that the next message will probably * be for this chip too. 
*/ if (!(status == 0 && cs_change)) txx9spi_cs_func(spi, c, 0, cs_delay); /* enter config mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); } static void txx9spi_work(struct work_struct *work) { struct txx9spi *c = container_of(work, struct txx9spi, work); unsigned long flags; spin_lock_irqsave(&c->lock, flags); while (!list_empty(&c->queue)) { struct spi_message *m; m = container_of(c->queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock_irqrestore(&c->lock, flags); txx9spi_work_one(c, m); spin_lock_irqsave(&c->lock, flags); } spin_unlock_irqrestore(&c->lock, flags); } static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m) { struct spi_master *master = spi->master; struct txx9spi *c = spi_master_get_devdata(master); struct spi_transfer *t; unsigned long flags; m->actual_length = 0; /* check each transfer's parameters */ list_for_each_entry (t, &m->transfers, transfer_list) { u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; bits_per_word = bits_per_word ? 
: 8; if (!t->tx_buf && !t->rx_buf && t->len) return -EINVAL; if (bits_per_word != 8 && bits_per_word != 16) return -EINVAL; if (t->len & ((bits_per_word >> 3) - 1)) return -EINVAL; if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz) return -EINVAL; } spin_lock_irqsave(&c->lock, flags); list_add_tail(&m->queue, &c->queue); queue_work(c->workqueue, &c->work); spin_unlock_irqrestore(&c->lock, flags); return 0; } static int __init txx9spi_probe(struct platform_device *dev) { struct spi_master *master; struct txx9spi *c; struct resource *res; int ret = -ENODEV; u32 mcr; int irq; master = spi_alloc_master(&dev->dev, sizeof(*c)); if (!master) return ret; c = spi_master_get_devdata(master); platform_set_drvdata(dev, master); INIT_WORK(&c->work, txx9spi_work); spin_lock_init(&c->lock); INIT_LIST_HEAD(&c->queue); init_waitqueue_head(&c->waitq); c->clk = clk_get(&dev->dev, "spi-baseclk"); if (IS_ERR(c->clk)) { ret = PTR_ERR(c->clk); c->clk = NULL; goto exit; } ret = clk_enable(c->clk); if (ret) { clk_put(c->clk); c->clk = NULL; goto exit; } c->baseclk = clk_get_rate(c->clk); c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) goto exit_busy; if (!devm_request_mem_region(&dev->dev, res->start, res->end - res->start + 1, "spi_txx9")) goto exit_busy; c->membase = devm_ioremap(&dev->dev, res->start, res->end - res->start + 1); if (!c->membase) goto exit_busy; /* enter config mode */ mcr = txx9spi_rd(c, TXx9_SPMCR); mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); irq = platform_get_irq(dev, 0); if (irq < 0) goto exit_busy; ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0, "spi_txx9", c); if (ret) goto exit; c->workqueue = create_singlethread_workqueue( dev_name(master->dev.parent)); if (!c->workqueue) goto exit_busy; c->last_chipselect = -1; 
dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n", (unsigned long long)res->start, irq, (c->baseclk + 500000) / 1000000); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; master->bus_num = dev->id; master->setup = txx9spi_setup; master->transfer = txx9spi_transfer; master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */ ret = spi_register_master(master); if (ret) goto exit; return 0; exit_busy: ret = -EBUSY; exit: if (c->workqueue) destroy_workqueue(c->workqueue); if (c->clk) { clk_disable(c->clk); clk_put(c->clk); } platform_set_drvdata(dev, NULL); spi_master_put(master); return ret; } static int __exit txx9spi_remove(struct platform_device *dev) { struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); struct txx9spi *c = spi_master_get_devdata(master); spi_unregister_master(master); platform_set_drvdata(dev, NULL); destroy_workqueue(c->workqueue); clk_disable(c->clk); clk_put(c->clk); spi_master_put(master); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:spi_txx9"); static struct platform_driver txx9spi_driver = { .remove = __exit_p(txx9spi_remove), .driver = { .name = "spi_txx9", .owner = THIS_MODULE, }, }; static int __init txx9spi_init(void) { return platform_driver_probe(&txx9spi_driver, txx9spi_probe); } subsys_initcall(txx9spi_init); static void __exit txx9spi_exit(void) { platform_driver_unregister(&txx9spi_driver); } module_exit(txx9spi_exit); MODULE_DESCRIPTION("TXx9 SPI Driver"); MODULE_LICENSE("GPL");
gpl-2.0
fards/hannspree_Oem_2.6.32.9-
drivers/block/paride/pf.c
513
24545
/* pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. This is the high-level driver for parallel port ATAPI disk drives based on chips supported by the paride module. By default, the driver will autoprobe for a single parallel port ATAPI disk drive, but if their individual parameters are specified, the driver can handle up to 4 drives. The behaviour of the pf driver can be altered by setting some parameters from the insmod command line. The following parameters are adjustable: drive0 These four arguments can be arrays of drive1 1-7 integers as follows: drive2 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly> Where, <prt> is the base of the parallel port address for the corresponding drive. (required) <pro> is the protocol number for the adapter that supports this drive. These numbers are logged by 'paride' when the protocol modules are initialised. (0 if not given) <uni> for those adapters that support chained devices, this is the unit selector for the chain of devices on the given port. It should be zero for devices that don't support chaining. (0 if not given) <mod> this can be -1 to choose the best mode, or one of the mode numbers supported by the adapter. (-1 if not given) <slv> ATAPI CDroms can be jumpered to master or slave. Set this to 0 to choose the master drive, 1 to choose the slave, -1 (the default) to choose the first drive found. <lun> Some ATAPI devices support multiple LUNs. One example is the ATAPI PD/CD drive from Matshita/Panasonic. This device has a CD drive on LUN 0 and a PD drive on LUN 1. By default, the driver will search for the first LUN with a supported device. Set this parameter to force it to use a specific LUN. (default -1) <dly> some parallel ports require the driver to go more slowly. -1 sets a default value that should work with the chosen protocol. Otherwise, set this to a small integer, the larger it is the slower the port i/o. 
In some cases, setting this to zero will speed up the device. (default -1) major You may use this parameter to overide the default major number (47) that this driver will use. Be sure to change the device name as well. name This parameter is a character string that contains the name the kernel will use for this device (in /proc output, for instance). (default "pf"). cluster The driver will attempt to aggregate requests for adjacent blocks into larger multi-block clusters. The maximum cluster size (in 512 byte sectors) is set with this parameter. (default 64) verbose This parameter controls the amount of logging that the driver will do. Set it to 0 for normal operation, 1 to see autoprobe progress messages, or 2 to see additional debugging output. (default 0) nice This parameter controls the driver's use of idle CPU time, at the expense of some speed. If this driver is built into the kernel, you can use the following command line parameters, with the same values as the corresponding module parameters listed above: pf.drive0 pf.drive1 pf.drive2 pf.drive3 pf.cluster pf.nice In addition, you can use the parameter pf.disable to disable the driver entirely. */ /* Changes: 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti(). Fix for drives that don't clear STAT_ERR until after next CDB delivered. Small change in pf_completion to round up transfer size. 1.02 GRG 1998.06.16 Eliminated an Ugh 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging 1.04 GRG 1998.09.24 Added jumbo support */ #define PF_VERSION "1.04" #define PF_MAJOR 47 #define PF_NAME "pf" #define PF_UNITS 4 /* Here are things one can override from the insmod command. Most are autoprobed by paride unless set here. Verbose is off by default. 
*/ static int verbose = 0; static int major = PF_MAJOR; static char *name = PF_NAME; static int cluster = 64; static int nice = 0; static int disable = 0; static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3}; static int pf_drive_count; enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY}; /* end of parameters */ #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/cdrom.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <asm/uaccess.h> static DEFINE_SPINLOCK(pf_spin_lock); module_param(verbose, bool, 0644); module_param(major, int, 0); module_param(name, charp, 0); module_param(cluster, int, 0); module_param(nice, int, 0); module_param_array(drive0, int, NULL, 0); module_param_array(drive1, int, NULL, 0); module_param_array(drive2, int, NULL, 0); module_param_array(drive3, int, NULL, 0); #include "paride.h" #include "pseudo.h" /* constants for faking geometry numbers */ #define PF_FD_MAX 8192 /* use FD geometry under this size */ #define PF_FD_HDS 2 #define PF_FD_SPT 18 #define PF_HD_HDS 64 #define PF_HD_SPT 32 #define PF_MAX_RETRIES 5 #define PF_TMO 800 /* interrupt timeout in jiffies */ #define PF_SPIN_DEL 50 /* spin delay in micro-seconds */ #define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL) #define STAT_ERR 0x00001 #define STAT_INDEX 0x00002 #define STAT_ECC 0x00004 #define STAT_DRQ 0x00008 #define STAT_SEEK 0x00010 #define STAT_WRERR 0x00020 #define STAT_READY 0x00040 #define STAT_BUSY 0x00080 #define ATAPI_REQ_SENSE 0x03 #define ATAPI_LOCK 0x1e #define ATAPI_DOOR 0x1b #define ATAPI_MODE_SENSE 0x5a #define ATAPI_CAPACITY 0x25 #define ATAPI_IDENTIFY 0x12 #define ATAPI_READ_10 0x28 #define ATAPI_WRITE_10 0x2a static int 
pf_open(struct block_device *bdev, fmode_t mode); static void do_pf_request(struct request_queue * q); static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int pf_release(struct gendisk *disk, fmode_t mode); static int pf_detect(void); static void do_pf_read(void); static void do_pf_read_start(void); static void do_pf_write(void); static void do_pf_write_start(void); static void do_pf_read_drq(void); static void do_pf_write_done(void); #define PF_NM 0 #define PF_RO 1 #define PF_RW 2 #define PF_NAMELEN 8 struct pf_unit { struct pi_adapter pia; /* interface to paride layer */ struct pi_adapter *pi; int removable; /* removable media device ? */ int media_status; /* media present ? WP ? */ int drive; /* drive */ int lun; int access; /* count of active opens ... */ int present; /* device present ? */ char name[PF_NAMELEN]; /* pf0, pf1, ... */ struct gendisk *disk; }; static struct pf_unit units[PF_UNITS]; static int pf_identify(struct pf_unit *pf); static void pf_lock(struct pf_unit *pf, int func); static void pf_eject(struct pf_unit *pf); static int pf_check_media(struct gendisk *disk); static char pf_scratch[512]; /* scratch block buffer */ /* the variables below are used mainly in the I/O request engine, which processes only one request at a time. */ static int pf_retries = 0; /* i/o error retry count */ static int pf_busy = 0; /* request being processed ? 
*/ static struct request *pf_req; /* current request */ static int pf_block; /* address of next requested block */ static int pf_count; /* number of blocks still to do */ static int pf_run; /* sectors in current cluster */ static int pf_cmd; /* current command READ/WRITE */ static struct pf_unit *pf_current;/* unit of current request */ static int pf_mask; /* stopper for pseudo-int */ static char *pf_buf; /* buffer for request in progress */ /* kernel glue structures */ static const struct block_device_operations pf_fops = { .owner = THIS_MODULE, .open = pf_open, .release = pf_release, .locked_ioctl = pf_ioctl, .getgeo = pf_getgeo, .media_changed = pf_check_media, }; static void __init pf_init_units(void) { struct pf_unit *pf; int unit; pf_drive_count = 0; for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) { struct gendisk *disk = alloc_disk(1); if (!disk) continue; pf->disk = disk; pf->pi = &pf->pia; pf->media_status = PF_NM; pf->drive = (*drives[unit])[D_SLV]; pf->lun = (*drives[unit])[D_LUN]; snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit); disk->major = major; disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } } static int pf_open(struct block_device *bdev, fmode_t mode) { struct pf_unit *pf = bdev->bd_disk->private_data; pf_identify(pf); if (pf->media_status == PF_NM) return -ENODEV; if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) return -EROFS; pf->access++; if (pf->removable) pf_lock(pf, 1); return 0; } static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct pf_unit *pf = bdev->bd_disk->private_data; sector_t capacity = get_capacity(pf->disk); if (capacity < PF_FD_MAX) { geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT); geo->heads = PF_FD_HDS; geo->sectors = PF_FD_SPT; } else { geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT); geo->heads = PF_HD_HDS; geo->sectors = PF_HD_SPT; } return 0; } static int 
pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct pf_unit *pf = bdev->bd_disk->private_data; if (cmd != CDROMEJECT) return -EINVAL; if (pf->access != 1) return -EBUSY; pf_eject(pf); return 0; } static int pf_release(struct gendisk *disk, fmode_t mode) { struct pf_unit *pf = disk->private_data; if (pf->access <= 0) return -EINVAL; pf->access--; if (!pf->access && pf->removable) pf_lock(pf, 0); return 0; } static int pf_check_media(struct gendisk *disk) { return 1; } static inline int status_reg(struct pf_unit *pf) { return pi_read_regr(pf->pi, 1, 6); } static inline int read_reg(struct pf_unit *pf, int reg) { return pi_read_regr(pf->pi, 0, reg); } static inline void write_reg(struct pf_unit *pf, int reg, int val) { pi_write_regr(pf->pi, 0, reg, val); } static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) { int j, r, e, s, p; j = 0; while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop)))) && (j++ < PF_SPIN)) udelay(PF_SPIN_DEL); if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) { s = read_reg(pf, 7); e = read_reg(pf, 1); p = read_reg(pf, 2); if (j >= PF_SPIN) e |= 0x100; if (fun) printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" " loop=%d phase=%d\n", pf->name, fun, msg, r, s, e, j, p); return (e << 8) + s; } return 0; } static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun) { pi_connect(pf->pi); write_reg(pf, 6, 0xa0+0x10*pf->drive); if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) { pi_disconnect(pf->pi); return -1; } write_reg(pf, 4, dlen % 256); write_reg(pf, 5, dlen / 256); write_reg(pf, 7, 0xa0); /* ATAPI packet command */ if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) { pi_disconnect(pf->pi); return -1; } if (read_reg(pf, 2) != 1) { printk("%s: %s: command phase error\n", pf->name, fun); pi_disconnect(pf->pi); return -1; } pi_write_block(pf->pi, cmd, 12); return 0; } static int pf_completion(struct pf_unit *pf, char *buf, char *fun) { int r, 
s, n; r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, fun, "completion"); if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) { n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) + 3) & 0xfffc); pi_read_block(pf->pi, buf, n); } s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done"); pi_disconnect(pf->pi); return (r ? r : s); } static void pf_req_sense(struct pf_unit *pf, int quiet) { char rs_cmd[12] = { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; char buf[16]; int r; r = pf_command(pf, rs_cmd, 16, "Request sense"); mdelay(1); if (!r) pf_completion(pf, buf, "Request sense"); if ((!r) && (!quiet)) printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n", pf->name, buf[2] & 0xf, buf[12], buf[13]); } static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun) { int r; r = pf_command(pf, cmd, dlen, fun); mdelay(1); if (!r) r = pf_completion(pf, buf, fun); if (r) pf_req_sense(pf, !fun); return r; } static void pf_lock(struct pf_unit *pf, int func) { char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 }; pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock"); } static void pf_eject(struct pf_unit *pf) { char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 }; pf_lock(pf, 0); pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject"); } #define PF_RESET_TMO 30 /* in tenths of a second */ static void pf_sleep(int cs) { schedule_timeout_interruptible(cs); } /* the ATAPI standard actually specifies the contents of all 7 registers after a reset, but the specification is ambiguous concerning the last two bytes, and different drives interpret the standard differently. 
*/ static int pf_reset(struct pf_unit *pf) { int i, k, flg; int expect[5] = { 1, 1, 1, 0x14, 0xeb }; pi_connect(pf->pi); write_reg(pf, 6, 0xa0+0x10*pf->drive); write_reg(pf, 7, 8); pf_sleep(20 * HZ / 1000); k = 0; while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY)) pf_sleep(HZ / 10); flg = 1; for (i = 0; i < 5; i++) flg &= (read_reg(pf, i + 1) == expect[i]); if (verbose) { printk("%s: Reset (%d) signature = ", pf->name, k); for (i = 0; i < 5; i++) printk("%3x", read_reg(pf, i + 1)); if (!flg) printk(" (incorrect)"); printk("\n"); } pi_disconnect(pf->pi); return flg - 1; } static void pf_mode_sense(struct pf_unit *pf) { char ms_cmd[12] = { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 }; char buf[8]; pf_atapi(pf, ms_cmd, 8, buf, "mode sense"); pf->media_status = PF_RW; if (buf[3] & 0x80) pf->media_status = PF_RO; } static void xs(char *buf, char *targ, int offs, int len) { int j, k, l; j = 0; l = 0; for (k = 0; k < len; k++) if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) l = targ[j++] = buf[k + offs]; if (l == 0x20) j--; targ[j] = 0; } static int xl(char *buf, int offs) { int v, k; v = 0; for (k = 0; k < 4; k++) v = v * 256 + (buf[k + offs] & 0xff); return v; } static void pf_get_capacity(struct pf_unit *pf) { char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; char buf[8]; int bs; if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) { pf->media_status = PF_NM; return; } set_capacity(pf->disk, xl(buf, 0) + 1); bs = xl(buf, 4); if (bs != 512) { set_capacity(pf->disk, 0); if (verbose) printk("%s: Drive %d, LUN %d," " unsupported block size %d\n", pf->name, pf->drive, pf->lun, bs); } } static int pf_identify(struct pf_unit *pf) { int dt, s; char *ms[2] = { "master", "slave" }; char mf[10], id[18]; char id_cmd[12] = { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; char buf[36]; s = pf_atapi(pf, id_cmd, 36, buf, "identify"); if (s) return -1; dt = buf[0] & 0x1f; if ((dt != 0) && (dt != 7)) { if 
(verbose) printk("%s: Drive %d, LUN %d, unsupported type %d\n", pf->name, pf->drive, pf->lun, dt); return -1; } xs(buf, mf, 8, 8); xs(buf, id, 16, 16); pf->removable = (buf[1] & 0x80); pf_mode_sense(pf); pf_mode_sense(pf); pf_mode_sense(pf); pf_get_capacity(pf); printk("%s: %s %s, %s LUN %d, type %d", pf->name, mf, id, ms[pf->drive], pf->lun, dt); if (pf->removable) printk(", removable"); if (pf->media_status == PF_NM) printk(", no media\n"); else { if (pf->media_status == PF_RO) printk(", RO"); printk(", %llu blocks\n", (unsigned long long)get_capacity(pf->disk)); } return 0; } /* returns 0, with id set if drive is detected -1, if drive detection failed */ static int pf_probe(struct pf_unit *pf) { if (pf->drive == -1) { for (pf->drive = 0; pf->drive <= 1; pf->drive++) if (!pf_reset(pf)) { if (pf->lun != -1) return pf_identify(pf); else for (pf->lun = 0; pf->lun < 8; pf->lun++) if (!pf_identify(pf)) return 0; } } else { if (pf_reset(pf)) return -1; if (pf->lun != -1) return pf_identify(pf); for (pf->lun = 0; pf->lun < 8; pf->lun++) if (!pf_identify(pf)) return 0; } return -1; } static int pf_detect(void) { struct pf_unit *pf = units; int k, unit; printk("%s: %s version %s, major %d, cluster %d, nice %d\n", name, name, PF_VERSION, major, cluster, nice); k = 0; if (pf_drive_count == 0) { if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF, verbose, pf->name)) { if (!pf_probe(pf) && pf->disk) { pf->present = 1; k++; } else pi_release(pf->pi); } } else for (unit = 0; unit < PF_UNITS; unit++, pf++) { int *conf = *drives[unit]; if (!conf[D_PRT]) continue; if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD], conf[D_UNI], conf[D_PRO], conf[D_DLY], pf_scratch, PI_PF, verbose, pf->name)) { if (pf->disk && !pf_probe(pf)) { pf->present = 1; k++; } else pi_release(pf->pi); } } if (k) return 0; printk("%s: No ATAPI disk detected\n", name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -1; } /* The i/o request engine */ static int 
pf_start(struct pf_unit *pf, int cmd, int b, int c) { int i; char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; for (i = 0; i < 4; i++) { io_cmd[5 - i] = b & 0xff; b = b >> 8; } io_cmd[8] = c & 0xff; io_cmd[7] = (c >> 8) & 0xff; i = pf_command(pf, io_cmd, c * 512, "start i/o"); mdelay(1); return i; } static int pf_ready(void) { return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask)); } static struct request_queue *pf_queue; static void pf_end_request(int err) { if (pf_req && !__blk_end_request_cur(pf_req, err)) pf_req = NULL; } static void do_pf_request(struct request_queue * q) { if (pf_busy) return; repeat: if (!pf_req) { pf_req = blk_fetch_request(q); if (!pf_req) return; } pf_current = pf_req->rq_disk->private_data; pf_block = blk_rq_pos(pf_req); pf_run = blk_rq_sectors(pf_req); pf_count = blk_rq_cur_sectors(pf_req); if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { pf_end_request(-EIO); goto repeat; } pf_cmd = rq_data_dir(pf_req); pf_buf = pf_req->buffer; pf_retries = 0; pf_busy = 1; if (pf_cmd == READ) pi_do_claimed(pf_current->pi, do_pf_read); else if (pf_cmd == WRITE) pi_do_claimed(pf_current->pi, do_pf_write); else { pf_busy = 0; pf_end_request(-EIO); goto repeat; } } static int pf_next_buf(void) { unsigned long saved_flags; pf_count--; pf_run--; pf_buf += 512; pf_block++; if (!pf_run) return 1; if (!pf_count) { spin_lock_irqsave(&pf_spin_lock, saved_flags); pf_end_request(0); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); pf_buf = pf_req->buffer; } return 0; } static inline void next_request(int err) { unsigned long saved_flags; spin_lock_irqsave(&pf_spin_lock, saved_flags); pf_end_request(err); pf_busy = 0; do_pf_request(pf_queue); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); } /* detach from the calling context - in case the spinlock is held */ static void do_pf_read(void) { ps_set_intr(do_pf_read_start, NULL, 0, nice); } static void 
do_pf_read_start(void) { pf_busy = 1; if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_read_start); return; } next_request(-EIO); return; } pf_mask = STAT_DRQ; ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice); } static void do_pf_read_drq(void) { while (1) { if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, "read block", "completion") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_req_sense(pf_current, 0); pf_retries++; pi_do_claimed(pf_current->pi, do_pf_read_start); return; } next_request(-EIO); return; } pi_read_block(pf_current->pi, pf_buf, 512); if (pf_next_buf()) break; } pi_disconnect(pf_current->pi); next_request(0); } static void do_pf_write(void) { ps_set_intr(do_pf_write_start, NULL, 0, nice); } static void do_pf_write_start(void) { pf_busy = 1; if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } while (1) { if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, "write block", "data wait") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } pi_write_block(pf_current->pi, pf_buf, 512); if (pf_next_buf()) break; } pf_mask = 0; ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice); } static void do_pf_write_done(void) { if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } pi_disconnect(pf_current->pi); next_request(0); } static int __init pf_init(void) { /* preliminary initialisation */ 
struct pf_unit *pf; int unit; if (disable) return -EINVAL; pf_init_units(); if (pf_detect()) return -ENODEV; pf_busy = 0; if (register_blkdev(major, name)) { for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -EBUSY; } pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock); if (!pf_queue) { unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -ENOMEM; } blk_queue_max_phys_segments(pf_queue, cluster); blk_queue_max_hw_segments(pf_queue, cluster); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { struct gendisk *disk = pf->disk; if (!pf->present) continue; disk->private_data = pf; disk->queue = pf_queue; add_disk(disk); } return 0; } static void __exit pf_exit(void) { struct pf_unit *pf; int unit; unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { if (!pf->present) continue; del_gendisk(pf->disk); put_disk(pf->disk); pi_release(pf->pi); } blk_cleanup_queue(pf_queue); } MODULE_LICENSE("GPL"); module_init(pf_init) module_exit(pf_exit)
gpl-2.0
Zenfone2-Dev/Flare-AEL-X
drivers/gpu/drm/nouveau/core/core/parent.c
2561
3307
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/object.h> #include <core/parent.h> #include <core/client.h> int nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, struct nouveau_object **pengine, struct nouveau_oclass **poclass) { struct nouveau_sclass *sclass; struct nouveau_engine *engine; struct nouveau_oclass *oclass; u64 mask; sclass = nv_parent(parent)->sclass; while (sclass) { if ((sclass->oclass->handle & 0xffff) == handle) { *pengine = parent->engine; *poclass = sclass->oclass; return 0; } sclass = sclass->sclass; } mask = nv_parent(parent)->engine; while (mask) { int i = ffsll(mask) - 1; if (nv_iclass(parent, NV_CLIENT_CLASS)) engine = nv_engine(nv_client(parent)->device); else engine = nouveau_engine(parent, i); if (engine) { oclass = engine->sclass; while (oclass->ofuncs) { if ((oclass->handle & 0xffff) == handle) { *pengine = nv_object(engine); *poclass = oclass; return 0; } oclass++; } } mask &= ~(1ULL << i); } return -EINVAL; } int nouveau_parent_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, u32 pclass, struct nouveau_oclass *sclass, u64 engcls, int size, void **pobject) { struct nouveau_parent *object; struct nouveau_sclass *nclass; int ret; ret = nouveau_object_create_(parent, engine, oclass, pclass | NV_PARENT_CLASS, size, pobject); object = *pobject; if (ret) return ret; while (sclass && sclass->ofuncs) { nclass = kzalloc(sizeof(*nclass), GFP_KERNEL); if (!nclass) return -ENOMEM; nclass->sclass = object->sclass; object->sclass = nclass; nclass->engine = engine ? nv_engine(engine) : NULL; nclass->oclass = sclass; sclass++; } object->engine = engcls; return 0; } void nouveau_parent_destroy(struct nouveau_parent *parent) { struct nouveau_sclass *sclass; while ((sclass = parent->sclass)) { parent->sclass = sclass->sclass; kfree(sclass); } nouveau_object_destroy(&parent->base); } void _nouveau_parent_dtor(struct nouveau_object *object) { nouveau_parent_destroy(nv_parent(object)); }
gpl-2.0
MoKee/android_kernel_motorola_msm8960-common
drivers/net/hp-plus.c
3585
15316
/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */ /* Written 1994 by Donald Becker. This driver is for the Hewlett Packard PC LAN (27***) plus ethercards. These cards are sold under several model numbers, usually 2724*. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 As is often the case, a great deal of credit is owed to Russ Nelson. The Crynwr packet driver was my primary source of HP-specific programming information. */ static const char version[] = "hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; #include <linux/module.h> #include <linux/string.h> /* Important -- this inlines word moves. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/system.h> #include <asm/io.h> #include "8390.h" #define DRV_NAME "hp-plus" /* A zero-terminated list of I/O addresses to be probed. */ static unsigned int hpplus_portlist[] __initdata = {0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0}; /* The HP EtherTwist chip implementation is a fairly routine DP8390 implementation. It allows both shared memory and programmed-I/O buffer access, using a custom interface for both. The programmed-I/O mode is entirely implemented in the HP EtherTwist chip, bypassing the problem ridden built-in 8390 facilities used on NE2000 designs. The shared memory mode is likewise special, with an offset register used to make packets appear at the shared memory base. Both modes use a base and bounds page register to hide the Rx ring buffer wrap -- a packet that spans the end of physical buffer memory appears continuous to the driver. (c.f. 
the 3c503 and Cabletron E2100) A special note: the internal buffer of the board is only 8 bits wide. This lays several nasty traps for the unaware: - the 8390 must be programmed for byte-wide operations - all I/O and memory operations must work on whole words (the access latches are serially preloaded and have no byte-swapping ability). This board is laid out in I/O space much like the earlier HP boards: the first 16 locations are for the board registers, and the second 16 are for the 8390. The board is easy to identify, with both a dedicated 16 bit ID register and a constant 0x530* value in the upper bits of the paging register. */ #define HP_ID 0x00 /* ID register, always 0x4850. */ #define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */ #define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */ #define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */ #define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */ #define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */ #define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */ #define HP_IO_EXTENT 32 #define HP_START_PG 0x00 /* First page of TX buffer */ #define HP_STOP_PG 0x80 /* Last page +1 of RX ring */ /* The register set selected in HP_PAGING. */ enum PageName { Perf_Page = 0, /* Normal operation. */ MAC_Page = 1, /* The ethernet address (+checksum). */ HW_Page = 2, /* EEPROM-loaded hardware parameters. */ LAN_Page = 4, /* Transceiver selection, testing, etc. */ ID_Page = 6 }; /* The bit definitions for the HPP_OPTION register. */ enum HP_Option { NICReset = 1, ChipReset = 2, /* Active low, really UNreset. 
*/ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20, MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, }; static int hpp_probe1(struct net_device *dev, int ioaddr); static void hpp_reset_8390(struct net_device *dev); static int hpp_open(struct net_device *dev); static int hpp_close(struct net_device *dev); static void hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void hpp_mem_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void hpp_io_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); /* Probe a list of addresses for an HP LAN+ adaptor. This routine is almost boilerplate. */ static int __init do_hpp_probe(struct net_device *dev) { int i; int base_addr = dev->base_addr; int irq = dev->irq; if (base_addr > 0x1ff) /* Check a single specified location. */ return hpp_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. 
*/ return -ENXIO; for (i = 0; hpplus_portlist[i]; i++) { if (hpp_probe1(dev, hpplus_portlist[i]) == 0) return 0; dev->irq = irq; } return -ENODEV; } #ifndef MODULE struct net_device * __init hp_plus_probe(int unit) { struct net_device *dev = alloc_eip_netdev(); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_hpp_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static const struct net_device_ops hpp_netdev_ops = { .ndo_open = hpp_open, .ndo_stop = hpp_close, .ndo_start_xmit = eip_start_xmit, .ndo_tx_timeout = eip_tx_timeout, .ndo_get_stats = eip_get_stats, .ndo_set_multicast_list = eip_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = eip_poll, #endif }; /* Do the interesting part of the probe at a single address. */ static int __init hpp_probe1(struct net_device *dev, int ioaddr) { int i, retval; unsigned char checksum = 0; const char name[] = "HP-PC-LAN+"; int mem_start; static unsigned version_printed; if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) return -EBUSY; /* Check for the HP+ signature, 50 48 0x 53. */ if (inw(ioaddr + HP_ID) != 0x4850 || (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) { retval = -ENODEV; goto out; } if (ei_debug && version_printed++ == 0) printk(version); printk("%s: %s at %#3x, ", dev->name, name, ioaddr); /* Retrieve and checksum the station address. */ outw(MAC_Page, ioaddr + HP_PAGING); for(i = 0; i < ETHER_ADDR_LEN; i++) { unsigned char inval = inb(ioaddr + 8 + i); dev->dev_addr[i] = inval; checksum += inval; } checksum += inb(ioaddr + 14); printk("%pM", dev->dev_addr); if (checksum != 0xff) { printk(" bad checksum %2.2x.\n", checksum); retval = -ENODEV; goto out; } else { /* Point at the Software Configuration Flags. 
*/ outw(ID_Page, ioaddr + HP_PAGING); printk(" ID %4.4x", inw(ioaddr + 12)); } /* Read the IRQ line. */ outw(HW_Page, ioaddr + HP_PAGING); { int irq = inb(ioaddr + 13) & 0x0f; int option = inw(ioaddr + HPP_OPTION); dev->irq = irq; if (option & MemEnable) { mem_start = inw(ioaddr + 9) << 8; printk(", IRQ %d, memory address %#x.\n", irq, mem_start); } else { mem_start = 0; printk(", IRQ %d, programmed-I/O mode.\n", irq); } } /* Set the wrap registers for string I/O reads. */ outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); /* Set the base address to point to the NIC, not the "real" base! */ dev->base_addr = ioaddr + NIC_OFFSET; dev->netdev_ops = &hpp_netdev_ops; ei_status.name = name; ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */ ei_status.tx_start_page = HP_START_PG; ei_status.rx_start_page = HP_START_PG + TX_PAGES/2; ei_status.stop_page = HP_STOP_PG; ei_status.reset_8390 = &hpp_reset_8390; ei_status.block_input = &hpp_io_block_input; ei_status.block_output = &hpp_io_block_output; ei_status.get_8390_hdr = &hpp_io_get_8390_hdr; /* Check if the memory_enable flag is set in the option register. */ if (mem_start) { ei_status.block_input = &hpp_mem_block_input; ei_status.block_output = &hpp_mem_block_output; ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr; dev->mem_start = mem_start; ei_status.mem = ioremap(mem_start, (HP_STOP_PG - HP_START_PG)*256); if (!ei_status.mem) { retval = -ENOMEM; goto out; } ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256; dev->mem_end = ei_status.rmem_end = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256; } outw(Perf_Page, ioaddr + HP_PAGING); NS8390p_init(dev, 0); /* Leave the 8390 and HP chip reset. 
*/ outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); retval = register_netdev(dev); if (retval) goto out1; return 0; out1: iounmap(ei_status.mem); out: release_region(ioaddr, HP_IO_EXTENT); return retval; } static int hpp_open(struct net_device *dev) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg; int retval; if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { return retval; } /* Reset the 8390 and HP chip. */ option_reg = inw(ioaddr + HPP_OPTION); outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); udelay(5); /* Unreset the board and enable interrupts. */ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); /* Set the wrap registers for programmed-I/O operation. */ outw(HW_Page, ioaddr + HP_PAGING); outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); /* Select the operational page. */ outw(Perf_Page, ioaddr + HP_PAGING); return eip_open(dev); } static int hpp_close(struct net_device *dev) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg = inw(ioaddr + HPP_OPTION); free_irq(dev->irq, dev); eip_close(dev); outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset, ioaddr + HPP_OPTION); return 0; } static void hpp_reset_8390(struct net_device *dev) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg = inw(ioaddr + HPP_OPTION); if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); /* Pause a few cycles for the hardware reset to take place. */ udelay(5); ei_status.txing = 0; outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); udelay(5); if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) printk("%s: hp_reset_8390() did not complete.\n", dev->name); if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); } /* The programmed-I/O version of reading the 4 byte 8390 specific header. 
Note that transfer with the EtherTwist+ must be on word boundaries. */ static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { int ioaddr = dev->base_addr - NIC_OFFSET; outw((ring_page<<8), ioaddr + HPP_IN_ADDR); insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); } /* Block input and output, similar to the Crynwr packet driver. */ static void hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { int ioaddr = dev->base_addr - NIC_OFFSET; char *buf = skb->data; outw(ring_offset, ioaddr + HPP_IN_ADDR); insw(ioaddr + HP_DATAPORT, buf, count>>1); if (count & 0x01) buf[count-1] = inw(ioaddr + HP_DATAPORT); } /* The corresponding shared memory versions of the above 2 functions. */ static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg = inw(ioaddr + HPP_OPTION); outw((ring_page<<8), ioaddr + HPP_IN_ADDR); outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr)); outw(option_reg, ioaddr + HPP_OPTION); hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */ } static void hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg = inw(ioaddr + HPP_OPTION); outw(ring_offset, ioaddr + HPP_IN_ADDR); outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); /* Caution: this relies on get_8390_hdr() rounding up count! Also note that we *can't* use eth_io_copy_and_sum() because it will not always copy "count" bytes (e.g. padded IP). */ memcpy_fromio(skb->data, ei_status.mem, count); outw(option_reg, ioaddr + HPP_OPTION); } /* A special note: we *must* always transfer >=16 bit words. It's always safe to round up, so we do. 
*/ static void hpp_io_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { int ioaddr = dev->base_addr - NIC_OFFSET; outw(start_page << 8, ioaddr + HPP_OUT_ADDR); outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2); } static void hpp_mem_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { int ioaddr = dev->base_addr - NIC_OFFSET; int option_reg = inw(ioaddr + HPP_OPTION); outw(start_page << 8, ioaddr + HPP_OUT_ADDR); outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); memcpy_toio(ei_status.mem, buf, (count + 3) & ~3); outw(option_reg, ioaddr + HPP_OPTION); } #ifdef MODULE #define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */ static struct net_device *dev_hpp[MAX_HPP_CARDS]; static int io[MAX_HPP_CARDS]; static int irq[MAX_HPP_CARDS]; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(io, "I/O port address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected"); MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver"); MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. 
*/ int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { if (io[this_dev] == 0) { if (this_dev != 0) break; /* only autoprobe 1st one */ printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n"); } dev = alloc_eip_netdev(); if (!dev) break; dev->irq = irq[this_dev]; dev->base_addr = io[this_dev]; if (do_hpp_probe(dev) == 0) { dev_hpp[found++] = dev; continue; } free_netdev(dev); printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]); break; } if (found) return 0; return -ENXIO; } static void cleanup_card(struct net_device *dev) { /* NB: hpp_close() handles free_irq */ iounmap(ei_status.mem); release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); } void __exit cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { struct net_device *dev = dev_hpp[this_dev]; if (dev) { unregister_netdev(dev); cleanup_card(dev); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
cricard13/linux-raspberry-nfc
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
4353
31969
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * jim liu <jim.liu@intel.com>
 * Jackie Li<yaodong.li@intel.com>
 */

#include "mdfld_dsi_dpi.h"
#include "mdfld_output.h"
#include "mdfld_dsi_pkg_sender.h"
#include "psb_drv.h"
#include "tc35876x-dsi-lvds.h"

static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
								int pipe);

/* Poll until the HS generic data FIFO is no longer full, or give up
 * after roughly 2 seconds (20000 * 100us). Logs on timeout but does
 * not report failure to the caller. */
static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
{
	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
	int timeout = 0;

	udelay(500);

	/* This will time out after approximately 2+ seconds */
	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) &
					DSI_FIFO_GEN_HS_DATA_FULL)) {
		udelay(100);
		timeout++;
	}

	if (timeout == 20000)
		DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
}

/* Poll until the HS generic control FIFO drains (same ~2s bound). */
static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
{
	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
	int timeout = 0;

	udelay(500);

	/* This will time out after approximately 2+ seconds */
	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) &
					DSI_FIFO_GEN_HS_CTRL_FULL)) {
		udelay(100);
		timeout++;
	}
	if (timeout == 20000)
		DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
}

/* Poll until the DPI FIFO reports empty (same ~2s bound). */
static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
{
	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
	int timeout = 0;

	udelay(500);

	/* This will time out after approximately 2+ seconds */
	while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
					DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
		udelay(100);
		timeout++;
	}

	if (timeout == 20000)
		DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
}

/* Poll for the "special packet sent" interrupt status bit; used after
 * queueing a DPI turn-on/shut-down special packet. */
static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
{
	u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
	int timeout = 0;

	udelay(500);

	/* This will time out after approximately 2+ seconds */
	while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) &
					DSI_INTR_STATE_SPL_PKG_SENT))) {
		udelay(100);
		timeout++;
	}

	if (timeout == 20000)
		DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
}

/* For TC35876X */

/* Toggle bit 0 (device ready) of the MIPI device-ready register. */
static void dsi_set_device_ready_state(struct drm_device *dev, int state,
				int pipe)
{
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
}

/* Enable or disable the display pipe and its plane for @pipe.
 * Disable also parks the DSI lanes in ULPS and asserts LP hold; the
 * register write order below follows the hardware bring-down sequence
 * and must not be reordered. */
static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
							int state, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 pipeconf_reg = PIPEACONF;
	u32 dspcntr_reg = DSPACNTR;

	u32 dspcntr = dev_priv->dspcntr[pipe];
	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;

	if (pipe) {
		pipeconf_reg = PIPECCONF;
		dspcntr_reg = DSPCCNTR;
	} else
		mipi &= (~0x03);

	if (state) {
		/*Set up pipe */
		REG_WRITE(pipeconf_reg, BIT(31));

		if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
			dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
				__func__);

		/*Set up display plane */
		REG_WRITE(dspcntr_reg, dspcntr);
	} else {
		u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;

		/* Put DSI lanes to ULPS to disable pipe */
		REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
		REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */

		/* LP Hold */
		REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
		REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */

		/* Disable display plane */
		REG_FLD_MOD(dspcntr_reg, 0, 31, 31);

		/* Flush the plane changes ??? posted write? */
		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
		REG_READ(dspbase_reg);

		/* Disable PIPE */
		REG_FLD_MOD(pipeconf_reg, 0, 31, 31);

		if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
			dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
				__func__);

		if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
			dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
				__func__);
	}
}

/* Power-down path for the TC35876X bridge: panel off, bridge in reset,
 * pipe/plane off, DPI shut-down packet, device un-ready. */
static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
								int pipe)
{
	struct mdfld_dsi_dpi_output *dpi_output =
				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->dpi_panel_on[pipe]) {
		dev_err(dev->dev, "DPI panel is already off\n");
		return;
	}
	tc35876x_toshiba_bridge_panel_off(dev);
	tc35876x_set_bridge_reset_state(dev, 1);
	dsi_set_pipe_plane_enable_state(dev, 0, pipe);
	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
	dsi_set_device_ready_state(dev, 0, pipe);
}

/* Power-up (resume) path for the TC35876X bridge. The initial
 * shut-down + un-ready toggle puts the controller into a known state
 * before re-enabling it. */
static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
								int pipe)
{
	struct mdfld_dsi_dpi_output *dpi_output =
				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->dpi_panel_on[pipe]) {
		dev_err(dev->dev, "DPI panel is already on\n");
		return;
	}

	/* For resume path sequence */
	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
	dsi_set_device_ready_state(dev, 0, pipe);

	dsi_set_device_ready_state(dev, 1, pipe);
	tc35876x_set_bridge_reset_state(dev, 0);
	tc35876x_configure_lvds_bridge(dev);
	mdfld_dsi_dpi_turn_on(dpi_output, pipe);  /* Send turn on command */
	dsi_set_pipe_plane_enable_state(dev, 1, pipe);
}
/* End for TC35876X */

/* ************************************************************************* *\
 * FUNCTION: mdfld_dsi_tpo_ic_init
 *
 * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
 *               restore_display_registers.  since this function does not
 *               acquire the mutex, it is important that the calling function
 *               does!
 *
 * NOTE(review): the magic constants below are the TPO panel vendor's
 * register init sequence; their individual meanings are not documented
 * anywhere visible from this file.
\* ************************************************************************* */
static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
{
	struct drm_device *dev = dsi_config->dev;
	u32 dcsChannelNumber = dsi_config->channel_num;
	u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
	u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
	u32 gen_ctrl_val = GEN_LONG_WRITE;

	DRM_INFO("Enter mrst init TPO MIPI display.\n");

	gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;

	/* Flip page order */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00008036);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));

	/* 0xF0 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x005a5af0);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));

	/* Write protection key */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x005a5af1);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));

	/* 0xFC */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x005a5afc);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));

	/* 0xB7 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x770000b7);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000044);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));

	/* 0xB6 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x000a0ab6);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));

	/* 0xF2 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x081010f2);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x4a070708);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x000000c5);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));

	/* 0xF8 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x024003f8);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x01030a04);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x0e020220);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000004);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));

	/* 0xE2 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x398fc3e2);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x0000916f);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));

	/* 0xB0 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x000000b0);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));

	/* 0xF4 */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x240242f4);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x78ee2002);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x2a071050);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x507fee10);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x10300710);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));

	/* 0xBA */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x19fe07ba);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x101c0a31);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000010);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));

	/* 0xBB */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x28ff07bb);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x24280a31);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000034);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));

	/* 0xFB */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x535d05fb);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1b1a2130);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x221e180e);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x131d2120);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x535d0508);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1c1a2131);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x231f160d);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x111b2220);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x535c2008);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1f1d2433);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x2c251a10);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x2c34372d);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000023);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));

	/* 0xFA */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x525c0bfa);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1c1c232f);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x2623190e);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x18212625);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x545d0d0e);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1e1d2333);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x26231a10);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x1a222725);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x545d280f);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x21202635);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x31292013);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x31393d33);
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x00000029);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));

	/* Set DM */
	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
	REG_WRITE(gen_data_reg, 0x000100f7);
	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
}

/* Convert a count in pixel clocks to DSI byte clocks:
 * bclock = pclk * bpp / (num_lane * 8). Result truncated to u16. */
static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
						int num_lane, int bpp)
{
	return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
}

/*
 * Calculate the dpi time basing on a given drm mode @mode
 * return 0 on success.
 * FIXME: I was using proposed mode value for calculation, may need to
 * use crtc mode values later
 */
int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
				struct mdfld_dsi_dpi_timing *dpi_timing,
				int num_lane, int bpp)
{
	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
	int pclk_vsync, pclk_vfp, pclk_vbp;

	/* Derive the horizontal/vertical blanking intervals, in pixel
	 * clocks, from the drm mode geometry. */
	pclk_hactive = mode->hdisplay;
	pclk_hfp = mode->hsync_start - mode->hdisplay;
	pclk_hsync = mode->hsync_end - mode->hsync_start;
	pclk_hbp = mode->htotal - mode->hsync_end;

	pclk_vfp = mode->vsync_start - mode->vdisplay;
	pclk_vsync = mode->vsync_end - mode->vsync_start;
	pclk_vbp = mode->vtotal - mode->vsync_end;

	/*
	 * byte clock counts were calculated by following formula
	 * bclock_count = pclk_count * bpp / num_lane / 8
	 */
	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hsync, num_lane, bpp);
	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hbp, num_lane, bpp);
	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hfp, num_lane, bpp);
	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hactive, num_lane, bpp);
	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vsync, num_lane, bpp);
	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vbp, num_lane, bpp);
	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vfp, num_lane, bpp);

	return 0;
}

/* Program the DSI controller's DPI interface for @pipe from
 * @dsi_config: func_prg, timeouts, DPI timing registers, video mode
 * format, D-PHY parameters; finally marks the device ready. */
void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
								int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	int lane_count = dsi_config->lane_count;
	struct mdfld_dsi_dpi_timing dpi_timing;
	struct drm_display_mode *mode = dsi_config->mode;
	u32 val;

	/*un-ready device*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);

	/*init dsi adapter before kicking off*/
	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);

	/*enable all interrupts*/
	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);

	/*set up func_prg*/
	val = lane_count;
	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;

	switch (dsi_config->bpp) {
	case 16:
		val |= DSI_DPI_COLOR_FORMAT_RGB565;
		break;
	case 18:
		val |= DSI_DPI_COLOR_FORMAT_RGB666;
		break;
	case 24:
		val |= DSI_DPI_COLOR_FORMAT_RGB888;
		break;
	default:
		DRM_ERROR("unsupported color format, bpp = %d\n",
							dsi_config->bpp);
	}
	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);

	/* HS TX timeout scaled to one full frame's worth of byte clocks */
	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
			(mode->vtotal * mode->htotal * dsi_config->bpp /
				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
				0xffff & DSI_LP_RX_TIMEOUT_MASK);

	/*max value: 20 clock cycles of txclkesc*/
	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
				0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);

	/*min 21 txclkesc, max: ffffh*/
	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
				0xffff & DSI_RESET_TIMER_MASK);

	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
				mode->vdisplay << 16 | mode->hdisplay);

	/*set DPI timing registers*/
	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
				dsi_config->lane_count, dsi_config->bpp);

	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);

	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);

	/*min: 7d0 max: 4e20*/
	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);

	/*set up video mode*/
	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);

	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);

	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);

	/*TODO: figure out how to setup these registers*/
	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
	else
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);

	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);

	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */

	/*set device ready*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
}

/* Send the DPI "turn on" special packet and wait for completion,
 * clearing the SPL_PKG_SENT status bit before and after. */
void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
{
	struct drm_device *dev = output->dev;

	/* clear special packet sent bit */
	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	/*send turn on package*/
	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);

	/*wait for SPL_PKG_SENT interrupt*/
	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);

	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	output->panel_on = 1;

	/* FIXME the following is disabled to WA the X slow start issue
	   for TMD panel
	if (pipe == 2)
		dev_priv->dpi_panel_on2 = true;
	else if (pipe == 0)
		dev_priv->dpi_panel_on = true; */
}

/* Send the DPI "shut down" special packet after draining the DPI FIFO.
 * No-op on the very first call after boot (first_boot) or when the
 * panel is already off. */
static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
								int pipe)
{
	struct drm_device *dev = output->dev;

	/*if output is on, or mode setting didn't happen, ignore this*/
	if ((!output->panel_on) || output->first_boot) {
		output->first_boot = 0;
		return;
	}

	/* Wait for dpi fifo to empty */
	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);

	/* Clear the special packet interrupt bit if set */
	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
		goto shutdown_out;

	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);

shutdown_out:
	output->panel_on = 0;
	output->first_boot = 0;

	/* FIXME the following is disabled to WA the X slow start issue
	   for TMD panel
	if (pipe == 2)
		dev_priv->dpi_panel_on2 = false;
	else if (pipe == 0)
		dev_priv->dpi_panel_on = false; */
}

/* Common DPMS worker: powers the DPI panel on or off, dispatching to
 * the panel-specific sequence (TMD, TC35876X bridge, or TPO). */
static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_dpi_output *dpi_output =
				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	/*start up display island if it was shutdown*/
	if (!gma_power_begin(dev, true))
		return;

	if (on) {
		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mdfld_dsi_configure_up(dsi_encoder, pipe);
		else {
			/*enable mipi port*/
			REG_WRITE(MIPI_PORT_CONTROL(pipe),
				REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
			REG_READ(MIPI_PORT_CONTROL(pipe));

			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
		}
		dev_priv->dpi_panel_on[pipe] = true;
	} else {
		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mdfld_dsi_configure_down(dsi_encoder, pipe);
		else {
			mdfld_dsi_dpi_shut_down(dpi_output, pipe);

			/*disable mipi port*/
			REG_WRITE(MIPI_PORT_CONTROL(pipe),
				REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
			REG_READ(MIPI_PORT_CONTROL(pipe));
		}
		dev_priv->dpi_panel_on[pipe] = false;
	}
	gma_power_end(dev);
}

/* drm encoder helper: map DPMS state onto panel power. */
void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
{
	mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
}

/* drm encoder helper: force the adjusted mode to the panel's fixed
 * mode when one exists. Always succeeds. */
bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;

	if (fixed_mode) {
		adjusted_mode->hdisplay = fixed_mode->hdisplay;
		adjusted_mode->hsync_start = fixed_mode->hsync_start;
		adjusted_mode->hsync_end = fixed_mode->hsync_end;
		adjusted_mode->htotal = fixed_mode->htotal;
		adjusted_mode->vdisplay = fixed_mode->vdisplay;
		adjusted_mode->vsync_start = fixed_mode->vsync_start;
		adjusted_mode->vsync_end = fixed_mode->vsync_end;
		adjusted_mode->vtotal = fixed_mode->vtotal;
		adjusted_mode->clock = fixed_mode->clock;
		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
	}
	return true;
}

/* drm encoder helper: power down before a mode set. */
void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
{
	mdfld_dsi_dpi_set_power(encoder, false);
}

/* drm encoder helper: power up after a mode set. */
void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
{
	mdfld_dsi_dpi_set_power(encoder, true);
}

/* For TC35876X */
/* This functionality was implemented in FW in iCDK */
/* But removed in DV0 and later. So need to add here. */
static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;

	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
	REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
}

/* Program the DPI resolution and blanking-count registers from the
 * current mode (byte-clock units via the timing calculation above). */
static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
					int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	struct mdfld_dsi_dpi_timing dpi_timing;
	struct drm_display_mode *mode = dsi_config->mode;

	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
					dsi_config->lane_count,
					dsi_config->bpp);

	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
		mode->vdisplay << 16 | mode->hdisplay);
	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
		dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
		dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
		dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
		dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
		dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
		dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
		dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
}

/* MIPI port control, D-PHY parameters, video mode format and func_prg
 * setup for the TC35876X path, then video timing programming. */
static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	int lane_count = dsi_config->lane_count;

	if (pipe) {
		REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
		REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
	} else {
		REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
		REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
	}

	REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);

	/* lane_count = 3 */
	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);

	mdfld_mipi_set_video_timing(dsi_config, pipe);
}

/* Program pipe A display timing registers from the current mode.
 * NOTE(review): writes the pipe-A register set unconditionally,
 * regardless of @pipe — presumably TC35876X is only used on pipe A;
 * confirm against callers. */
static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	struct drm_display_mode *mode = dsi_config->mode;

	REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(HSYNC_A,
		((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));

	REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(VSYNC_A,
		((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));

	REG_WRITE(PIPEASRC,
		((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
/* End for TC35876X */

/* drm encoder helper: full mode-set sequence for the DPI output,
 * including DSI PLL start and bridge configuration on TC35876X. */
void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_dpi_output *dpi_output =
					MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);

	u32 pipeconf_reg = PIPEACONF;
	u32 dspcntr_reg = DSPACNTR;

	u32 pipeconf = dev_priv->pipeconf[pipe];
	u32 dspcntr = dev_priv->dspcntr[pipe];
	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;

	if (pipe) {
		pipeconf_reg = PIPECCONF;
		dspcntr_reg = DSPCCNTR;
	} else {
		if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mipi &= (~0x03); /* Use all four lanes */
		else
			mipi |= 2;
	}

	/*start up display island if it was shutdown*/
	if (!gma_power_begin(dev, true))
		return;

	if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		/*
		 * The following logic is required to reset the bridge and
		 * configure. This also starts the DSI clock at 200MHz.
		 */
		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
		tc35876x_toshiba_bridge_panel_on(dev);
		udelay(100);
		/* Now start the DSI clock */
		REG_WRITE(MRST_DPLL_A, 0x00);
		REG_WRITE(MRST_FPA0, 0xC1);
		REG_WRITE(MRST_DPLL_A, 0x00800000);
		udelay(500);
		REG_WRITE(MRST_DPLL_A, 0x80800000);

		if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
			dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
				__func__);

		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);

		mipi_set_properties(dsi_config, pipe);
		mdfld_mipi_config(dsi_config, pipe);
		mdfld_set_pipe_timing(dsi_config, pipe);

		REG_WRITE(DSPABASE, 0x00);
		REG_WRITE(DSPASIZE,
			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));

		REG_WRITE(DSPACNTR, 0x98000000);
		REG_WRITE(DSPASURF, 0x00);

		REG_WRITE(VGACNTRL, 0x80000000);
		REG_WRITE(DEVICE_READY_REG, 0x00000001);

		REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
	} else {
		/*set up mipi port FIXME: do at init time */
		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
	}
	REG_READ(MIPI_PORT_CONTROL(pipe));

	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
		/* NOP */
	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		/* set up DSI controller DPI interface */
		mdfld_dsi_dpi_controller_init(dsi_config, pipe);

		/* Configure MIPI Bridge and Panel */
		tc35876x_configure_lvds_bridge(dev);
		dev_priv->dpi_panel_on[pipe] = true;
	} else {
		/*turn on DPI interface*/
		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
	}

	/*set up pipe*/
	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	/*set up display plane*/
	REG_WRITE(dspcntr_reg, dspcntr);
	REG_READ(dspcntr_reg);

	msleep(20); /* FIXME: this should wait for vblank */

	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
		/* NOP */
	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
	} else {
		/* init driver ic */
		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
		/*init backlight*/
		mdfld_dsi_brightness_init(dsi_config, pipe);
	}

	gma_power_end(dev);
}

/*
 * Init DSI DPI encoder.
 * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
 * return pointer of newly allocated DPI encoder, NULL on error
 */
struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
				struct mdfld_dsi_connector *dsi_connector,
				const struct panel_funcs *p_funcs)
{
	struct mdfld_dsi_dpi_output *dpi_output = NULL;
	struct mdfld_dsi_config *dsi_config;
	struct drm_connector *connector = NULL;
	struct drm_encoder *encoder = NULL;
	int pipe;
	u32 data;
	int ret;

	pipe = dsi_connector->pipe;

	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
		dsi_config = mdfld_dsi_get_config(dsi_connector);

		/* panel hard-reset */
		if (p_funcs->reset) {
			ret = p_funcs->reset(pipe);
			if (ret) {
				DRM_ERROR("Panel %d hard-reset failed\n", pipe);
				return NULL;
			}
		}
		/* panel drvIC init */
		if (p_funcs->drv_ic_init)
			p_funcs->drv_ic_init(dsi_config, pipe);

		/* panel power mode detect */
		ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
		if (ret) {
			DRM_ERROR("Panel %d get power mode failed\n", pipe);
			dsi_connector->status =
					connector_status_disconnected;
		} else {
			DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
			dsi_connector->status = connector_status_connected;
		}
	}

	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
	if (!dpi_output) {
		DRM_ERROR("No memory\n");
		return NULL;
	}

	/* NOTE(review): both branches assign 0 — this conditional is
	 * redundant; presumably one branch was meant to differ at some
	 * point. Behavior kept as-is. */
	if (dsi_connector->pipe)
		dpi_output->panel_on = 0;
	else
		dpi_output->panel_on = 0;

	dpi_output->dev = dev;
	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
		dpi_output->p_funcs = p_funcs;
	dpi_output->first_boot = 1;

	/*get fixed mode*/
	dsi_config = mdfld_dsi_get_config(dsi_connector);

	/*create drm encoder object*/
	connector = &dsi_connector->base.base;
	encoder = &dpi_output->base.base.base;
	drm_encoder_init(dev,
			encoder,
			p_funcs->encoder_funcs,
			DRM_MODE_ENCODER_LVDS);
	drm_encoder_helper_add(encoder,
				p_funcs->encoder_helper_funcs);

	/*attach to given connector*/
	drm_mode_connector_attach_encoder(connector, encoder);

	/*set possible crtcs and clones*/
	if (dsi_connector->pipe) {
		encoder->possible_crtcs = (1 << 2);
		encoder->possible_clones = (1 << 1);
	} else {
		encoder->possible_crtcs = (1 << 0);
		encoder->possible_clones = (1 << 0);
	}

	dsi_connector->base.encoder = &dpi_output->base.base;

	return &dpi_output->base;
}
gpl-2.0
lawnn/caf_kernel_msm
drivers/ata/pata_artop.c
5121
12604
/*
 *    pata_artop.c - ARTOP ATA controller driver
 *
 *	(C) 2006 Red Hat
 *	(C) 2007,2011 Bartlomiej Zolnierkiewicz
 *
 *    Based in part on drivers/ide/pci/aec62xx.c
 *	Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
 *	865/865R fixes for Macintosh card version from a patch to the old
 *		driver by Thibaut VARENE <varenet@parisc-linux.org>
 *	When setting the PCI latency we must set 0x80 or higher for burst
 *		performance Alessandro Zummo <alessandro.zummo@towertech.it>
 *
 *	TODO
 *	Investigate no_dsc on 850R
 *	Clock detect
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>

#define DRV_NAME	"pata_artop"
#define DRV_VERSION	"0.4.6"

/*
 *	The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
 *	get PCI bus speed functionality we leave this as 0. Its a variable
 *	for when we get the functionality and also for folks wanting to
 *	test stuff.
 */

static int clock = 0;	/* timing-table selector: 0 = 33 MHz table */

/**
 *	artop62x0_pre_reset	-	probe begin
 *	@link: link
 *	@deadline: deadline jiffies for the operation
 *
 *	Nothing complicated needed here.
 */

static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits artop_enable_bits[] = {
		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	/* Odd numbered device ids are the units with enable bits. */
	if ((pdev->device & 1) &&
	    !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 *	artop6260_cable_detect	-	identify cable type
 *	@ap: Port
 *
 *	Identify the cable type for the ARTOP interface in question
 */

static int artop6260_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	/* Per-port cable bit in config byte 0x49: set means 40-wire. */
	pci_read_config_byte(pdev, 0x49, &tmp);
	if (tmp & (1 << ap->port_no))
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

/**
 *	artop6210_load_piomode - Load a set of PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device
 *	@pio: PIO mode
 *
 *	Set PIO mode for device, in host controller PCI config space. This
 *	is used both to set PIO timings in PIO mode and also to set the
 *	matching PIO clocking for UDMA, as well as the MWDMA timings.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void artop6210_load_piomode(struct ata_port *ap,
				   struct ata_device *adev, unsigned int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = adev->devno + 2 * ap->port_no;
	/* [clock][pio]: active/recovery words for 33 MHz and overclocked */
	const u16 timing[2][5] = {
		{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
		{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }

	};
	/* Load the PIO timing active/recovery bits */
	pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
}

/**
 *	artop6210_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *
 *	Set PIO mode for device, in host controller PCI config space. For
 *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
 *	the event UDMA is used the later call to set_dmamode will set the
 *	bits as required.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = adev->devno + 2 * ap->port_no;
	u8 ultra;

	artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);

	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
	pci_read_config_byte(pdev, 0x54, &ultra);
	ultra &= ~(3 << (2 * dn));
	pci_write_config_byte(pdev, 0x54, ultra);
}

/**
 *	artop6260_load_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *	@pio: PIO mode
 *
 *	Set PIO mode for device, in host controller PCI config space. The
 *	ARTOP6260 and relatives store the timing data differently.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void artop6260_load_piomode (struct ata_port *ap,
				    struct ata_device *adev, unsigned int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = adev->devno + 2 * ap->port_no;
	/* [clock][pio]: packed timing bytes for the 6260 register layout */
	const u8 timing[2][5] = {
		{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
		{ 0x70, 0x7A, 0x78, 0x43, 0x41 }

	};
	/* Load the PIO timing active/recovery bits */
	pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
}

/**
 *	artop6260_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *
 *	Set PIO mode for device, in host controller PCI config space. For
 *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
 *	the event UDMA is used the later call to set_dmamode will set the
 *	bits as required.
 *
 *	LOCKING:
 *	None (inherited from caller).
*/ static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 ultra; artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */ pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */ pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); } /** * artop6210_set_dmamode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device whose timings we are configuring * * Set DMA mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = adev->devno + 2 * ap->port_no; u8 ultra; if (adev->dma_mode == XFER_MW_DMA_0) pio = 1; else pio = 4; /* Load the PIO timing active/recovery bits */ artop6210_load_piomode(ap, adev, pio); pci_read_config_byte(pdev, 0x54, &ultra); ultra &= ~(3 << (2 * dn)); /* Add ultra DMA bits if in UDMA mode */ if (adev->dma_mode >= XFER_UDMA_0) { u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock; if (mode == 0) mode = 1; ultra |= (mode << (2 * dn)); } pci_write_config_byte(pdev, 0x54, ultra); } /** * artop6260_set_dmamode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring * * Set DMA mode for device, in host controller PCI config space. The * ARTOP6260 and relatives store the timing data differently. * * LOCKING: * None (inherited from caller). 
*/ static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 ultra; if (adev->dma_mode == XFER_MW_DMA_0) pio = 1; else pio = 4; /* Load the PIO timing active/recovery bits */ artop6260_load_piomode(ap, adev, pio); /* Add ultra DMA bits if in UDMA mode */ pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */ if (adev->dma_mode >= XFER_UDMA_0) { u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock; if (mode == 0) mode = 1; ultra |= (mode << (4 * adev->devno)); } pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); } /** * artop_6210_qc_defer - implement serialization * @qc: command * * Issue commands per host on this chip. */ static int artop6210_qc_defer(struct ata_queued_cmd *qc) { struct ata_host *host = qc->ap->host; struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; int rc; /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; /* Now apply serialization rules. 
Only allow a command if the other channel state machine is idle */ if (alt && alt->qc_active) return ATA_DEFER_PORT; return 0; } static struct scsi_host_template artop_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations artop6210_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, .set_dmamode = artop6210_set_dmamode, .prereset = artop62x0_pre_reset, .qc_defer = artop6210_qc_defer, }; static struct ata_port_operations artop6260_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, .set_dmamode = artop6260_set_dmamode, .prereset = artop62x0_pre_reset, }; static void atp8xx_fixup(struct pci_dev *pdev) { if (pdev->device == 0x0005) /* BIOS may have left us in UDMA, clear it before libata probe */ pci_write_config_byte(pdev, 0x54, 0); else if (pdev->device == 0x0008 || pdev->device == 0x0009) { u8 reg; /* Mac systems come up with some registers not set as we will need them */ /* Clear reset & test bits */ pci_read_config_byte(pdev, 0x49, &reg); pci_write_config_byte(pdev, 0x49, reg & ~0x30); /* PCI latency must be > 0x80 for burst mode, tweak it * if required. */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg); if (reg <= 0x80) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90); /* Enable IRQ output and burst mode */ pci_read_config_byte(pdev, 0x4a, &reg); pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); } } /** * artop_init_one - Register ARTOP ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in artop_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. 
*/ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info_6210 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &artop6210_ops, }; static const struct ata_port_info info_626x = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &artop6260_ops, }; static const struct ata_port_info info_628x = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &artop6260_ops, }; static const struct ata_port_info info_628x_fast = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &artop6260_ops, }; const struct ata_port_info *ppi[] = { NULL, NULL }; int rc; ata_print_version_once(&pdev->dev, DRV_VERSION); rc = pcim_enable_device(pdev); if (rc) return rc; if (id->driver_data == 0) /* 6210 variant */ ppi[0] = &info_6210; else if (id->driver_data == 1) /* 6260 */ ppi[0] = &info_626x; else if (id->driver_data == 2) { /* 6280 or 6280 + fast */ unsigned long io = pci_resource_start(pdev, 4); ppi[0] = &info_628x; if (inb(io) & 0x10) ppi[0] = &info_628x_fast; } BUG_ON(ppi[0] == NULL); atp8xx_fixup(pdev); return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { { PCI_VDEVICE(ARTOP, 0x0005), 0 }, { PCI_VDEVICE(ARTOP, 0x0006), 1 }, { PCI_VDEVICE(ARTOP, 0x0007), 1 }, { PCI_VDEVICE(ARTOP, 0x0008), 2 }, { PCI_VDEVICE(ARTOP, 0x0009), 2 }, { } /* terminate list */ }; #ifdef CONFIG_PM static int atp8xx_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; atp8xx_fixup(pdev); ata_host_resume(host); return 0; } #endif static struct pci_driver artop_pci_driver = { .name = DRV_NAME, .id_table = artop_pci_tbl, .probe = 
artop_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = atp8xx_reinit_one, #endif }; static int __init artop_init(void) { return pci_register_driver(&artop_pci_driver); } static void __exit artop_exit(void) { pci_unregister_driver(&artop_pci_driver); } module_init(artop_init); module_exit(artop_exit); MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, artop_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
tamirda/G900F_PhoeniX_Kernel_Lollipop_OLD
arch/arm/mach-prima2/clock.c
5121
11214
/* * Clock tree for CSR SiRFprimaII * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/module.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/clkdev.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/mach/map.h> #include <mach/map.h> #define SIRFSOC_CLKC_CLK_EN0 0x0000 #define SIRFSOC_CLKC_CLK_EN1 0x0004 #define SIRFSOC_CLKC_REF_CFG 0x0014 #define SIRFSOC_CLKC_CPU_CFG 0x0018 #define SIRFSOC_CLKC_MEM_CFG 0x001c #define SIRFSOC_CLKC_SYS_CFG 0x0020 #define SIRFSOC_CLKC_IO_CFG 0x0024 #define SIRFSOC_CLKC_DSP_CFG 0x0028 #define SIRFSOC_CLKC_GFX_CFG 0x002c #define SIRFSOC_CLKC_MM_CFG 0x0030 #define SIRFSOC_LKC_LCD_CFG 0x0034 #define SIRFSOC_CLKC_MMC_CFG 0x0038 #define SIRFSOC_CLKC_PLL1_CFG0 0x0040 #define SIRFSOC_CLKC_PLL2_CFG0 0x0044 #define SIRFSOC_CLKC_PLL3_CFG0 0x0048 #define SIRFSOC_CLKC_PLL1_CFG1 0x004c #define SIRFSOC_CLKC_PLL2_CFG1 0x0050 #define SIRFSOC_CLKC_PLL3_CFG1 0x0054 #define SIRFSOC_CLKC_PLL1_CFG2 0x0058 #define SIRFSOC_CLKC_PLL2_CFG2 0x005c #define SIRFSOC_CLKC_PLL3_CFG2 0x0060 #define SIRFSOC_CLOCK_VA_BASE SIRFSOC_VA(0x005000) #define KHZ 1000 #define MHZ (KHZ * KHZ) struct clk_ops { unsigned long (*get_rate)(struct clk *clk); long (*round_rate)(struct clk *clk, unsigned long rate); int (*set_rate)(struct clk *clk, unsigned long rate); int (*enable)(struct clk *clk); int (*disable)(struct clk *clk); struct clk *(*get_parent)(struct clk *clk); int (*set_parent)(struct clk *clk, struct clk *parent); }; struct clk { struct clk *parent; /* parent clk */ unsigned long rate; /* clock rate in Hz */ signed char usage; /* clock enable count */ signed char enable_bit; /* enable bit: 0 ~ 63 */ unsigned short regofs; /* register offset */ struct clk_ops *ops; /* clock operation */ }; static DEFINE_SPINLOCK(clocks_lock); static inline unsigned long 
clkc_readl(unsigned reg) { return readl(SIRFSOC_CLOCK_VA_BASE + reg); } static inline void clkc_writel(u32 val, unsigned reg) { writel(val, SIRFSOC_CLOCK_VA_BASE + reg); } /* * osc_rtc - real time oscillator - 32.768KHz * osc_sys - high speed oscillator - 26MHz */ static struct clk clk_rtc = { .rate = 32768, }; static struct clk clk_osc = { .rate = 26 * MHZ, }; /* * std pll */ static unsigned long std_pll_get_rate(struct clk *clk) { unsigned long fin = clk_get_rate(clk->parent); u32 regcfg2 = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 - SIRFSOC_CLKC_PLL1_CFG0; if (clkc_readl(regcfg2) & BIT(2)) { /* pll bypass mode */ clk->rate = fin; } else { /* fout = fin * nf / nr / od */ u32 cfg0 = clkc_readl(clk->regofs); u32 nf = (cfg0 & (BIT(13) - 1)) + 1; u32 nr = ((cfg0 >> 13) & (BIT(6) - 1)) + 1; u32 od = ((cfg0 >> 19) & (BIT(4) - 1)) + 1; WARN_ON(fin % MHZ); clk->rate = fin / MHZ * nf / nr / od * MHZ; } return clk->rate; } static int std_pll_set_rate(struct clk *clk, unsigned long rate) { unsigned long fin, nf, nr, od, reg; /* * fout = fin * nf / (nr * od); * set od = 1, nr = fin/MHz, so fout = nf * MHz */ nf = rate / MHZ; if (unlikely((rate % MHZ) || nf > BIT(13) || nf < 1)) return -EINVAL; fin = clk_get_rate(clk->parent); BUG_ON(fin < MHZ); nr = fin / MHZ; BUG_ON((fin % MHZ) || nr > BIT(6)); od = 1; reg = (nf - 1) | ((nr - 1) << 13) | ((od - 1) << 19); clkc_writel(reg, clk->regofs); reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG1 - SIRFSOC_CLKC_PLL1_CFG0; clkc_writel((nf >> 1) - 1, reg); reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 - SIRFSOC_CLKC_PLL1_CFG0; while (!(clkc_readl(reg) & BIT(6))) cpu_relax(); clk->rate = 0; /* set to zero will force recalculation */ return 0; } static struct clk_ops std_pll_ops = { .get_rate = std_pll_get_rate, .set_rate = std_pll_set_rate, }; static struct clk clk_pll1 = { .parent = &clk_osc, .regofs = SIRFSOC_CLKC_PLL1_CFG0, .ops = &std_pll_ops, }; static struct clk clk_pll2 = { .parent = &clk_osc, .regofs = SIRFSOC_CLKC_PLL2_CFG0, .ops = 
&std_pll_ops, }; static struct clk clk_pll3 = { .parent = &clk_osc, .regofs = SIRFSOC_CLKC_PLL3_CFG0, .ops = &std_pll_ops, }; /* * clock domains - cpu, mem, sys/io */ static struct clk clk_mem; static struct clk *dmn_get_parent(struct clk *clk) { struct clk *clks[] = { &clk_osc, &clk_rtc, &clk_pll1, &clk_pll2, &clk_pll3 }; u32 cfg = clkc_readl(clk->regofs); WARN_ON((cfg & (BIT(3) - 1)) > 4); return clks[cfg & (BIT(3) - 1)]; } static int dmn_set_parent(struct clk *clk, struct clk *parent) { const struct clk *clks[] = { &clk_osc, &clk_rtc, &clk_pll1, &clk_pll2, &clk_pll3 }; u32 cfg = clkc_readl(clk->regofs); int i; for (i = 0; i < ARRAY_SIZE(clks); i++) { if (clks[i] == parent) { cfg &= ~(BIT(3) - 1); clkc_writel(cfg | i, clk->regofs); /* BIT(3) - switching status: 1 - busy, 0 - done */ while (clkc_readl(clk->regofs) & BIT(3)) cpu_relax(); return 0; } } return -EINVAL; } static unsigned long dmn_get_rate(struct clk *clk) { unsigned long fin = clk_get_rate(clk->parent); u32 cfg = clkc_readl(clk->regofs); if (cfg & BIT(24)) { /* fcd bypass mode */ clk->rate = fin; } else { /* * wait count: bit[19:16], hold count: bit[23:20] */ u32 wait = (cfg >> 16) & (BIT(4) - 1); u32 hold = (cfg >> 20) & (BIT(4) - 1); clk->rate = fin / (wait + hold + 2); } return clk->rate; } static int dmn_set_rate(struct clk *clk, unsigned long rate) { unsigned long fin; unsigned ratio, wait, hold, reg; unsigned bits = (clk == &clk_mem) ? 
3 : 4; fin = clk_get_rate(clk->parent); ratio = fin / rate; if (unlikely(ratio < 2 || ratio > BIT(bits + 1))) return -EINVAL; WARN_ON(fin % rate); wait = (ratio >> 1) - 1; hold = ratio - wait - 2; reg = clkc_readl(clk->regofs); reg &= ~(((BIT(bits) - 1) << 16) | ((BIT(bits) - 1) << 20)); reg |= (wait << 16) | (hold << 20) | BIT(25); clkc_writel(reg, clk->regofs); /* waiting FCD been effective */ while (clkc_readl(clk->regofs) & BIT(25)) cpu_relax(); clk->rate = 0; /* set to zero will force recalculation */ return 0; } /* * cpu clock has no FCD register in Prima2, can only change pll */ static int cpu_set_rate(struct clk *clk, unsigned long rate) { int ret1, ret2; struct clk *cur_parent, *tmp_parent; cur_parent = dmn_get_parent(clk); BUG_ON(cur_parent == NULL || cur_parent->usage > 1); /* switch to tmp pll before setting parent clock's rate */ tmp_parent = cur_parent == &clk_pll1 ? &clk_pll2 : &clk_pll1; ret1 = dmn_set_parent(clk, tmp_parent); BUG_ON(ret1); ret2 = clk_set_rate(cur_parent, rate); ret1 = dmn_set_parent(clk, cur_parent); clk->rate = 0; /* set to zero will force recalculation */ return ret2 ? 
ret2 : ret1; } static struct clk_ops cpu_ops = { .get_parent = dmn_get_parent, .set_parent = dmn_set_parent, .set_rate = cpu_set_rate, }; static struct clk clk_cpu = { .parent = &clk_pll1, .regofs = SIRFSOC_CLKC_CPU_CFG, .ops = &cpu_ops, }; static struct clk_ops msi_ops = { .set_rate = dmn_set_rate, .get_rate = dmn_get_rate, .set_parent = dmn_set_parent, .get_parent = dmn_get_parent, }; static struct clk clk_mem = { .parent = &clk_pll2, .regofs = SIRFSOC_CLKC_MEM_CFG, .ops = &msi_ops, }; static struct clk clk_sys = { .parent = &clk_pll3, .regofs = SIRFSOC_CLKC_SYS_CFG, .ops = &msi_ops, }; static struct clk clk_io = { .parent = &clk_pll3, .regofs = SIRFSOC_CLKC_IO_CFG, .ops = &msi_ops, }; /* * on-chip clock sets */ static struct clk_lookup onchip_clks[] = { { .dev_id = "rtc", .clk = &clk_rtc, }, { .dev_id = "osc", .clk = &clk_osc, }, { .dev_id = "pll1", .clk = &clk_pll1, }, { .dev_id = "pll2", .clk = &clk_pll2, }, { .dev_id = "pll3", .clk = &clk_pll3, }, { .dev_id = "cpu", .clk = &clk_cpu, }, { .dev_id = "mem", .clk = &clk_mem, }, { .dev_id = "sys", .clk = &clk_sys, }, { .dev_id = "io", .clk = &clk_io, }, }; int clk_enable(struct clk *clk) { unsigned long flags; if (unlikely(IS_ERR_OR_NULL(clk))) return -EINVAL; if (clk->parent) clk_enable(clk->parent); spin_lock_irqsave(&clocks_lock, flags); if (!clk->usage++ && clk->ops && clk->ops->enable) clk->ops->enable(clk); spin_unlock_irqrestore(&clocks_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; if (unlikely(IS_ERR_OR_NULL(clk))) return; WARN_ON(!clk->usage); spin_lock_irqsave(&clocks_lock, flags); if (--clk->usage == 0 && clk->ops && clk->ops->disable) clk->ops->disable(clk); spin_unlock_irqrestore(&clocks_lock, flags); if (clk->parent) clk_disable(clk->parent); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { if (unlikely(IS_ERR_OR_NULL(clk))) return 0; if (clk->rate) return clk->rate; if (clk->ops && clk->ops->get_rate) return 
clk->ops->get_rate(clk); return clk_get_rate(clk->parent); } EXPORT_SYMBOL(clk_get_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { if (unlikely(IS_ERR_OR_NULL(clk))) return 0; if (clk->ops && clk->ops->round_rate) return clk->ops->round_rate(clk, rate); return 0; } EXPORT_SYMBOL(clk_round_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { if (unlikely(IS_ERR_OR_NULL(clk))) return -EINVAL; if (!clk->ops || !clk->ops->set_rate) return -EINVAL; return clk->ops->set_rate(clk, rate); } EXPORT_SYMBOL(clk_set_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { int ret; unsigned long flags; if (unlikely(IS_ERR_OR_NULL(clk))) return -EINVAL; if (!clk->ops || !clk->ops->set_parent) return -EINVAL; spin_lock_irqsave(&clocks_lock, flags); ret = clk->ops->set_parent(clk, parent); if (!ret) { parent->usage += clk->usage; clk->parent->usage -= clk->usage; BUG_ON(clk->parent->usage < 0); clk->parent = parent; } spin_unlock_irqrestore(&clocks_lock, flags); return ret; } EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { unsigned long flags; if (unlikely(IS_ERR_OR_NULL(clk))) return NULL; if (!clk->ops || !clk->ops->get_parent) return clk->parent; spin_lock_irqsave(&clocks_lock, flags); clk->parent = clk->ops->get_parent(clk); spin_unlock_irqrestore(&clocks_lock, flags); return clk->parent; } EXPORT_SYMBOL(clk_get_parent); static void __init sirfsoc_clk_init(void) { clkdev_add_table(onchip_clks, ARRAY_SIZE(onchip_clks)); } static struct of_device_id clkc_ids[] = { { .compatible = "sirf,prima2-clkc" }, {}, }; void __init sirfsoc_of_clk_init(void) { struct device_node *np; struct resource res; struct map_desc sirfsoc_clkc_iodesc = { .virtual = SIRFSOC_CLOCK_VA_BASE, .type = MT_DEVICE, }; np = of_find_matching_node(NULL, clkc_ids); if (!np) panic("unable to find compatible clkc node in dtb\n"); if (of_address_to_resource(np, 0, &res)) panic("unable to find clkc range in dtb"); of_node_put(np); sirfsoc_clkc_iodesc.pfn 
= __phys_to_pfn(res.start); sirfsoc_clkc_iodesc.length = 1 + res.end - res.start; iotable_init(&sirfsoc_clkc_iodesc, 1); sirfsoc_clk_init(); }
gpl-2.0
TV-LP51-Devices/kernel_lge_g3
drivers/staging/rts_pstor/sd.c
5121
121425
/* Driver for Realtek PCI-Express card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include "rtsx.h" #include "rtsx_transport.h" #include "rtsx_scsi.h" #include "rtsx_card.h" #include "sd.h" #define SD_MAX_RETRY_COUNT 3 static u16 REG_SD_CFG1; static u16 REG_SD_CFG2; static u16 REG_SD_CFG3; static u16 REG_SD_STAT1; static u16 REG_SD_STAT2; static u16 REG_SD_BUS_STAT; static u16 REG_SD_PAD_CTL; static u16 REG_SD_SAMPLE_POINT_CTL; static u16 REG_SD_PUSH_POINT_CTL; static u16 REG_SD_CMD0; static u16 REG_SD_CMD1; static u16 REG_SD_CMD2; static u16 REG_SD_CMD3; static u16 REG_SD_CMD4; static u16 REG_SD_CMD5; static u16 REG_SD_BYTE_CNT_L; static u16 REG_SD_BYTE_CNT_H; static u16 REG_SD_BLOCK_CNT_L; static u16 REG_SD_BLOCK_CNT_H; static u16 REG_SD_TRANSFER; static u16 REG_SD_VPCLK0_CTL; static u16 REG_SD_VPCLK1_CTL; static u16 REG_SD_DCMPS0_CTL; static u16 REG_SD_DCMPS1_CTL; static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code) { struct sd_info *sd_card = &(chip->sd_card); sd_card->err_code |= err_code; } static inline void sd_clr_err_code(struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); sd_card->err_code = 0; 
} static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code) { struct sd_info *sd_card = &(chip->sd_card); return sd_card->err_code & err_code; } static void sd_init_reg_addr(struct rtsx_chip *chip) { if (CHECK_PID(chip, 0x5209)) { REG_SD_CFG1 = SD_CFG1; REG_SD_CFG2 = SD_CFG2; REG_SD_CFG3 = SD_CFG3; REG_SD_STAT1 = SD_STAT1; REG_SD_STAT2 = SD_STAT2; REG_SD_BUS_STAT = SD_BUS_STAT; REG_SD_PAD_CTL = SD_PAD_CTL; REG_SD_SAMPLE_POINT_CTL = SD_SAMPLE_POINT_CTL; REG_SD_PUSH_POINT_CTL = SD_PUSH_POINT_CTL; REG_SD_CMD0 = SD_CMD0; REG_SD_CMD1 = SD_CMD1; REG_SD_CMD2 = SD_CMD2; REG_SD_CMD3 = SD_CMD3; REG_SD_CMD4 = SD_CMD4; REG_SD_CMD5 = SD_CMD5; REG_SD_BYTE_CNT_L = SD_BYTE_CNT_L; REG_SD_BYTE_CNT_H = SD_BYTE_CNT_H; REG_SD_BLOCK_CNT_L = SD_BLOCK_CNT_L; REG_SD_BLOCK_CNT_H = SD_BLOCK_CNT_H; REG_SD_TRANSFER = SD_TRANSFER; REG_SD_VPCLK0_CTL = SD_VPCLK0_CTL; REG_SD_VPCLK1_CTL = SD_VPCLK1_CTL; REG_SD_DCMPS0_CTL = SD_DCMPS0_CTL; REG_SD_DCMPS1_CTL = SD_DCMPS1_CTL; } else { REG_SD_CFG1 = 0xFD31; REG_SD_CFG2 = 0xFD33; REG_SD_CFG3 = 0xFD3E; REG_SD_STAT1 = 0xFD30; REG_SD_STAT2 = 0; REG_SD_BUS_STAT = 0; REG_SD_PAD_CTL = 0; REG_SD_SAMPLE_POINT_CTL = 0; REG_SD_PUSH_POINT_CTL = 0; REG_SD_CMD0 = 0xFD34; REG_SD_CMD1 = 0xFD35; REG_SD_CMD2 = 0xFD36; REG_SD_CMD3 = 0xFD37; REG_SD_CMD4 = 0xFD38; REG_SD_CMD5 = 0xFD5A; REG_SD_BYTE_CNT_L = 0xFD39; REG_SD_BYTE_CNT_H = 0xFD3A; REG_SD_BLOCK_CNT_L = 0xFD3B; REG_SD_BLOCK_CNT_H = 0xFD3C; REG_SD_TRANSFER = 0xFD32; REG_SD_VPCLK0_CTL = 0; REG_SD_VPCLK1_CTL = 0; REG_SD_DCMPS0_CTL = 0; REG_SD_DCMPS1_CTL = 0; } } static int sd_check_data0_status(struct rtsx_chip *chip) { u8 stat; if (CHECK_PID(chip, 0x5209)) { RTSX_READ_REG(chip, REG_SD_BUS_STAT, &stat); } else { RTSX_READ_REG(chip, REG_SD_STAT1, &stat); } if (!(stat & SD_DAT0_STATUS)) { sd_set_err_code(chip, SD_BUSY); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg, u8 rsp_type, u8 *rsp, int rsp_len) { struct sd_info 
*sd_card = &(chip->sd_card); int retval; int timeout = 100; u16 reg_addr; u8 *ptr; int stat_idx = 0; int rty_cnt = 0; sd_clr_err_code(chip); RTSX_DEBUGP("SD/MMC CMD %d, arg = 0x%08x\n", cmd_idx, arg); if (rsp_type == SD_RSP_TYPE_R1b) timeout = 3000; RTY_SEND_CMD: rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE); if (rsp_type == SD_RSP_TYPE_R2) { for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0); } stat_idx = 16; } else if (rsp_type != SD_RSP_TYPE_R0) { for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0); } stat_idx = 5; } rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { u8 val; rtsx_read_register(chip, REG_SD_STAT1, &val); RTSX_DEBUGP("SD_STAT1: 0x%x\n", val); if (CHECK_PID(chip, 0x5209)) { rtsx_read_register(chip, REG_SD_STAT2, &val); RTSX_DEBUGP("SD_STAT2: 0x%x\n", val); if (val & SD_RSP_80CLK_TIMEOUT) { rtsx_clear_sd_error(chip); sd_set_err_code(chip, SD_RSP_TIMEOUT); TRACE_RET(chip, STATUS_FAIL); } rtsx_read_register(chip, REG_SD_BUS_STAT, &val); RTSX_DEBUGP("SD_BUS_STAT: 0x%x\n", val); } else { rtsx_read_register(chip, REG_SD_CFG3, &val); RTSX_DEBUGP("SD_CFG3: 0x%x\n", val); } if (retval == -ETIMEDOUT) { 
if (rsp_type & SD_WAIT_BUSY_END) { retval = sd_check_data0_status(chip); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); TRACE_RET(chip, retval); } } else { sd_set_err_code(chip, SD_TO_ERR); } retval = STATUS_TIMEDOUT; } else { retval = STATUS_FAIL; } rtsx_clear_sd_error(chip); TRACE_RET(chip, retval); } if (rsp_type == SD_RSP_TYPE_R0) return STATUS_SUCCESS; ptr = rtsx_get_cmd_data(chip) + 1; if ((ptr[0] & 0xC0) != 0) { sd_set_err_code(chip, SD_STS_ERR); TRACE_RET(chip, STATUS_FAIL); } if (!(rsp_type & SD_NO_CHECK_CRC7)) { if (ptr[stat_idx] & SD_CRC7_ERR) { if (cmd_idx == WRITE_MULTIPLE_BLOCK) { sd_set_err_code(chip, SD_CRC_ERR); TRACE_RET(chip, STATUS_FAIL); } if (rty_cnt < SD_MAX_RETRY_COUNT) { wait_timeout(20); rty_cnt++; goto RTY_SEND_CMD; } else { sd_set_err_code(chip, SD_CRC_ERR); TRACE_RET(chip, STATUS_FAIL); } } } if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) { if ((cmd_idx != SEND_RELATIVE_ADDR) && (cmd_idx != SEND_IF_COND)) { if (cmd_idx != STOP_TRANSMISSION) { if (ptr[1] & 0x80) { TRACE_RET(chip, STATUS_FAIL); } } #ifdef SUPPORT_SD_LOCK if (ptr[1] & 0x7D) #else if (ptr[1] & 0x7F) #endif { RTSX_DEBUGP("ptr[1]: 0x%02x\n", ptr[1]); TRACE_RET(chip, STATUS_FAIL); } if (ptr[2] & 0xFF) { RTSX_DEBUGP("ptr[2]: 0x%02x\n", ptr[2]); TRACE_RET(chip, STATUS_FAIL); } if (ptr[3] & 0x80) { RTSX_DEBUGP("ptr[3]: 0x%02x\n", ptr[3]); TRACE_RET(chip, STATUS_FAIL); } if (ptr[3] & 0x01) { sd_card->sd_data_buf_ready = 1; } else { sd_card->sd_data_buf_ready = 0; } } } if (rsp && rsp_len) memcpy(rsp, ptr, rsp_len); return STATUS_SUCCESS; } static int sd_read_data(struct rtsx_chip *chip, u8 trans_mode, u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len, int timeout) { struct sd_info *sd_card = &(chip->sd_card); int retval; int i; sd_clr_err_code(chip); if (!buf) buf_len = 0; if (buf_len > 512) { TRACE_RET(chip, STATUS_FAIL); } rtsx_init_cmd(chip); if (cmd_len) { RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40); for (i = 
0; i < (cmd_len < 6 ? cmd_len : 6); i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i, 0xFF, cmd[i]); } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, (u8)(byte_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, (u8)blk_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, (u8)(blk_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); if (trans_mode != SD_TM_AUTO_TUNING) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, trans_mode | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); } TRACE_RET(chip, STATUS_FAIL); } if (buf && buf_len) { retval = rtsx_read_ppbuf(chip, buf, buf_len); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len, int timeout) { struct sd_info *sd_card = &(chip->sd_card); int retval; int i; sd_clr_err_code(chip); if (!buf) buf_len = 0; if (buf_len > 512) { /* This function can't write data more than one page */ TRACE_RET(chip, STATUS_FAIL); } if (buf && buf_len) { retval = rtsx_write_ppbuf(chip, buf, buf_len); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } rtsx_init_cmd(chip); if (cmd_len) { RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40); for (i = 0; i < (cmd_len < 6 ? 
cmd_len : 6); i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i, 0xFF, cmd[i]); } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, (u8)(byte_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, (u8)blk_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, (u8)(blk_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, trans_mode | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); } TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_check_csd(struct rtsx_chip *chip, char check_wp) { struct sd_info *sd_card = &(chip->sd_card); int retval; int i; u8 csd_ver, trans_speed; u8 rsp[16]; for (i = 0; i < 6; i++) { if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { sd_set_err_code(chip, SD_NO_CARD); TRACE_RET(chip, STATUS_FAIL); } retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr, SD_RSP_TYPE_R2, rsp, 16); if (retval == STATUS_SUCCESS) break; } if (i == 6) { TRACE_RET(chip, STATUS_FAIL); } memcpy(sd_card->raw_csd, rsp + 1, 15); if (CHECK_PID(chip, 0x5209)) { RTSX_READ_REG(chip, REG_SD_CMD5, sd_card->raw_csd + 15); } RTSX_DEBUGP("CSD Response:\n"); RTSX_DUMP(sd_card->raw_csd, 16); csd_ver = (rsp[1] & 0xc0) >> 6; RTSX_DEBUGP("csd_ver = %d\n", csd_ver); trans_speed = rsp[4]; if ((trans_speed & 0x07) == 0x02) { if ((trans_speed & 0xf8) >= 0x30) { if (chip->asic_code) { sd_card->sd_clock = 47; } else { sd_card->sd_clock = CLK_50; } } else if ((trans_speed & 0xf8) == 0x28) { if 
(chip->asic_code) { sd_card->sd_clock = 39; } else { sd_card->sd_clock = CLK_40; } } else if ((trans_speed & 0xf8) == 0x20) { if (chip->asic_code) { sd_card->sd_clock = 29; } else { sd_card->sd_clock = CLK_30; } } else if ((trans_speed & 0xf8) >= 0x10) { if (chip->asic_code) { sd_card->sd_clock = 23; } else { sd_card->sd_clock = CLK_20; } } else if ((trans_speed & 0x08) >= 0x08) { if (chip->asic_code) { sd_card->sd_clock = 19; } else { sd_card->sd_clock = CLK_20; } } else { TRACE_RET(chip, STATUS_FAIL); } } else { TRACE_RET(chip, STATUS_FAIL); } if (CHK_MMC_SECTOR_MODE(sd_card)) { sd_card->capacity = 0; } else { if ((!CHK_SD_HCXC(sd_card)) || (csd_ver == 0)) { u8 blk_size, c_size_mult; u16 c_size; blk_size = rsp[6] & 0x0F; c_size = ((u16)(rsp[7] & 0x03) << 10) + ((u16)rsp[8] << 2) + ((u16)(rsp[9] & 0xC0) >> 6); c_size_mult = (u8)((rsp[10] & 0x03) << 1); c_size_mult += (rsp[11] & 0x80) >> 7; sd_card->capacity = (((u32)(c_size + 1)) * (1 << (c_size_mult + 2))) << (blk_size - 9); } else { u32 total_sector = 0; total_sector = (((u32)rsp[8] & 0x3f) << 16) | ((u32)rsp[9] << 8) | (u32)rsp[10]; sd_card->capacity = (total_sector + 1) << 10; } } if (check_wp) { if (rsp[15] & 0x30) { chip->card_wp |= SD_CARD; } RTSX_DEBUGP("CSD WP Status: 0x%x\n", rsp[15]); } return STATUS_SUCCESS; } static int sd_set_sample_push_timing(struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); if (CHECK_PID(chip, 0x5209)) { if (CHK_SD_SDR104(sd_card) || CHK_SD_SDR50(sd_card)) { RTSX_WRITE_REG(chip, SD_CFG1, 0x0C | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); RTSX_WRITE_REG(chip, CARD_CLK_SOURCE, 0xFF, CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 0); } else if (CHK_SD_DDR50(sd_card) || CHK_MMC_DDR52(sd_card)) { RTSX_WRITE_REG(chip, SD_CFG1, 0x0C | SD_ASYNC_FIFO_NOT_RST, SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 
CLK_LOW_FREQ); RTSX_WRITE_REG(chip, CARD_CLK_SOURCE, 0xFF, CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 0); RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); RTSX_WRITE_REG(chip, SD_SAMPLE_POINT_CTL, DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); } else { u8 val = 0; RTSX_WRITE_REG(chip, SD_CFG1, 0x0C, SD_20_MODE); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); RTSX_WRITE_REG(chip, CARD_CLK_SOURCE, 0xFF, CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 0); if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_AUTO) { val = SD20_TX_NEG_EDGE; } else if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY) { val = SD20_TX_14_AHEAD; } else { val = SD20_TX_NEG_EDGE; } RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, SD20_TX_SEL_MASK, val); if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_AUTO) { if (chip->asic_code) { if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card)) { val = SD20_RX_14_DELAY; } else { val = SD20_RX_POS_EDGE; } } else { val = SD20_RX_14_DELAY; } } else if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_DELAY) { val = SD20_RX_14_DELAY; } else { val = SD20_RX_POS_EDGE; } RTSX_WRITE_REG(chip, SD_SAMPLE_POINT_CTL, SD20_RX_SEL_MASK, val); } } else { u8 val = 0; if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY) { val |= 0x10; } if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_AUTO) { if (chip->asic_code) { if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card)) { if (val & 0x10) { val |= 0x04; } else { val |= 0x08; } } } else { if (val & 0x10) { val |= 0x04; } else { val |= 0x08; } } } else if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_DELAY) { if (val & 0x10) { val |= 0x04; } else { val |= 0x08; } } RTSX_WRITE_REG(chip, REG_SD_CFG1, 0x1C, val); } return STATUS_SUCCESS; } static void sd_choose_proper_clock(struct rtsx_chip *chip) 
{
	/* Body of sd_choose_proper_clock() (signature on the preceding line).
	 * Pick the SD/MMC bus clock for the negotiated speed mode, using the
	 * ASIC or FPGA clock value from the chip configuration.
	 */
	struct sd_info *sd_card = &(chip->sd_card);

	if (CHK_SD_SDR104(sd_card)) {
		if (chip->asic_code) {
			sd_card->sd_clock = chip->asic_sd_sdr104_clk;
		} else {
			sd_card->sd_clock = chip->fpga_sd_sdr104_clk;
		}
	} else if (CHK_SD_DDR50(sd_card)) {
		if (chip->asic_code) {
			sd_card->sd_clock = chip->asic_sd_ddr50_clk;
		} else {
			sd_card->sd_clock = chip->fpga_sd_ddr50_clk;
		}
	} else if (CHK_SD_SDR50(sd_card)) {
		if (chip->asic_code) {
			sd_card->sd_clock = chip->asic_sd_sdr50_clk;
		} else {
			sd_card->sd_clock = chip->fpga_sd_sdr50_clk;
		}
	} else if (CHK_SD_HS(sd_card)) {
		if (chip->asic_code) {
			sd_card->sd_clock = chip->asic_sd_hs_clk;
		} else {
			sd_card->sd_clock = chip->fpga_sd_hs_clk;
		}
	} else if (CHK_MMC_52M(sd_card) || CHK_MMC_DDR52(sd_card)) {
		if (chip->asic_code) {
			sd_card->sd_clock = chip->asic_mmc_52m_clk;
		} else {
			sd_card->sd_clock = chip->fpga_mmc_52m_clk;
		}
	} else if (CHK_MMC_26M(sd_card)) {
		/* 26 MHz-class MMC: fixed values, no per-board table */
		if (chip->asic_code) {
			sd_card->sd_clock = 48;
		} else {
			sd_card->sd_clock = CLK_50;
		}
	}
}

/* Program the SD clock-divider field of REG_SD_CFG1.
 * The 0x5209 controller takes the divider value directly
 * (SD_CLK_DIVIDE_MASK); older controllers encode divide-by-128/256
 * in bits 5-6 (mask 0x60). Always returns STATUS_SUCCESS on the
 * non-error path (RTSX_WRITE_REG returns on register-write failure).
 */
static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
{
	u8 mask = 0, val = 0;

	if (CHECK_PID(chip, 0x5209)) {
		mask = SD_CLK_DIVIDE_MASK;
		val = clk_div;
	} else {
		mask = 0x60;
		if (clk_div == SD_CLK_DIVIDE_0) {
			val = 0x00;
		} else if (clk_div == SD_CLK_DIVIDE_128) {
			val = 0x40;
		} else if (clk_div == SD_CLK_DIVIDE_256) {
			val = 0x20;
		}
	}

	RTSX_WRITE_REG(chip, REG_SD_CFG1, mask, val);

	return STATUS_SUCCESS;
}

/* Apply the initial bus parameters: sample/push timing first, then
 * choose and switch to the proper card clock.
 */
static int sd_set_init_para(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	retval = sd_set_sample_push_timing(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	sd_choose_proper_clock(chip);

	retval = switch_clock(chip, sd_card->sd_clock);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Select (CMD7 with the card's RCA) or deselect (CMD7 with address 0,
 * no response) the card. @select non-zero means select.
 */
int sd_select_card(struct rtsx_chip *chip, int select)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u8 cmd_idx, cmd_type;
	u32 addr;

	if (select) {
		cmd_idx = SELECT_CARD;
		cmd_type = SD_RSP_TYPE_R1;
		addr = sd_card->sd_addr;
	} else {
		cmd_idx = DESELECT_CARD;
		cmd_type = SD_RSP_TYPE_R0;
		addr = 0;
	}

	retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

#ifdef SUPPORT_SD_LOCK
/* Refresh sd_lock_status from the card status (CMD13 response):
 * response bit CARD_IS_LOCKED sets/clears SD_LOCKED; the
 * LOCK_UNLOCK_FAILED bit makes the whole call fail.
 */
static int sd_update_lock_status(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u8 rsp[5];

	retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, rsp, 5);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (rsp[1] & 0x02) {
		sd_card->sd_lock_status |= SD_LOCKED;
	} else {
		sd_card->sd_lock_status &= ~SD_LOCKED;
	}

	RTSX_DEBUGP("sd_card->sd_lock_status = 0x%x\n", sd_card->sd_lock_status);

	if (rsp[1] & 0x01) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
#endif

/* Poll CMD13 up to @polling_cnt times until the card reports the wanted
 * current-state nibble (@state, bits 1-4 of rsp[3]) and READY_FOR_DATA
 * flag (@data_ready, bit 0). Fails if the state is never reached.
 */
static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state, u8 data_ready, int polling_cnt)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval, i;
	u8 rsp[5];

	for (i = 0; i < polling_cnt; i++) {
		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (((rsp[3] & 0x1E) == state) && ((rsp[3] & 0x01) == data_ready)) {
			return STATUS_SUCCESS;
		}
	}

	TRACE_RET(chip, STATUS_FAIL);
}

/* Switch the SD pad I/O voltage bank to 3.3 V or 1.8 V.
 * ASIC parts are switched through PHY register 0x08 (value ORed with
 * the board's phy_voltage trim); FPGA parts toggle SD_PAD_CTL.
 */
static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
{
	int retval;

	if (voltage == SD_IO_3V3) {
		if (chip->asic_code) {
			retval = rtsx_write_phy_register(chip, 0x08, 0x4FC0 | chip->phy_voltage);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8, 0);
		}
	} else if (voltage == SD_IO_1V8) {
		if (chip->asic_code) {
			retval = rtsx_write_phy_register(chip, 0x08, 0x4C40 | chip->phy_voltage);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8, SD_IO_USING_1V8);
		}
	} else {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Perform the CMD11 signal-voltage switch sequence to 1.8 V:
 * keep the clock toggling, send VOLTAGE_SWITCH, verify the card drove
 * CMD/DAT[3:0] low, stop the clock, change the pad bank to 1.8 V,
 * restart the clock, then verify CMD/DAT[3:0] are all driven high.
 * On a failed final check the clock is gated off before failing.
 */
static int sd_voltage_switch(struct rtsx_chip *chip)
{
	int retval;
	u8 stat;

	RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, SD_CLK_TOGGLE_EN);

	retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	udelay(chip->sd_voltage_switch_delay);

	RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
	/* card must pull all lines low after accepting CMD11 */
	if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS | SD_DAT0_STATUS)) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_FORCE_STOP);
	retval = sd_change_bank_voltage(chip, SD_IO_1V8);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	wait_timeout(50);

	RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
	wait_timeout(10);

	RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
	/* after the switch all five lines must be released (high) */
	if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS | SD_DAT0_STATUS)) != (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS | SD_DAT0_STATUS)) {
		RTSX_DEBUGP("SD_BUS_STAT: 0x%x\n", stat);
		rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
		rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);

	return STATUS_SUCCESS;
}

/* Pulse the RX or TX DCM (digital clock manager) reset bit in
 * DCM_DRP_CTL: assert DCM_RESET together with the direction bit,
 * then release reset keeping the direction bit.
 */
static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
{
	if (tune_dir == TUNE_RX) {
		RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_RX);
		RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RX);
	} else {
		RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_TX);
		RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_TX);
	}

	return STATUS_SUCCESS;
}

/* Set the RX or TX sample-point phase (continues past this chunk). */
static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
{
	struct sd_info *sd_card = &(chip->sd_card);
	u16 SD_VP_CTL, SD_DCMPS_CTL;
	u8 val;
	int retval;
	int ddr_rx = 0;

	RTSX_DEBUGP("sd_change_phase (sample_point = %d, tune_dir = %d)\n", sample_point, tune_dir);

	if (tune_dir == TUNE_RX) {
		SD_VP_CTL =
SD_VPRX_CTL; SD_DCMPS_CTL = SD_DCMPS_RX_CTL; if (CHK_SD_DDR50(sd_card)) { ddr_rx = 1; } } else { SD_VP_CTL = SD_VPTX_CTL; SD_DCMPS_CTL = SD_DCMPS_TX_CTL; } if (chip->asic_code) { RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK); RTSX_WRITE_REG(chip, SD_VP_CTL, 0x1F, sample_point); RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0); } else { #ifdef CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val); #endif if (ddr_rx) { RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE, PHASE_CHANGE); udelay(50); RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF, PHASE_CHANGE | PHASE_NOT_RESET | sample_point); } else { RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK); udelay(50); RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF, PHASE_NOT_RESET | sample_point); } udelay(100); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE, DCMPS_CHANGE); rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE); retval = rtsx_send_cmd(chip, SD_CARD, 100); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, Fail); } val = *rtsx_get_cmd_data(chip); if (val & DCMPS_ERROR) { TRACE_GOTO(chip, Fail); } if ((val & DCMPS_CURRENT_PHASE) != sample_point) { TRACE_GOTO(chip, Fail); } RTSX_WRITE_REG(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0); if (ddr_rx) { RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE, 0); } else { RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0); } udelay(50); } RTSX_WRITE_REG(chip, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); return STATUS_SUCCESS; Fail: #ifdef CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val); #endif rtsx_write_register(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0); 
rtsx_write_register(chip, SD_VP_CTL, PHASE_CHANGE, 0); wait_timeout(10); sd_reset_dcm(chip, tune_dir); return STATUS_FAIL; } static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width) { struct sd_info *sd_card = &(chip->sd_card); int retval; u8 cmd[5], buf[8]; retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } cmd[0] = 0x40 | SEND_SCR; cmd[1] = 0; cmd[2] = 0; cmd[3] = 0; cmd[4] = 0; retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width, buf, 8, 250); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); TRACE_RET(chip, STATUS_FAIL); } memcpy(sd_card->raw_scr, buf, 8); if ((buf[0] & 0x0F) == 0) { TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, u8 func_to_switch, u8 *buf, int buf_len) { u8 support_mask = 0, query_switch = 0, switch_busy = 0; int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0; if (func_group == SD_FUNC_GROUP_1) { support_offset = FUNCTION_GROUP1_SUPPORT_OFFSET; query_switch_offset = FUNCTION_GROUP1_QUERY_SWITCH_OFFSET; check_busy_offset = FUNCTION_GROUP1_CHECK_BUSY_OFFSET; switch (func_to_switch) { case HS_SUPPORT: support_mask = HS_SUPPORT_MASK; query_switch = HS_QUERY_SWITCH_OK; switch_busy = HS_SWITCH_BUSY; break; case SDR50_SUPPORT: support_mask = SDR50_SUPPORT_MASK; query_switch = SDR50_QUERY_SWITCH_OK; switch_busy = SDR50_SWITCH_BUSY; break; case SDR104_SUPPORT: support_mask = SDR104_SUPPORT_MASK; query_switch = SDR104_QUERY_SWITCH_OK; switch_busy = SDR104_SWITCH_BUSY; break; case DDR50_SUPPORT: support_mask = DDR50_SUPPORT_MASK; query_switch = DDR50_QUERY_SWITCH_OK; switch_busy = DDR50_SWITCH_BUSY; break; default: TRACE_RET(chip, STATUS_FAIL); } } else if (func_group == SD_FUNC_GROUP_3) { support_offset = FUNCTION_GROUP3_SUPPORT_OFFSET; query_switch_offset = FUNCTION_GROUP3_QUERY_SWITCH_OFFSET; check_busy_offset = 
FUNCTION_GROUP3_CHECK_BUSY_OFFSET; switch (func_to_switch) { case DRIVING_TYPE_A: support_mask = DRIVING_TYPE_A_MASK; query_switch = TYPE_A_QUERY_SWITCH_OK; switch_busy = TYPE_A_SWITCH_BUSY; break; case DRIVING_TYPE_C: support_mask = DRIVING_TYPE_C_MASK; query_switch = TYPE_C_QUERY_SWITCH_OK; switch_busy = TYPE_C_SWITCH_BUSY; break; case DRIVING_TYPE_D: support_mask = DRIVING_TYPE_D_MASK; query_switch = TYPE_D_QUERY_SWITCH_OK; switch_busy = TYPE_D_SWITCH_BUSY; break; default: TRACE_RET(chip, STATUS_FAIL); } } else if (func_group == SD_FUNC_GROUP_4) { support_offset = FUNCTION_GROUP4_SUPPORT_OFFSET; query_switch_offset = FUNCTION_GROUP4_QUERY_SWITCH_OFFSET; check_busy_offset = FUNCTION_GROUP4_CHECK_BUSY_OFFSET; switch (func_to_switch) { case CURRENT_LIMIT_400: support_mask = CURRENT_LIMIT_400_MASK; query_switch = CURRENT_LIMIT_400_QUERY_SWITCH_OK; switch_busy = CURRENT_LIMIT_400_SWITCH_BUSY; break; case CURRENT_LIMIT_600: support_mask = CURRENT_LIMIT_600_MASK; query_switch = CURRENT_LIMIT_600_QUERY_SWITCH_OK; switch_busy = CURRENT_LIMIT_600_SWITCH_BUSY; break; case CURRENT_LIMIT_800: support_mask = CURRENT_LIMIT_800_MASK; query_switch = CURRENT_LIMIT_800_QUERY_SWITCH_OK; switch_busy = CURRENT_LIMIT_800_SWITCH_BUSY; break; default: TRACE_RET(chip, STATUS_FAIL); } } else { TRACE_RET(chip, STATUS_FAIL); } if (func_group == SD_FUNC_GROUP_1) { if (!(buf[support_offset] & support_mask) || ((buf[query_switch_offset] & 0x0F) != query_switch)) { TRACE_RET(chip, STATUS_FAIL); } } /* Check 'Busy Status' */ if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) && ((buf[check_busy_offset] & switch_busy) == switch_busy)) { TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group, u8 func_to_switch, u8 bus_width) { struct sd_info *sd_card = &(chip->sd_card); int retval; u8 cmd[5], buf[64]; RTSX_DEBUGP("sd_check_switch_mode (mode = %d, func_group = %d, func_to_switch = %d)\n", mode, func_group, func_to_switch); 
cmd[0] = 0x40 | SWITCH; cmd[1] = mode; if (func_group == SD_FUNC_GROUP_1) { cmd[2] = 0xFF; cmd[3] = 0xFF; cmd[4] = 0xF0 + func_to_switch; } else if (func_group == SD_FUNC_GROUP_3) { cmd[2] = 0xFF; cmd[3] = 0xF0 + func_to_switch; cmd[4] = 0xFF; } else if (func_group == SD_FUNC_GROUP_4) { cmd[2] = 0xFF; cmd[3] = 0x0F + (func_to_switch << 4); cmd[4] = 0xFF; } else { cmd[1] = SD_CHECK_MODE; cmd[2] = 0xFF; cmd[3] = 0xFF; cmd[4] = 0xFF; } retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width, buf, 64, 250); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); TRACE_RET(chip, STATUS_FAIL); } RTSX_DUMP(buf, 64); if (func_group == NO_ARGUMENT) { sd_card->func_group1_mask = buf[0x0D]; sd_card->func_group2_mask = buf[0x0B]; sd_card->func_group3_mask = buf[0x09]; sd_card->func_group4_mask = buf[0x07]; RTSX_DEBUGP("func_group1_mask = 0x%02x\n", buf[0x0D]); RTSX_DEBUGP("func_group2_mask = 0x%02x\n", buf[0x0B]); RTSX_DEBUGP("func_group3_mask = 0x%02x\n", buf[0x09]); RTSX_DEBUGP("func_group4_mask = 0x%02x\n", buf[0x07]); } else { /* Maximum current consumption, check whether current is acceptable; * bit[511:496] = 0x0000 means some error happaned. 
 */
		u16 cc = ((u16)buf[0] << 8) | buf[1];

		RTSX_DEBUGP("Maximum current consumption: %dmA\n", cc);
		/* 0 means the card reported an error; >800 mA is out of range */
		if ((cc == 0) || (cc > 800)) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = sd_query_switch_result(chip, func_group, func_to_switch, buf, 64);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if ((cc > 400) || (func_to_switch > CURRENT_LIMIT_400)) {
			/* high-current mode: raise OCP threshold and PMOS drive */
			RTSX_WRITE_REG(chip, OCPPARA2, SD_OCP_THD_MASK, chip->sd_800mA_ocp_thd);
			RTSX_WRITE_REG(chip, CARD_PWR_CTL, PMOS_STRG_MASK, PMOS_STRG_800mA);
		}
	}

	return STATUS_SUCCESS;
}

/* Step down to the next lower function after a failed switch attempt:
 * group 1 (access mode) decrements toward HS_SUPPORT, group 4 (current
 * limit) toward CURRENT_LIMIT_200; other groups are returned unchanged.
 */
static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
{
	if (func_group == SD_FUNC_GROUP_1) {
		if (func_to_switch > HS_SUPPORT) {
			func_to_switch--;
		}
	} else if (func_group == SD_FUNC_GROUP_4) {
		if (func_to_switch > CURRENT_LIMIT_200) {
			func_to_switch--;
		}
	}

	return func_to_switch;
}

/* Try up to 3 times to switch the card to @func_to_switch via CMD6:
 * check mode first, then switch mode. On each failed attempt the target
 * is downgraded via downgrade_switch_mode(); a CRC16 error on the
 * switch, or card removal, aborts immediately.
 */
static int sd_check_switch(struct rtsx_chip *chip, u8 func_group, u8 func_to_switch, u8 bus_width)
{
	int retval;
	int i;
	int switch_good = 0;

	for (i = 0; i < 3; i++) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group, func_to_switch, bus_width);
		if (retval == STATUS_SUCCESS) {
			u8 stat;

			retval = sd_check_switch_mode(chip, SD_SWITCH_MODE, func_group, func_to_switch, bus_width);
			if (retval == STATUS_SUCCESS) {
				switch_good = 1;
				break;
			}

			RTSX_READ_REG(chip, SD_STAT1, &stat);
			if (stat & SD_CRC16_ERR) {
				RTSX_DEBUGP("SD CRC16 error when switching mode\n");
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		func_to_switch = downgrade_switch_mode(func_group, func_to_switch);

		wait_timeout(20);
	}

	if (!switch_good) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Negotiate access mode (group 1) and current limit (group 4) with the
 * card via CMD6, honoring chip->sd_speed_prior / sd_current_prior
 * priority lists (continues past this chunk).
 */
static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	int i;
	u8 func_to_switch = 0;

	/* Get supported functions */
	retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT, NO_ARGUMENT, bus_width);
	if (retval !=
STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } sd_card->func_group1_mask &= ~(sd_card->sd_switch_fail); /* Function Group 1: Access Mode */ for (i = 0; i < 4; i++) { switch ((u8)(chip->sd_speed_prior >> (i*8))) { case SDR104_SUPPORT: if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) && chip->sdr104_en) { func_to_switch = SDR104_SUPPORT; } break; case DDR50_SUPPORT: if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) && chip->ddr50_en) { func_to_switch = DDR50_SUPPORT; } break; case SDR50_SUPPORT: if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) && chip->sdr50_en) { func_to_switch = SDR50_SUPPORT; } break; case HS_SUPPORT: if (sd_card->func_group1_mask & HS_SUPPORT_MASK) { func_to_switch = HS_SUPPORT; } break; default: continue; } if (func_to_switch) { break; } } RTSX_DEBUGP("SD_FUNC_GROUP_1: func_to_switch = 0x%02x", func_to_switch); #ifdef SUPPORT_SD_LOCK if ((sd_card->sd_lock_status & SD_SDR_RST) && (DDR50_SUPPORT == func_to_switch) && (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) { func_to_switch = SDR50_SUPPORT; RTSX_DEBUGP("Using SDR50 instead of DDR50 for SD Lock\n"); } #endif if (func_to_switch) { retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch, bus_width); if (retval != STATUS_SUCCESS) { if (func_to_switch == SDR104_SUPPORT) { sd_card->sd_switch_fail = SDR104_SUPPORT_MASK; } else if (func_to_switch == DDR50_SUPPORT) { sd_card->sd_switch_fail = SDR104_SUPPORT_MASK | DDR50_SUPPORT_MASK; } else if (func_to_switch == SDR50_SUPPORT) { sd_card->sd_switch_fail = SDR104_SUPPORT_MASK | DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK; } TRACE_RET(chip, STATUS_FAIL); } if (func_to_switch == SDR104_SUPPORT) { SET_SD_SDR104(sd_card); } else if (func_to_switch == DDR50_SUPPORT) { SET_SD_DDR50(sd_card); } else if (func_to_switch == SDR50_SUPPORT) { SET_SD_SDR50(sd_card); } else { SET_SD_HS(sd_card); } } if (CHK_SD_DDR50(sd_card)) { RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0x04); retval = sd_set_sample_push_timing(chip); if (retval != 
STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } if (!func_to_switch || (func_to_switch == HS_SUPPORT)) { /* Do not try to switch current limit if the card doesn't * support UHS mode or we don't want it to support UHS mode */ return STATUS_SUCCESS; } /* Function Group 4: Current Limit */ func_to_switch = 0xFF; for (i = 0; i < 4; i++) { switch ((u8)(chip->sd_current_prior >> (i*8))) { case CURRENT_LIMIT_800: if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK) { func_to_switch = CURRENT_LIMIT_800; } break; case CURRENT_LIMIT_600: if (sd_card->func_group4_mask & CURRENT_LIMIT_600_MASK) { func_to_switch = CURRENT_LIMIT_600; } break; case CURRENT_LIMIT_400: if (sd_card->func_group4_mask & CURRENT_LIMIT_400_MASK) { func_to_switch = CURRENT_LIMIT_400; } break; case CURRENT_LIMIT_200: if (sd_card->func_group4_mask & CURRENT_LIMIT_200_MASK) { func_to_switch = CURRENT_LIMIT_200; } break; default: continue; } if (func_to_switch != 0xFF) { break; } } RTSX_DEBUGP("SD_FUNC_GROUP_4: func_to_switch = 0x%02x", func_to_switch); if (func_to_switch <= CURRENT_LIMIT_800) { retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch, bus_width); if (retval != STATUS_SUCCESS) { if (sd_check_err_code(chip, SD_NO_CARD)) { TRACE_RET(chip, STATUS_FAIL); } } RTSX_DEBUGP("Switch current limit finished! 
(%d)\n", retval); } if (CHK_SD_DDR50(sd_card)) { RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0); } return STATUS_SUCCESS; } static int sd_wait_data_idle(struct rtsx_chip *chip) { int retval = STATUS_TIMEDOUT; int i; u8 val = 0; for (i = 0; i < 100; i++) { RTSX_READ_REG(chip, SD_DATA_STATE, &val); if (val & SD_DATA_IDLE) { retval = STATUS_SUCCESS; break; } udelay(100); } RTSX_DEBUGP("SD_DATA_STATE: 0x%02x\n", val); return retval; } static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) { int retval; u8 cmd[5]; retval = sd_change_phase(chip, sample_point, TUNE_RX); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } cmd[0] = 0x40 | SEND_TUNING_PATTERN; cmd[1] = 0; cmd[2] = 0; cmd[3] = 0; cmd[4] = 0; retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); rtsx_clear_sd_error(chip); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) { struct sd_info *sd_card = &(chip->sd_card); int retval; u8 cmd[5]; retval = sd_change_phase(chip, sample_point, TUNE_RX); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } RTSX_DEBUGP("sd ddr tuning rx\n"); retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } cmd[0] = 0x40 | SD_STATUS; cmd[1] = 0; cmd[2] = 0; cmd[3] = 0; cmd[4] = 0; retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); rtsx_clear_sd_error(chip); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) { struct sd_info *sd_card = &(chip->sd_card); int retval; u8 cmd[5], bus_width; if (CHK_MMC_8BIT(sd_card)) { bus_width = SD_BUS_WIDTH_8; } else if (CHK_MMC_4BIT(sd_card)) { 
bus_width = SD_BUS_WIDTH_4; } else { bus_width = SD_BUS_WIDTH_1; } retval = sd_change_phase(chip, sample_point, TUNE_RX); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } RTSX_DEBUGP("mmc ddr tuning rx\n"); cmd[0] = 0x40 | SEND_EXT_CSD; cmd[1] = 0; cmd[2] = 0; cmd[3] = 0; cmd[4] = 0; retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1, bus_width, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); rtsx_clear_sd_error(chip); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) { struct sd_info *sd_card = &(chip->sd_card); int retval; retval = sd_change_phase(chip, sample_point, TUNE_TX); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, SD_RSP_80CLK_TIMEOUT_EN); retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) { rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); TRACE_RET(chip, STATUS_FAIL); } } RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); return STATUS_SUCCESS; } static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) { struct sd_info *sd_card = &(chip->sd_card); int retval; u8 cmd[5], bus_width; retval = sd_change_phase(chip, sample_point, TUNE_TX); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (CHK_SD(sd_card)) { bus_width = SD_BUS_WIDTH_4; } else { if (CHK_MMC_8BIT(sd_card)) { bus_width = SD_BUS_WIDTH_8; } else if (CHK_MMC_4BIT(sd_card)) { bus_width = SD_BUS_WIDTH_4; } else { bus_width = SD_BUS_WIDTH_1; } } retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, SD_RSP_80CLK_TIMEOUT_EN); cmd[0] = 0x40 | PROGRAM_CSD; cmd[1] = 0; cmd[2] = 0; cmd[3] = 0; cmd[4] = 0; 
retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); TRACE_RET(chip, STATUS_FAIL); } RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); return STATUS_SUCCESS; } static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, u8 tune_dir) { struct sd_info *sd_card = &(chip->sd_card); struct timing_phase_path path[MAX_PHASE + 1]; int i, j, cont_path_cnt; int new_block, max_len, final_path_idx; u8 final_phase = 0xFF; if (phase_map == 0xFFFFFFFF) { if (tune_dir == TUNE_RX) { final_phase = (u8)chip->sd_default_rx_phase; } else { final_phase = (u8)chip->sd_default_tx_phase; } goto Search_Finish; } cont_path_cnt = 0; new_block = 1; j = 0; for (i = 0; i < MAX_PHASE + 1; i++) { if (phase_map & (1 << i)) { if (new_block) { new_block = 0; j = cont_path_cnt++; path[j].start = i; path[j].end = i; } else { path[j].end = i; } } else { new_block = 1; if (cont_path_cnt) { int idx = cont_path_cnt - 1; path[idx].len = path[idx].end - path[idx].start + 1; path[idx].mid = path[idx].start + path[idx].len / 2; } } } if (cont_path_cnt == 0) { RTSX_DEBUGP("No continuous phase path\n"); goto Search_Finish; } else { int idx = cont_path_cnt - 1; path[idx].len = path[idx].end - path[idx].start + 1; path[idx].mid = path[idx].start + path[idx].len / 2; } if ((path[0].start == 0) && (path[cont_path_cnt - 1].end == MAX_PHASE)) { path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1; path[0].len += path[cont_path_cnt - 1].len; path[0].mid = path[0].start + path[0].len / 2; if (path[0].mid < 0) { path[0].mid += MAX_PHASE + 1; } cont_path_cnt--; } max_len = 0; final_phase = 0; final_path_idx = 0; for (i = 0; i < cont_path_cnt; i++) { if (path[i].len > max_len) { max_len = path[i].len; final_phase = (u8)path[i].mid; 
final_path_idx = i; } RTSX_DEBUGP("path[%d].start = %d\n", i, path[i].start); RTSX_DEBUGP("path[%d].end = %d\n", i, path[i].end); RTSX_DEBUGP("path[%d].len = %d\n", i, path[i].len); RTSX_DEBUGP("path[%d].mid = %d\n", i, path[i].mid); RTSX_DEBUGP("\n"); } if (tune_dir == TUNE_TX) { if (CHK_SD_SDR104(sd_card)) { if (max_len > 15) { int temp_mid = (max_len - 16) / 2; int temp_final_phase = path[final_path_idx].end - (max_len - (6 + temp_mid)); if (temp_final_phase < 0) { final_phase = (u8)(temp_final_phase + MAX_PHASE + 1); } else { final_phase = (u8)temp_final_phase; } } } else if (CHK_SD_SDR50(sd_card)) { if (max_len > 12) { int temp_mid = (max_len - 13) / 2; int temp_final_phase = path[final_path_idx].end - (max_len - (3 + temp_mid)); if (temp_final_phase < 0) { final_phase = (u8)(temp_final_phase + MAX_PHASE + 1); } else { final_phase = (u8)temp_final_phase; } } } } Search_Finish: RTSX_DEBUGP("Final chosen phase: %d\n", final_phase); return final_phase; } static int sd_tuning_rx(struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); int retval; int i, j; u32 raw_phase_map[3], phase_map; u8 final_phase; int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point); if (CHK_SD(sd_card)) { if (CHK_SD_DDR50(sd_card)) { tuning_cmd = sd_ddr_tuning_rx_cmd; } else { tuning_cmd = sd_sdr_tuning_rx_cmd; } } else { if (CHK_MMC_DDR52(sd_card)) { tuning_cmd = mmc_ddr_tunning_rx_cmd; } else { TRACE_RET(chip, STATUS_FAIL); } } for (i = 0; i < 3; i++) { raw_phase_map[i] = 0; for (j = MAX_PHASE; j >= 0; j--) { if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { sd_set_err_code(chip, SD_NO_CARD); TRACE_RET(chip, STATUS_FAIL); } retval = tuning_cmd(chip, (u8)j); if (retval == STATUS_SUCCESS) { raw_phase_map[i] |= 1 << j; } } } phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2]; for (i = 0; i < 3; i++) { RTSX_DEBUGP("RX raw_phase_map[%d] = 0x%08x\n", i, raw_phase_map[i]); } RTSX_DEBUGP("RX phase_map = 0x%08x\n", phase_map); final_phase = 
sd_search_final_phase(chip, phase_map, TUNE_RX);
	if (final_phase == 0xFF) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* apply the phase chosen from the intersected RX phase map */
	retval = sd_change_phase(chip, final_phase, TUNE_RX);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Coarse TX phase scan before DDR tuning: probe every phase point with
 * CMD13 (SEND_STATUS) and build a bitmap of phases whose command either
 * succeeded or at least did not time out, then select and apply a final
 * phase via sd_search_final_phase(). SD_RSP_80CLK_TIMEOUT_EN is held
 * for the duration of the scan and cleared afterwards (also on the
 * card-removed error path).
 */
static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	int i;
	u32 phase_map;
	u8 final_phase;

	RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, SD_RSP_80CLK_TIMEOUT_EN);

	phase_map = 0;
	for (i = MAX_PHASE; i >= 0; i--) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = sd_change_phase(chip, (u8)i, TUNE_TX);
		if (retval != STATUS_SUCCESS) {
			continue;
		}

		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
		/* a non-timeout failure still counts this phase as usable */
		if ((retval == STATUS_SUCCESS) || !sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
			phase_map |= 1 << i;
		}
	}

	RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);

	RTSX_DEBUGP("DDR TX pre tune phase_map = 0x%08x\n", phase_map);

	final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
	if (final_phase == 0xFF) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_change_phase(chip, final_phase, TUNE_TX);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_DEBUGP("DDR TX pre tune phase: %d\n", (int)final_phase);

	return STATUS_SUCCESS;
}

/* Full TX tuning: run the mode-appropriate tuning command at every
 * phase point in 3 passes, intersect the per-pass bitmaps, and apply
 * the chosen phase (continues past this chunk).
 */
static int sd_tuning_tx(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	int i, j;
	u32 raw_phase_map[3], phase_map;
	u8 final_phase;
	int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);

	if (CHK_SD(sd_card)) {
		if (CHK_SD_DDR50(sd_card)) {
			tuning_cmd = sd_ddr_tuning_tx_cmd;
		} else {
			tuning_cmd = sd_sdr_tuning_tx_cmd;
		}
	} else {
		if (CHK_MMC_DDR52(sd_card)) {
			tuning_cmd = sd_ddr_tuning_tx_cmd;
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	for (i = 0; i < 3; i++) {
		raw_phase_map[i] = 0;
		for (j = MAX_PHASE;
		     j >= 0; j--) {
			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
				sd_set_err_code(chip, SD_NO_CARD);
				rtsx_write_register(chip, SD_CFG3,
						SD_RSP_80CLK_TIMEOUT_EN, 0);
				TRACE_RET(chip, STATUS_FAIL);
			}

			retval = tuning_cmd(chip, (u8)j);
			if (retval == STATUS_SUCCESS) {
				raw_phase_map[i] |= 1 << j;
			}
		}
	}

	/* Only phases that passed in all three sweeps are trusted */
	phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
	for (i = 0; i < 3; i++) {
		RTSX_DEBUGP("TX raw_phase_map[%d] = 0x%08x\n", i,
			raw_phase_map[i]);
	}
	RTSX_DEBUGP("TX phase_map = 0x%08x\n", phase_map);

	final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
	if (final_phase == 0xFF) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_change_phase(chip, final_phase, TUNE_TX);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* SDR (SD 3.0 high-speed) tuning: TX phase first, then RX phase. */
static int sd_sdr_tuning(struct rtsx_chip *chip)
{
	int retval;

	retval = sd_tuning_tx(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_tuning_rx(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* DDR50 SD tuning. TX phase comes either from a pre-tuning scan or from a
 * user-supplied value (SD_DDR_TX_PHASE_SET_BY_USER); RX is always tuned,
 * and a full TX tune is re-run afterwards unless the user fixed the phase. */
static int sd_ddr_tuning(struct rtsx_chip *chip)
{
	int retval;

	if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
		retval = sd_ddr_pre_tuning_tx(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
					TUNE_TX);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = sd_tuning_rx(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
		retval = sd_tuning_tx(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/* eMMC DDR52 tuning; same structure as sd_ddr_tuning() but keyed off the
 * MMC_DDR_TX_PHASE_SET_BY_USER control bit and mmc_ddr_tx_phase. */
static int mmc_ddr_tuning(struct rtsx_chip *chip)
{
	int retval;

	if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
		retval = sd_ddr_pre_tuning_tx(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
					TUNE_TX);
		if (retval !=
		    STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = sd_tuning_rx(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
		retval = sd_tuning_tx(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/* Select the SD card and switch the controller clock to sd_card->sd_clock.
 * On RTS5209 in SD 3.0 / MMC DDR52 mode, a pending retune request combined
 * with a clock change triggers a full phase re-tuning after the switch. */
int sd_switch_clock(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	int re_tuning = 0;

	retval = select_card(chip, SD_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHECK_PID(chip, 0x5209) &&
			(CHK_SD30_SPEED(sd_card) || CHK_MMC_DDR52(sd_card))) {
		if (sd_card->need_retune && (sd_card->sd_clock != chip->cur_clk)) {
			re_tuning = 1;
			sd_card->need_retune = 0;
		}
	}

	retval = switch_clock(chip, sd_card->sd_clock);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (re_tuning) {
		/* NOTE(review): if neither CHK_SD nor CHK_MMC_DDR52 holds,
		 * retval keeps the (successful) switch_clock result, so the
		 * check below is a no-op in that branch. */
		if (CHK_SD(sd_card)) {
			if (CHK_SD_DDR50(sd_card)) {
				retval = sd_ddr_tuning(chip);
			} else {
				retval = sd_sdr_tuning(chip);
			}
		} else {
			if (CHK_MMC_DDR52(sd_card)) {
				retval = mmc_ddr_tuning(chip);
			}
		}

		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/* Reset per-card software state and program the controller registers to a
 * known pre-reset configuration (slow clock, 1-bit bus, SD 2.0 mode). */
static int sd_prepare_reset(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	/* Initial identification clock; units differ by build:
	 * asic_code uses a raw number, FPGA uses a CLK_* constant. */
	if (chip->asic_code) {
		sd_card->sd_clock = 29;
	} else {
		sd_card->sd_clock = CLK_30;
	}

	sd_card->sd_type = 0;
	sd_card->seq_mode = 0;
	sd_card->sd_data_buf_ready = 0;
	sd_card->capacity = 0;

#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status = 0;
	sd_card->sd_erase_status = 0;
#endif

	chip->capacity[chip->card2lun[SD_CARD]] = 0;
	chip->sd_io = 0;

	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, retval);
	}

	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, REG_SD_CFG1, 0xFF,
			SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1);
		RTSX_WRITE_REG(chip, SD_SAMPLE_POINT_CTL, 0xFF,
			SD20_RX_POS_EDGE);
		RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0xFF, 0);
	} else {
		/* presumably the same divide/mode/width encoding packed
		 * into one value on older chips — TODO confirm */
		RTSX_WRITE_REG(chip, REG_SD_CFG1, 0xFF, 0x40);
	}
	/* Stop any in-flight SD transfer and clear latched error state */
	RTSX_WRITE_REG(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
		SD_STOP | SD_CLR_ERR);

	retval = select_card(chip, SD_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Put the SD pad pull resistors into their inactive (card idle/off) state.
 * Register values are chip-specific; the raw hex values for 5209/5288 are
 * taken from the vendor reference — meaning of individual bits not visible
 * here. */
static int sd_pull_ctl_disable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0xD5);
	} else if (CHECK_PID(chip, 0x5208)) {
		RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
			XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
			SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
			SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
			XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF,
			MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
		}
	}

	return STATUS_SUCCESS;
}

/* Enable the pad pull resistors required for active SD operation.
 * Uses a batched command list (rtsx_init_cmd/rtsx_add_cmd/rtsx_send_cmd)
 * rather than individual register writes. */
int sd_pull_ctl_enable(struct rtsx_chip *chip)
{
	int retval;

	rtsx_init_cmd(chip);

	if (CHECK_PID(chip, 0x5209)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xE9);
	} else if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			XD_RDY_PD |
			SD_D3_PU | SD_D2_PU | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
				0xFF, 0xA8);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
				0xFF, 0x5A);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
				0xFF, 0x95);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
				0xFF, 0xAA);
		}
	}

	retval = rtsx_send_cmd(chip, SD_CARD, 100);
	if (retval < 0) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Power-cycle the SD slot: drop 3.3V power, enable the card clock and pad
 * pulls, then re-apply power.  ft2_fast_mode skips the settle delays and
 * (on 5209) drives the LDO directly instead of the normal power-on path. */
static int sd_init_power(struct rtsx_chip *chip)
{
	int retval;

	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_OFF);
	}

	retval = sd_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!chip->ft2_fast_mode) {
		/* Let the rail discharge before powering back up */
		wait_timeout(250);
	}

	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (chip->asic_code) {
		retval = sd_pull_ctl_enable(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
			FPGA_SD_PULL_CTL_BIT | 0x20, 0);
	}

	if (chip->ft2_fast_mode) {
		if (CHECK_PID(chip, 0x5209)) {
			RTSX_WRITE_REG(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK,
				LDO_ON);
		}
	} else {
		retval = card_power_on(chip, SD_CARD);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* Power-up ramp time before touching the card */
		wait_timeout(260);

#ifdef SUPPORT_OCP
		/* Abort if over-current protection has tripped */
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			TRACE_RET(chip, STATUS_FAIL);
		}
#endif
	}

	RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);

	return STATUS_SUCCESS;
}

/* Toggle the SD clock for ~5 ms with no command, giving the card the
 * initialization clocks it needs after power-up. */
static int sd_dummy_clock(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN,
			SD_CLK_TOGGLE_EN);
		wait_timeout(5);
		RTSX_WRITE_REG(chip, SD_BUS_STAT,
			SD_CLK_TOGGLE_EN, 0x00);
	} else {
		RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0x01);
		wait_timeout(5);
		RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0);
	}

	return STATUS_SUCCESS;
}

/* Sanity read of sector 0 (CMD17) at the current bus width; used after
 * tuning to prove the data path actually works.  Data is discarded. */
static int sd_read_lba0(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u8 cmd[5], bus_width;

	cmd[0] = 0x40 | READ_SINGLE_BLOCK;
	cmd[1] = 0;
	cmd[2] = 0;
	cmd[3] = 0;
	cmd[4] = 0;

	if (CHK_SD(sd_card)) {
		bus_width = SD_BUS_WIDTH_4;
	} else {
		if (CHK_MMC_8BIT(sd_card)) {
			bus_width = SD_BUS_WIDTH_8;
		} else if (CHK_MMC_4BIT(sd_card)) {
			bus_width = SD_BUS_WIDTH_4;
		} else {
			bus_width = SD_BUS_WIDTH_1;
		}
	}

	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1,
			bus_width, NULL, 0, 100);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Determine write-protect status from two sources: the SD_STATUS (ACMD13)
 * card-type field (ROM/OTP cards are inherently read-only) and the slot's
 * mechanical WP switch reported in RTSX_BIPR.  Sets SD_CARD in card_wp. */
static int sd_check_wp_state(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u32 val;
	u16 sd_card_type;
	u8 cmd[5], buf[64];

	retval = sd_send_cmd_get_rsp(chip, APP_CMD,
			sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	cmd[0] = 0x40 | SD_STATUS;
	cmd[1] = 0;
	cmd[2] = 0;
	cmd[3] = 0;
	cmd[4] = 0;

	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
			SD_BUS_WIDTH_4, buf, 64, 250);
	if (retval != STATUS_SUCCESS) {
		rtsx_clear_sd_error(chip);
		/* Best-effort status poll to bring the card back to tran */
		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				SD_RSP_TYPE_R1, NULL, 0);
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_DEBUGP("ACMD13:\n");
	RTSX_DUMP(buf, 64);

	sd_card_type = ((u16)buf[2] << 8) | buf[3];
	RTSX_DEBUGP("sd_card_type = 0x%04x\n", sd_card_type);
	if ((sd_card_type == 0x0001) || (sd_card_type == 0x0002)) {
		/* ROM card or OTP */
		chip->card_wp |= SD_CARD;
	}

	/* Check SD Mechanical Write-Protect Switch */
	val = rtsx_readl(chip, RTSX_BIPR);
	if (val & SD_WRITE_PROTECT) {
		chip->card_wp |= SD_CARD;
	}

	return STATUS_SUCCESS;
}

/* Full SD-card reset and identification state machine: SDIO probe, CMD0/
 * CMD8/ACMD41 init, RCA assignment, optional 1.8V switch, UHS function
 * switch and tuning.  Retries fall back via the Switch_Fail label with
 * progressively reduced capabilities (sd_dont_switch / sd20_mode). */
static int reset_sd(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval, i = 0, j = 0, k =
		0, hi_cap_flow = 0;
	int sd_dont_switch = 0;
	int support_1v8 = 0;
	int try_sdio = 1;
	u8 rsp[16];
	u8 switch_bus_width;
	u32 voltage = 0;
	int sd20_mode = 0;

	SET_SD(sd_card);

Switch_Fail:

	/* Re-entry point after a failed mode switch: restart identification
	 * with the retry counters cleared. */
	i = 0;
	j = 0;
	k = 0;
	hi_cap_flow = 0;

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
		goto SD_UNLOCK_ENTRY;
#endif

	retval = sd_prepare_reset(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_dummy_clock(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Probe for an SDIO card first (CMD5); if one with I/O functions
	 * answers, hand it off by setting chip->sd_io and failing here. */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
		int rty_cnt = 0;

		for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
				sd_set_err_code(chip, SD_NO_CARD);
				TRACE_RET(chip, STATUS_FAIL);
			}

			retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
					SD_RSP_TYPE_R4, rsp, 5);
			if (retval == STATUS_SUCCESS) {
				int func_num = (rsp[1] >> 4) & 0x07;
				if (func_num) {
					RTSX_DEBUGP("SD_IO card (Function number: %d)!\n",
						func_num);
					chip->sd_io = 1;
					TRACE_RET(chip, STATUS_FAIL);
				}

				break;
			}

			sd_init_power(chip);

			sd_dummy_clock(chip);
		}

		RTSX_DEBUGP("Normal card!\n");
	}

	/* Start Initialization Process of SD Card */
RTY_SD_RST:
	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
				NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	wait_timeout(20);

	/* CMD8: a valid echo of pattern 0xAA / voltage 0x01 means the card
	 * is SD 2.0+ and may be high capacity (hi_cap_flow). */
	retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
				SD_RSP_TYPE_R7, rsp, 5);
	if (retval == STATUS_SUCCESS) {
		if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
			hi_cap_flow = 1;
			if (CHECK_PID(chip, 0x5209)) {
				if (sd20_mode) {
					voltage = SUPPORT_VOLTAGE |
						SUPPORT_HIGH_AND_EXTENDED_CAPACITY;
				} else {
					/* Also advertise 1.8V and max
					 * performance to open UHS modes */
					voltage = SUPPORT_VOLTAGE |
						SUPPORT_HIGH_AND_EXTENDED_CAPACITY |
						SUPPORT_MAX_POWER_PERMANCE |
						SUPPORT_1V8;
				}
			} else {
				voltage = SUPPORT_VOLTAGE | 0x40000000;
			}
		}
	}

	if (!hi_cap_flow) {
		voltage = SUPPORT_VOLTAGE;

		retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
					SD_RSP_TYPE_R0, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		wait_timeout(20);
	}

	/* ACMD41 loop: poll until the card reports power-up done (busy bit
	 * rsp[1] & 0x80 set) or 255 iterations elapse.  CMD55/ACMD41 errors
	 * each get up to 3 full restarts via RTY_SD_RST. */
	do {
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
					NULL, 0);
		if (retval != STATUS_SUCCESS) {
			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
				sd_set_err_code(chip, SD_NO_CARD);
				TRACE_RET(chip, STATUS_FAIL);
			}

			j++;
			if (j < 3) {
				goto RTY_SD_RST;
			} else {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
					SD_RSP_TYPE_R3, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			k++;
			if (k < 3) {
				goto RTY_SD_RST;
			} else {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		i++;
		wait_timeout(20);
	} while (!(rsp[1] & 0x80) && (i < 255));

	if (i == 255) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (hi_cap_flow) {
		/* OCR CCS bit => SDHC/SDXC addressing */
		if (rsp[1] & 0x40) {
			SET_SD_HCXC(sd_card);
		} else {
			CLR_SD_HCXC(sd_card);
		}

		if (CHECK_PID(chip, 0x5209) && CHK_SD_HCXC(sd_card) &&
				!sd20_mode) {
			/* S18A bit: card accepted the 1.8V switch request */
			support_1v8 = (rsp[1] & 0x01) ? 1 : 0;
		} else {
			support_1v8 = 0;
		}
	} else {
		CLR_SD_HCXC(sd_card);
		support_1v8 = 0;
	}

	RTSX_DEBUGP("support_1v8 = %d\n", support_1v8);

	if (support_1v8) {
		retval = sd_voltage_switch(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
				NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Ask for an RCA up to three times; 0 is not a usable address */
	for (i = 0; i < 3; i++) {
		retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
					SD_RSP_TYPE_R6, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		sd_card->sd_addr = (u32)rsp[1] << 24;
		sd_card->sd_addr += (u32)rsp[2] << 16;

		if (sd_card->sd_addr) {
			break;
		}
	}

	retval = sd_check_csd(chip, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_select_card(chip, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

#ifdef SUPPORT_SD_LOCK
SD_UNLOCK_ENTRY:
	retval = sd_update_lock_status(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* A locked card stays in 1-bit mode; nothing more to set up */
	if (sd_card->sd_lock_status & SD_LOCKED) {
		sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
		return STATUS_SUCCESS;
	} else if (!(sd_card->sd_lock_status & SD_UNLOCK_POW_ON)) {
		sd_card->sd_lock_status &= ~SD_PWD_EXIST;
	}
#endif

	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
				SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* ACMD42: disconnect the card's DAT3 pull-up */
	retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
				SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (support_1v8) {
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
					SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* ACMD6 arg 2 => 4-bit bus */
		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
					SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		switch_bus_width = SD_BUS_WIDTH_4;
	} else {
		switch_bus_width = SD_BUS_WIDTH_1;
	}

	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
				SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* CSD CCC class 10 bit absent => card cannot do CMD6 switch */
	if (!(sd_card->raw_csd[4] & 0x40))
		sd_dont_switch = 1;

	if (!sd_dont_switch) {
		if (sd20_mode) {
			/* Set sd_switch_fail here, because we needn't
			 * switch to UHS mode */
			sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
				DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
		}

		/* Check the card whether follow SD1.1 spec or higher */
		retval = sd_check_spec(chip, switch_bus_width);
		if (retval == STATUS_SUCCESS) {
			retval = sd_switch_function(chip, switch_bus_width);
			if (retval != STATUS_SUCCESS) {
				if (CHECK_PID(chip, 0x5209)) {
					sd_change_bank_voltage(chip,
							SD_IO_3V3);
				}
				sd_init_power(chip);
				sd_dont_switch = 1;
				try_sdio = 0;

				goto Switch_Fail;
			}
		} else {
			if (support_1v8) {
				if (CHECK_PID(chip, 0x5209)) {
					sd_change_bank_voltage(chip,
							SD_IO_3V3);
				}
				sd_init_power(chip);
				sd_dont_switch = 1;
				try_sdio = 0;

				goto Switch_Fail;
			}
		}
	}

	if (!support_1v8) {
		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
					SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
					SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif

	/* UHS path: tune phases, then verify with CMD13 and an LBA0 read.
	 * Any failure falls back to SD 2.0 (sd20_mode) via Switch_Fail. */
	if (!sd20_mode && CHK_SD30_SPEED(sd_card)) {
		int read_lba0 = 1;

		RTSX_WRITE_REG(chip, SD30_DRIVE_SEL, 0x07,
			chip->sd30_drive_sel_1v8);

		retval = sd_set_init_para(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (CHK_SD_DDR50(sd_card)) {
			retval = sd_ddr_tuning(chip);
		} else {
			retval = sd_sdr_tuning(chip);
		}

		if (retval != STATUS_SUCCESS) {
			if (sd20_mode) {
				TRACE_RET(chip, STATUS_FAIL);
			} else {
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				try_sdio = 0;
				sd20_mode = 1;
				goto Switch_Fail;
			}
		}

		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				SD_RSP_TYPE_R1, NULL, 0);

		if (CHK_SD_DDR50(sd_card)) {
			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
			if (retval != STATUS_SUCCESS) {
				read_lba0 = 0;
			}
		}

		if (read_lba0) {
			retval = sd_read_lba0(chip);
			if (retval != STATUS_SUCCESS) {
				if (sd20_mode) {
					TRACE_RET(chip, STATUS_FAIL);
				} else {
					retval = sd_init_power(chip);
					if (retval != STATUS_SUCCESS) {
						TRACE_RET(chip, STATUS_FAIL);
					}

					try_sdio = 0;
					sd20_mode = 1;
					goto Switch_Fail;
				}
			}
		}
	}

	retval = sd_check_wp_state(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
		RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
		RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
	}
#endif

	return STATUS_SUCCESS;
}

/* eMMC bus-test procedure (CMD19 BUSTEST_W / CMD14 BUSTEST_R): write a
 * known pattern at the candidate width, read it back inverted, and on
 * success commit the width via CMD6 SWITCH.  Returns SWITCH_SUCCESS,
 * SWITCH_FAIL (try a narrower bus) or SWITCH_ERR (hard error). */
static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u8 buf[8] = {0}, bus_width, *ptr;
	u16 byte_cnt;
	int len;

	retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1,
				NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, SWITCH_FAIL);
	}

	/* Test pattern per JEDEC: 0x55,0xAA on 8 data lines, 0x5A on 4 */
	if (width == MMC_8BIT_BUS) {
		buf[0] = 0x55;
		buf[1] = 0xAA;
		len = 8;
		byte_cnt = 8;
		bus_width = SD_BUS_WIDTH_8;
	} else {
		buf[0] = 0x5A;
		len = 4;
		byte_cnt = 4;
		bus_width = SD_BUS_WIDTH_4;
	}

	if (!CHECK_PID(chip, 0x5209)) {
		/* presumably a CRC-ignore bit for the bus-test transfer on
		 * pre-5209 chips — TODO confirm against datasheet */
		retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, SWITCH_ERR);
	}

	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt,
			1, bus_width, buf, len, 100);
	if (retval != STATUS_SUCCESS) {
		if (CHECK_PID(chip, 0x5209)) {
			u8 val1 = 0, val2 = 0;

			/* Distinguish a benign CRC miss (expected on the
			 * unused lines) from a real transfer error. */
			rtsx_read_register(chip, REG_SD_STAT1, &val1);
			rtsx_read_register(chip, REG_SD_STAT2, &val2);
			rtsx_clear_sd_error(chip);
			if ((val1 & 0xE0) || val2) {
				TRACE_RET(chip, SWITCH_ERR);
			}
		} else {
			rtsx_clear_sd_error(chip);
			rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
			TRACE_RET(chip, SWITCH_ERR);
		}
	}

	if (!CHECK_PID(chip, 0x5209)) {
		retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, SWITCH_ERR);
	}

	RTSX_DEBUGP("SD/MMC CMD %d\n", BUSTEST_R);

	/* Read back the inverted pattern through the ping-pong buffer */
	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
		0x40 | BUSTEST_R);

	if (width == MMC_8BIT_BUS) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
			0xFF, 0x08);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
			0xFF, 0x04);
	}

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
		SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		SD_CHECK_CRC7 | SD_RSP_LEN_6);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
		SD_TM_NORMAL_READ | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		SD_TRANSFER_END);

	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
	if (width == MMC_8BIT_BUS) {
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 1, 0, 0);
	}

	retval = rtsx_send_cmd(chip, SD_CARD, 100);
	if (retval < 0) {
		rtsx_clear_sd_error(chip);
		TRACE_RET(chip, SWITCH_ERR);
	}

	ptr = rtsx_get_cmd_data(chip) + 1;

	if (width == MMC_8BIT_BUS) {
		RTSX_DEBUGP("BUSTEST_R [8bits]: 0x%02x 0x%02x\n",
			ptr[0], ptr[1]);
		/* Inverse of the 0x55/0xAA pattern => all 8 lines good */
		if ((ptr[0] == 0xAA) && (ptr[1] == 0x55)) {
			u8 rsp[5];
			u32 arg;

			/* CMD6: set BUS_WIDTH ext_csd byte (6 = 8-bit DDR,
			 * 2 = 8-bit SDR) — values follow JEDEC encoding */
			if (CHK_MMC_DDR52(sd_card)) {
				arg = 0x03B70600;
			} else {
				arg = 0x03B70200;
			}

			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
						SD_RSP_TYPE_R1b, rsp, 5);
			if ((retval == STATUS_SUCCESS) &&
					!(rsp[4] & MMC_SWITCH_ERR)) {
				return SWITCH_SUCCESS;
			}
		}
	} else {
		RTSX_DEBUGP("BUSTEST_R [4bits]: 0x%02x\n", ptr[0]);
		/* Inverse of 0x5A => all 4 lines good */
		if (ptr[0] == 0xA5) {
			u8 rsp[5];
			u32 arg;

			/* 5 = 4-bit DDR, 1 = 4-bit SDR */
			if (CHK_MMC_DDR52(sd_card)) {
				arg = 0x03B70500;
			} else {
				arg = 0x03B70100;
			}

			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
						SD_RSP_TYPE_R1b, rsp, 5);
			if ((retval == STATUS_SUCCESS) &&
					!(rsp[4] & MMC_SWITCH_ERR)) {
				return SWITCH_SUCCESS;
			}
		}
	}

	TRACE_RET(chip, SWITCH_FAIL);
}

/* Read EXT_CSD (CMD8 for MMC), derive capacity and speed class, switch the
 * card to high speed (CMD6 HS_TIMING), then negotiate the widest working
 * bus via mmc_test_switch_bus(). */
static int mmc_switch_timing_bus(struct rtsx_chip *chip, int switch_ddr)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;
	u8 *ptr, card_type, card_type_mask = 0;

	CLR_MMC_HS(sd_card);

	RTSX_DEBUGP("SD/MMC CMD %d\n", SEND_EXT_CSD);

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
		0x40 | SEND_EXT_CSD);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 0);

	/* One 512-byte block (byte count 0x200) */
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 2);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
		SD_CHECK_CRC7 | SD_RSP_LEN_6);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD,
		REG_SD_TRANSFER, 0xFF, SD_TM_NORMAL_READ | SD_TRANSFER_START);
	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
		SD_TRANSFER_END);

	/* Only the EXT_CSD bytes of interest are pulled from the ping-pong
	 * buffer: 196 = CARD_TYPE, 212-215 = SEC_COUNT (little endian). */
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 213, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 214, 0xFF, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 215, 0xFF, 0);

	retval = rtsx_send_cmd(chip, SD_CARD, 1000);
	if (retval < 0) {
		if (retval == -ETIMEDOUT) {
			rtsx_clear_sd_error(chip);
			sd_send_cmd_get_rsp(chip, SEND_STATUS,
					sd_card->sd_addr, SD_RSP_TYPE_R1,
					NULL, 0);
		}
		TRACE_RET(chip, STATUS_FAIL);
	}

	ptr = rtsx_get_cmd_data(chip);
	if (ptr[0] & SD_TRANSFER_ERR) {
		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
				SD_RSP_TYPE_R1, NULL, 0);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_MMC_SECTOR_MODE(sd_card)) {
		/* Capacity in sectors from SEC_COUNT (ptr[2..5]) */
		sd_card->capacity = ((u32)ptr[5] << 24) | ((u32)ptr[4] << 16) |
			((u32)ptr[3] << 8) | ((u32)ptr[2]);
	}

	/* Mask off CARD_TYPE bits we may use: bit2 = DDR52 (only when DDR
	 * mode is enabled and, with SD lock support, no SDR reset pending) */
	if (CHECK_PID(chip, 0x5209)) {
#ifdef SUPPORT_SD_LOCK
		if (!(sd_card->sd_lock_status & SD_SDR_RST) &&
				(chip->sd_ctl & SUPPORT_MMC_DDR_MODE)) {
			card_type_mask = 0x07;
		} else {
			card_type_mask = 0x03;
		}
#else
		if (chip->sd_ctl & SUPPORT_MMC_DDR_MODE) {
			card_type_mask = 0x07;
		} else {
			card_type_mask = 0x03;
		}
#endif
	} else {
		card_type_mask = 0x03;
	}

	card_type = ptr[1] & card_type_mask;
	if (card_type) {
		u8 rsp[5];

		if (card_type & 0x04) {
			if (switch_ddr) {
				SET_MMC_DDR52(sd_card);
			} else {
				SET_MMC_52M(sd_card);
			}
		} else if (card_type & 0x02) {
			SET_MMC_52M(sd_card);
		} else {
			SET_MMC_26M(sd_card);
		}

		/* CMD6: set EXT_CSD HS_TIMING=1 (high speed) */
		retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
					SD_RSP_TYPE_R1b, rsp, 5);
		if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR)) {
			CLR_MMC_HS(sd_card);
		}
	}

	sd_choose_proper_clock(chip);
	retval = switch_clock(chip, sd_card->sd_clock);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Test Bus Procedure */
	retval = mmc_test_switch_bus(chip, MMC_8BIT_BUS);
	if (retval == SWITCH_SUCCESS) {
		SET_MMC_8BIT(sd_card);
		chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
#ifdef SUPPORT_SD_LOCK
		sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
	} else if (retval == SWITCH_FAIL) {
		/* 8-bit failed cleanly: fall back to 4-bit, then 1-bit */
		retval = mmc_test_switch_bus(chip, MMC_4BIT_BUS);
		if (retval == SWITCH_SUCCESS) {
			SET_MMC_4BIT(sd_card);
			chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
#ifdef SUPPORT_SD_LOCK
			sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
		} else if (retval == SWITCH_FAIL) {
			CLR_MMC_8BIT(sd_card);
			CLR_MMC_4BIT(sd_card);
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Full eMMC reset and identification: CMD0/CMD1 init loop, CID/RCA/CSD,
 * then (for MMC 4.x) timing/bus-width negotiation with a DDR fallback
 * retry via the Switch_Fail label. */
static int reset_mmc(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval, i = 0, j = 0, k = 0;
	int switch_ddr = 1;
	u8 rsp[16];
	u8 spec_ver = 0;
	u32 temp;

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
		goto MMC_UNLOCK_ENTRY;
#endif

Switch_Fail:

	retval = sd_prepare_reset(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, retval);
	}

	SET_MMC(sd_card);

RTY_MMC_RST:
	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
				NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* CMD1 loop: poll until the card leaves busy (rsp[1] & 0x80).
	 * Busy/timeout errors restart up to 20 times, other errors up to
	 * 100 times, both via RTY_MMC_RST. */
	do {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
					(SUPPORT_VOLTAGE | 0x40000000),
					SD_RSP_TYPE_R3, rsp, 5);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_BUSY) ||
					sd_check_err_code(chip, SD_TO_ERR)) {
				k++;
				if (k < 20) {
					sd_clr_err_code(chip);
					goto RTY_MMC_RST;
				} else {
					TRACE_RET(chip, STATUS_FAIL);
				}
			} else {
				j++;
				if (j < 100) {
					sd_clr_err_code(chip);
					goto RTY_MMC_RST;
				} else {
					TRACE_RET(chip, STATUS_FAIL);
				}
			}
		}

		wait_timeout(20);
		i++;
	} while (!(rsp[1] & 0x80) && (i < 255));

	if (i == 255) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* OCR access-mode bits 0b10 => sector addressing (>2GB) */
	if ((rsp[1] & 0x60) == 0x40) {
		SET_MMC_SECTOR_MODE(sd_card);
	} else {
		CLR_MMC_SECTOR_MODE(sd_card);
	}

	retval =
		sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
				NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* For MMC the host assigns the RCA (CMD3 SET_RELATIVE_ADDR) */
	sd_card->sd_addr = 0x00100000;
	retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR,
				sd_card->sd_addr, SD_RSP_TYPE_R6, rsp, 5);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_check_csd(chip, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* CSD SPEC_VERS field; 4 means MMC 4.x (EXT_CSD available) */
	spec_ver = (sd_card->raw_csd[0] & 0x3C) >> 2;

	retval = sd_select_card(chip, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
				SD_RSP_TYPE_R1, NULL, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

#ifdef SUPPORT_SD_LOCK
MMC_UNLOCK_ENTRY:
	retval = sd_update_lock_status(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}
#endif

	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	chip->card_bus_width[chip->card2lun[SD_CARD]] = 1;

	if (!sd_card->mmc_dont_switch_bus) {
		if (spec_ver == 4) {
			/* MMC 4.x Cards */
			retval = mmc_switch_timing_bus(chip, switch_ddr);
			if (retval != STATUS_SUCCESS) {
				/* Retry the whole reset with bus switching
				 * disabled */
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, STATUS_FAIL);
				sd_card->mmc_dont_switch_bus = 1;
				TRACE_GOTO(chip, Switch_Fail);
			}
		}

		if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0)) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* DDR52 path must survive tuning and a test read of LBA0;
		 * otherwise power-cycle and redo the reset without DDR. */
		if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
			retval = sd_set_init_para(chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}

			retval = mmc_ddr_tuning(chip);
			if (retval != STATUS_SUCCESS) {
				retval = sd_init_power(chip);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				switch_ddr = 0;
				TRACE_GOTO(chip, Switch_Fail);
			}

			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
			if (retval == STATUS_SUCCESS) {
				retval = sd_read_lba0(chip);
				if (retval != STATUS_SUCCESS) {
					retval = sd_init_power(chip);
					if (retval != STATUS_SUCCESS) {
						TRACE_RET(chip, STATUS_FAIL);
					}

					switch_ddr = 0;
					TRACE_GOTO(chip, Switch_Fail);
				}
			}
		}
	}

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
		RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
		RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
	}
#endif

	/* Slot write-protect switch */
	temp = rtsx_readl(chip, RTSX_BIPR);
	if (temp & SD_WRITE_PROTECT) {
		chip->card_wp |= SD_CARD;
	}

	return STATUS_SUCCESS;
}

/* Top-level SD/MMC reset entry: powers the slot, then tries reset_sd()
 * and reset_mmc() in the order selected by RESET_MMC_FIRST, handling the
 * SDIO-card hand-off and 5209 voltage-bank restore on failure. */
int reset_sd_card(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	sd_init_reg_addr(chip);

	memset(sd_card, 0, sizeof(struct sd_info));
	chip->capacity[chip->card2lun[SD_CARD]] = 0;

	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* ignore_sd + SDIO present: set up pads/sharing for the SDIO host
	 * and report failure so the SD path is not claimed. */
	if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
			!CHK_SDIO_IGNORED(chip)) {
		if (chip->asic_code) {
			retval = sd_pull_ctl_enable(chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			retval = rtsx_write_register(chip, FPGA_PULL_CTL,
					FPGA_SD_PULL_CTL_BIT | 0x20, 0);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		retval = card_share_mode(chip, SD_CARD);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		chip->sd_io = 1;
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_init_power(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (chip->sd_ctl & RESET_MMC_FIRST) {
		retval = reset_mmc(chip);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_NO_CARD))
				TRACE_RET(chip, STATUS_FAIL);

			retval = reset_sd(chip);
			if (retval != STATUS_SUCCESS) {
				if (CHECK_PID(chip, 0x5209))
					sd_change_bank_voltage(chip,
							SD_IO_3V3);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	} else {
		retval = reset_sd(chip);
		if (retval != STATUS_SUCCESS) {
			if (sd_check_err_code(chip, SD_NO_CARD))
				TRACE_RET(chip, STATUS_FAIL);

			if (CHECK_PID(chip, 0x5209)) {
				retval = sd_change_bank_voltage(chip,
							SD_IO_3V3);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, STATUS_FAIL);
			}

			/* reset_sd() flagged an SDIO card: don't try MMC */
			if (chip->sd_io) {
				TRACE_RET(chip, STATUS_FAIL);
			}
			else {
				retval = reset_mmc(chip);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Default transfer unit: 512-byte blocks (byte count 0x200) */
	RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
	RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);

	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;

	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_DEBUGP("sd_card->sd_type = 0x%x\n", sd_card->sd_type);

	return STATUS_SUCCESS;
}

/* Like reset_sd_card() but restricted to the MMC path; used when the
 * caller already knows the card is eMMC/MMC. */
static int reset_mmc_only(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	sd_card->sd_type = 0;
	sd_card->seq_mode = 0;
	sd_card->sd_data_buf_ready = 0;
	sd_card->capacity = 0;
	sd_card->sd_switch_fail = 0;

#ifdef SUPPORT_SD_LOCK
	sd_card->sd_lock_status = 0;
	sd_card->sd_erase_status = 0;
#endif

	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity = 0;

	retval = enable_card_clock(chip, SD_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_init_power(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = reset_mmc(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
	RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);

	chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;

	retval = sd_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_DEBUGP("In reset_mmc_only, sd_card->sd_type = 0x%x\n",
		sd_card->sd_type);

	return STATUS_SUCCESS;
}

#define WAIT_DATA_READY_RTY_CNT		255

/* Poll CMD13 until the card reports its data buffer ready (flag set as a
 * side effect of sd_send_cmd_get_rsp — presumably in the R1 status parse;
 * TODO confirm), up to WAIT_DATA_READY_RTY_CNT attempts. */
static int wait_data_buf_ready(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int i, retval;

	for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
			sd_set_err_code(chip,
					SD_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		sd_card->sd_data_buf_ready = 0;

		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
				sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (sd_card->sd_data_buf_ready) {
			/* Ready: one more CMD13 and return its result */
			return sd_send_cmd_get_rsp(chip, SEND_STATUS,
					sd_card->sd_addr, SD_RSP_TYPE_R1,
					NULL, 0);
		}
	}

	sd_set_err_code(chip, SD_TO_ERR);

	TRACE_RET(chip, STATUS_FAIL);
}

/* Terminate an open-ended (sequential) transfer: CMD12 STOP_TRANSMISSION,
 * wait for the card to be ready again, and flush the ring buffer. */
void sd_stop_seq_mode(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	if (sd_card->seq_mode) {
		retval = sd_switch_clock(chip);
		if (retval != STATUS_SUCCESS) {
			return;
		}

		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
					SD_RSP_TYPE_R1b, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_STS_ERR);
		}

		retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
		if (retval != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_STS_ERR);
		}

		sd_card->seq_mode = 0;

		rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
	}
}

/* After repeated transfer errors, step the card clock down one notch
 * (asic: -20 units above 30; FPGA: next lower CLK_* constant) and
 * re-apply it.  Best-effort error-recovery knob for sd_rw(). */
static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	int retval;

	if (chip->asic_code) {
		if (sd_card->sd_clock > 30) {
			sd_card->sd_clock -= 20;
		}
	} else {
		switch (sd_card->sd_clock) {
		case CLK_200:
			sd_card->sd_clock = CLK_150;
			break;

		case CLK_150:
			sd_card->sd_clock = CLK_120;
			break;

		case CLK_120:
			sd_card->sd_clock = CLK_100;
			break;

		case CLK_100:
			sd_card->sd_clock = CLK_80;
			break;

		case CLK_80:
			sd_card->sd_clock = CLK_60;
			break;

		case CLK_60:
			sd_card->sd_clock = CLK_50;
			break;

		default:
			break;
		}
	}

	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* SCSI READ/WRITE work-horse for the SD LUN.  Keeps an open sequential
 * (multi-block) transfer across calls when the next request continues at
 * the following sector in the same direction; otherwise stops the old
 * stream (CMD12 + buffer flush) before starting a new one. */
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
	u16 sector_cnt)
{
	struct sd_info *sd_card = &(chip->sd_card);
	u32 data_addr;
	u8 cfg2;
	int retval;

	if (srb->sc_data_direction == DMA_FROM_DEVICE) {
		RTSX_DEBUGP("sd_rw: Read %d %s from 0x%x\n", sector_cnt,
			(sector_cnt > 1) ? "sectors" : "sector", start_sector);
	} else {
		RTSX_DEBUGP("sd_rw: Write %d %s to 0x%x\n", sector_cnt,
			(sector_cnt > 1) ? "sectors" : "sector", start_sector);
	}

	sd_card->cleanup_counter = 0;

	if (!(chip->card_ready & SD_CARD)) {
		sd_card->seq_mode = 0;

		retval = reset_sd_card(chip);
		if (retval == STATUS_SUCCESS) {
			chip->card_ready |= SD_CARD;
			chip->card_fail &= ~SD_CARD;
		} else {
			chip->card_ready &= ~SD_CARD;
			chip->card_fail |= SD_CARD;
			chip->capacity[chip->card2lun[SD_CARD]] = 0;
			chip->rw_need_retry = 1;
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	/* Standard-capacity cards are byte-addressed (sector << 9);
	 * HC/XC SD and sector-mode MMC are block-addressed. */
	if (!CHK_SD_HCXC(sd_card) && !CHK_MMC_SECTOR_MODE(sd_card)) {
		data_addr = start_sector << 9;
	} else {
		data_addr = start_sector;
	}

	sd_clr_err_code(chip);

	retval = sd_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		sd_set_err_code(chip, SD_IO_ERR);
		TRACE_GOTO(chip, RW_FAIL);
	}

	/* Request does not continue the open stream: close it first */
	if (sd_card->seq_mode &&
			((sd_card->pre_dir != srb->sc_data_direction) ||
			((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
				start_sector))) {
		/* Extra CMD13 before CMD12 on short low-speed reads —
		 * presumably a card-compatibility workaround; rationale
		 * not visible here. */
		if ((sd_card->pre_sec_cnt < 0x80) &&
				(sd_card->pre_dir == DMA_FROM_DEVICE) &&
				!CHK_SD30_SPEED(sd_card) &&
				!CHK_SD_HS(sd_card) &&
				!CHK_MMC_HS(sd_card)) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS,
					sd_card->sd_addr, SD_RSP_TYPE_R1,
					NULL, 0);
		}

		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
					SD_RSP_TYPE_R1b, NULL, 0);
		if (retval != STATUS_SUCCESS) {
			chip->rw_need_retry = 1;
			sd_set_err_code(chip, SD_STS_ERR);
			TRACE_GOTO(chip, RW_FAIL);
		}

		sd_card->seq_mode = 0;

		retval = rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
		if (retval != STATUS_SUCCESS) {
			sd_set_err_code(chip, SD_IO_ERR);
			TRACE_GOTO(chip, RW_FAIL);
		}

		if ((sd_card->pre_sec_cnt < 0x80) &&
				!CHK_SD30_SPEED(sd_card) &&
				!CHK_SD_HS(sd_card) &&
				!CHK_MMC_HS(sd_card)) {
			sd_send_cmd_get_rsp(chip, SEND_STATUS,
					sd_card->sd_addr, SD_RSP_TYPE_R1,
					NULL, 0);
		}
	}

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
(u8)sector_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, (u8)(sector_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); if (CHK_MMC_8BIT(sd_card)) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_8); } else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card)) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_1); } if (sd_card->seq_mode) { cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; if (CHECK_PID(chip, 0x5209)) { if (!CHK_SD30_SPEED(sd_card)) { cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2); trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512, DMA_512); if (srb->sc_data_direction == DMA_FROM_DEVICE) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_READ_3 | SD_TRANSFER_START); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); } rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); } else { if (srb->sc_data_direction == DMA_FROM_DEVICE) { RTSX_DEBUGP("SD/MMC CMD %d\n", READ_MULTIPLE_BLOCK); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | READ_MULTIPLE_BLOCK); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(data_addr >> 24)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(data_addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(data_addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)data_addr); cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6; if (CHECK_PID(chip, 0x5209)) { if (!CHK_SD30_SPEED(sd_card)) { cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2); trans_dma_enable(srb->sc_data_direction, 
chip, sector_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); } else { retval = rtsx_send_cmd(chip, SD_CARD, 50); if (retval < 0) { rtsx_clear_sd_error(chip); chip->rw_need_retry = 1; sd_set_err_code(chip, SD_TO_ERR); TRACE_GOTO(chip, RW_FAIL); } retval = wait_data_buf_ready(chip); if (retval != STATUS_SUCCESS) { chip->rw_need_retry = 1; sd_set_err_code(chip, SD_TO_ERR); TRACE_GOTO(chip, RW_FAIL); } retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK, data_addr, SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { chip->rw_need_retry = 1; TRACE_GOTO(chip, RW_FAIL); } rtsx_init_cmd(chip); cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; if (CHECK_PID(chip, 0x5209)) { if (!CHK_SD30_SPEED(sd_card)) { cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2); trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); } sd_card->seq_mode = 1; } retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), scsi_bufflen(srb), scsi_sg_count(srb), srb->sc_data_direction, chip->sd_timeout); if (retval < 0) { u8 stat = 0; int err; sd_card->seq_mode = 0; if (retval == -ETIMEDOUT) { err = STATUS_TIMEDOUT; } else { err = STATUS_FAIL; } rtsx_read_register(chip, REG_SD_STAT1, &stat); rtsx_clear_sd_error(chip); if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { chip->rw_need_retry = 0; RTSX_DEBUGP("No card exist, exit sd_rw\n"); TRACE_RET(chip, STATUS_FAIL); } chip->rw_need_retry = 1; retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, SD_RSP_TYPE_R1b, NULL, 0); if 
(retval != STATUS_SUCCESS) { sd_set_err_code(chip, SD_STS_ERR); TRACE_GOTO(chip, RW_FAIL); } if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) { RTSX_DEBUGP("SD CRC error, tune clock!\n"); sd_set_err_code(chip, SD_CRC_ERR); TRACE_GOTO(chip, RW_FAIL); } if (err == STATUS_TIMEDOUT) { sd_set_err_code(chip, SD_TO_ERR); TRACE_GOTO(chip, RW_FAIL); } TRACE_RET(chip, err); } sd_card->pre_sec_addr = start_sector; sd_card->pre_sec_cnt = sector_cnt; sd_card->pre_dir = srb->sc_data_direction; return STATUS_SUCCESS; RW_FAIL: sd_card->seq_mode = 0; if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { chip->rw_need_retry = 0; RTSX_DEBUGP("No card exist, exit sd_rw\n"); TRACE_RET(chip, STATUS_FAIL); } if (sd_check_err_code(chip, SD_CRC_ERR)) { if (CHK_MMC_4BIT(sd_card) || CHK_MMC_8BIT(sd_card)) { sd_card->mmc_dont_switch_bus = 1; reset_mmc_only(chip); sd_card->mmc_dont_switch_bus = 0; } else { sd_card->need_retune = 1; sd_auto_tune_clock(chip); } } else if (sd_check_err_code(chip, SD_TO_ERR | SD_STS_ERR)) { retval = reset_sd_card(chip); if (retval != STATUS_SUCCESS) { chip->card_ready &= ~SD_CARD; chip->card_fail |= SD_CARD; chip->capacity[chip->card2lun[SD_CARD]] = 0; } } TRACE_RET(chip, STATUS_FAIL); } #ifdef SUPPORT_CPRM int soft_reset_sd_card(struct rtsx_chip *chip) { return reset_sd(chip); } int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, int special_check) { int retval; int timeout = 100; u16 reg_addr; u8 *ptr; int stat_idx = 0; int rty_cnt = 0; RTSX_DEBUGP("EXT SD/MMC CMD %d\n", cmd_idx); if (rsp_type == SD_RSP_TYPE_R1b) { timeout = 3000; } RTY_SEND_CMD: rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 
(u8)arg); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); if (rsp_type == SD_RSP_TYPE_R2) { for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0); } stat_idx = 17; } else if (rsp_type != SD_RSP_TYPE_R0) { for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0); } stat_idx = 6; } rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0, 0); rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { rtsx_clear_sd_error(chip); if (rsp_type & SD_WAIT_BUSY_END) { retval = sd_check_data0_status(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, retval); } } else { sd_set_err_code(chip, SD_TO_ERR); } } TRACE_RET(chip, STATUS_FAIL); } if (rsp_type == SD_RSP_TYPE_R0) { return STATUS_SUCCESS; } ptr = rtsx_get_cmd_data(chip) + 1; if ((ptr[0] & 0xC0) != 0) { sd_set_err_code(chip, SD_STS_ERR); TRACE_RET(chip, STATUS_FAIL); } if (!(rsp_type & SD_NO_CHECK_CRC7)) { if (ptr[stat_idx] & SD_CRC7_ERR) { if (cmd_idx == WRITE_MULTIPLE_BLOCK) { sd_set_err_code(chip, SD_CRC_ERR); TRACE_RET(chip, STATUS_FAIL); } if (rty_cnt < SD_MAX_RETRY_COUNT) { wait_timeout(20); rty_cnt++; goto RTY_SEND_CMD; } else { sd_set_err_code(chip, SD_CRC_ERR); TRACE_RET(chip, STATUS_FAIL); } } } if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) || (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) { if ((cmd_idx != STOP_TRANSMISSION) && (special_check == 0)) { if (ptr[1] & 0x80) { TRACE_RET(chip, STATUS_FAIL); } } #ifdef SUPPORT_SD_LOCK if (ptr[1] & 0x7D) #else if (ptr[1] & 0x7F) #endif { TRACE_RET(chip, STATUS_FAIL); } if 
(ptr[2] & 0xF8) { TRACE_RET(chip, STATUS_FAIL); } if (cmd_idx == SELECT_CARD) { if (rsp_type == SD_RSP_TYPE_R2) { if ((ptr[3] & 0x1E) != 0x04) { TRACE_RET(chip, STATUS_FAIL); } } else if (rsp_type == SD_RSP_TYPE_R0) { if ((ptr[3] & 0x1E) != 0x03) { TRACE_RET(chip, STATUS_FAIL); } } } } if (rsp && rsp_len) { memcpy(rsp, ptr, rsp_len); } return STATUS_SUCCESS; } int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type) { int retval, rsp_len; u16 reg_addr; if (rsp_type == SD_RSP_TYPE_R0) { return STATUS_SUCCESS; } rtsx_init_cmd(chip); if (rsp_type == SD_RSP_TYPE_R2) { for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0); } rsp_len = 17; } else if (rsp_type != SD_RSP_TYPE_R0) { for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4; reg_addr++) { rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0); } rsp_len = 6; } rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0xFF, 0); retval = rtsx_send_cmd(chip, SD_CARD, 100); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (rsp) { int min_len = (rsp_len < len) ? 
rsp_len : len; memcpy(rsp, rtsx_get_cmd_data(chip), min_len); RTSX_DEBUGP("min_len = %d\n", min_len); RTSX_DEBUGP("Response in cmd buf: 0x%x 0x%x 0x%x 0x%x\n", rsp[0], rsp[1], rsp[2], rsp[3]); } return STATUS_SUCCESS; } int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int len; u8 buf[18] = { 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x53, 0x44, 0x20, 0x43, 0x61, 0x72, 0x64, 0x00, 0x00, 0x00, }; sd_card->pre_cmd_err = 0; if (!(CHK_BIT(chip->lun_mc, lun))) { SET_BIT(chip->lun_mc, lun); set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) || (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) || (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) || (0x64 != srb->cmnd[8])) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } switch (srb->cmnd[1] & 0x0F) { case 0: sd_card->sd_pass_thru_en = 0; break; case 1: sd_card->sd_pass_thru_en = 1; break; default: set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } buf[5] = (1 == CHK_SD(sd_card)) ? 
0x01 : 0x02; if (chip->card_wp & SD_CARD) { buf[5] |= 0x80; } buf[6] = (u8)(sd_card->sd_addr >> 16); buf[7] = (u8)(sd_card->sd_addr >> 24); buf[15] = chip->max_lun; len = min(18, (int)scsi_bufflen(srb)); rtsx_stor_set_xfer_buf(buf, len, srb); return TRANSPORT_GOOD; } static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type, int *rsp_len) { if (!rsp_type || !rsp_len) { return STATUS_FAIL; } switch (srb->cmnd[10]) { case 0x03: *rsp_type = SD_RSP_TYPE_R0; *rsp_len = 0; break; case 0x04: *rsp_type = SD_RSP_TYPE_R1; *rsp_len = 6; break; case 0x05: *rsp_type = SD_RSP_TYPE_R1b; *rsp_len = 6; break; case 0x06: *rsp_type = SD_RSP_TYPE_R2; *rsp_len = 17; break; case 0x07: *rsp_type = SD_RSP_TYPE_R3; *rsp_len = 6; break; default: return STATUS_FAIL; } return STATUS_SUCCESS; } int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int retval, rsp_len; u8 cmd_idx, rsp_type; u8 standby = 0, acmd = 0; u32 arg; if (!sd_card->sd_pass_thru_en) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } cmd_idx = srb->cmnd[2] & 0x3F; if (srb->cmnd[1] & 0x02) { standby = 1; } if (srb->cmnd[1] & 0x01) { acmd = 1; } arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) | ((u32)srb->cmnd[5] << 8) | srb->cmnd[6]; retval = get_rsp_type(srb, &rsp_type, &rsp_len); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } sd_card->last_rsp_type = rsp_type; retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } #ifdef SUPPORT_SD_LOCK if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 
0) { if (CHK_MMC_8BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_8); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } } } #else retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } #endif if (standby) { retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Cmd_Failed); } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Cmd_Failed); } } retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, sd_card->rsp, rsp_len, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Cmd_Failed); } if (standby) { retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Cmd_Failed); } } #ifdef SUPPORT_SD_LOCK retval = sd_update_lock_status(chip); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Cmd_Failed); } #endif scsi_set_resid(srb, 0); return TRANSPORT_GOOD; SD_Execute_Cmd_Failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); release_sd_card(chip); do_reset_sd_card(chip); if (!(chip->card_ready & SD_CARD)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); } TRACE_RET(chip, TRANSPORT_FAILED); } int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int retval, rsp_len, i; int cmd13_checkbit = 0, read_err = 0; u8 cmd_idx, rsp_type, bus_width; u8 send_cmd12 = 0, standby = 0, acmd = 0; u32 data_len; if (!sd_card->sd_pass_thru_en) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); 
TRACE_RET(chip, TRANSPORT_FAILED); } if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } cmd_idx = srb->cmnd[2] & 0x3F; if (srb->cmnd[1] & 0x04) { send_cmd12 = 1; } if (srb->cmnd[1] & 0x02) { standby = 1; } if (srb->cmnd[1] & 0x01) { acmd = 1; } data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8] << 8) | srb->cmnd[9]; retval = get_rsp_type(srb, &rsp_type, &rsp_len); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } sd_card->last_rsp_type = rsp_type; retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } #ifdef SUPPORT_SD_LOCK if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { if (CHK_MMC_8BIT(sd_card)) { bus_width = SD_BUS_WIDTH_8; } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { bus_width = SD_BUS_WIDTH_4; } else { bus_width = SD_BUS_WIDTH_1; } } else { bus_width = SD_BUS_WIDTH_4; } RTSX_DEBUGP("bus_width = %d\n", bus_width); #else bus_width = SD_BUS_WIDTH_4; #endif if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if (standby) { retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if (data_len <= 512) { int min_len; u8 *buf; u16 byte_cnt, blk_cnt; u8 cmd[5]; byte_cnt = ((u16)(srb->cmnd[8] & 0x03) << 8) | srb->cmnd[9]; blk_cnt = 1; cmd[0] = 0x40 | cmd_idx; cmd[1] = srb->cmnd[3]; cmd[2] = srb->cmnd[4]; cmd[3] = srb->cmnd[5]; cmd[4] = 
srb->cmnd[6]; buf = kmalloc(data_len, GFP_KERNEL); if (buf == NULL) { TRACE_RET(chip, TRANSPORT_ERROR); } retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt, blk_cnt, bus_width, buf, data_len, 2000); if (retval != STATUS_SUCCESS) { read_err = 1; kfree(buf); rtsx_clear_sd_error(chip); TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } min_len = min(data_len, scsi_bufflen(srb)); rtsx_stor_set_xfer_buf(buf, min_len, srb); kfree(buf); } else if (!(data_len & 0x1FF)) { rtsx_init_cmd(chip); trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, (srb->cmnd[7] & 0xFE) >> 1); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, srb->cmnd[3]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, srb->cmnd[4]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, srb->cmnd[5]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, srb->cmnd[6]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), scsi_bufflen(srb), scsi_sg_count(srb), DMA_FROM_DEVICE, 10000); if (retval < 0) { read_err = 1; rtsx_clear_sd_error(chip); TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } else { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } if 
(standby) { retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if (send_cmd12) { retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, SD_RSP_TYPE_R1b, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } } if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04)) { cmd13_checkbit = 1; } for (i = 0; i < 3; i++) { retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, cmd13_checkbit); if (retval == STATUS_SUCCESS) { break; } } if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed); } scsi_set_resid(srb, 0); return TRANSPORT_GOOD; SD_Execute_Read_Cmd_Failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); if (read_err) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); } release_sd_card(chip); do_reset_sd_card(chip); if (!(chip->card_ready & SD_CARD)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); } TRACE_RET(chip, TRANSPORT_FAILED); } int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int retval, rsp_len, i; int cmd13_checkbit = 0, write_err = 0; u8 cmd_idx, rsp_type; u8 send_cmd12 = 0, standby = 0, acmd = 0; u32 data_len, arg; #ifdef SUPPORT_SD_LOCK int lock_cmd_fail = 0; u8 sd_lock_state = 0; u8 lock_cmd_type = 0; #endif if (!sd_card->sd_pass_thru_en) { set_sense_type(chip, 
lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } cmd_idx = srb->cmnd[2] & 0x3F; if (srb->cmnd[1] & 0x04) { send_cmd12 = 1; } if (srb->cmnd[1] & 0x02) { standby = 1; } if (srb->cmnd[1] & 0x01) { acmd = 1; } data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8] << 8) | srb->cmnd[9]; arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) | ((u32)srb->cmnd[5] << 8) | srb->cmnd[6]; #ifdef SUPPORT_SD_LOCK if (cmd_idx == LOCK_UNLOCK) { sd_lock_state = sd_card->sd_lock_status; sd_lock_state &= SD_LOCKED; } #endif retval = get_rsp_type(srb, &rsp_type, &rsp_len); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } sd_card->last_rsp_type = rsp_type; retval = sd_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } #ifdef SUPPORT_SD_LOCK if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { if (CHK_MMC_8BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_8); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } } } #else retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, TRANSPORT_FAILED); } #endif if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } if (standby) { retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { 
TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, sd_card->rsp, rsp_len, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } if (data_len <= 512) { u16 i; u8 *buf; buf = kmalloc(data_len, GFP_KERNEL); if (buf == NULL) { TRACE_RET(chip, TRANSPORT_ERROR); } rtsx_stor_get_xfer_buf(buf, data_len, srb); #ifdef SUPPORT_SD_LOCK if (cmd_idx == LOCK_UNLOCK) { lock_cmd_type = buf[0] & 0x0F; } #endif if (data_len > 256) { rtsx_init_cmd(chip); for (i = 0; i < 256; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } rtsx_init_cmd(chip); for (i = 256; i < data_len; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } else { rtsx_init_cmd(chip); for (i = 0; i < data_len; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } kfree(buf); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, srb->cmnd[8] & 0x03); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, srb->cmnd[9]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 0x01); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, 
REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, 250); } else if (!(data_len & 0x1FF)) { rtsx_init_cmd(chip); trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, (srb->cmnd[7] & 0xFE) >> 1); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), scsi_bufflen(srb), scsi_sg_count(srb), DMA_TO_DEVICE, 10000); } else { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } if (retval < 0) { write_err = 1; rtsx_clear_sd_error(chip); TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } #ifdef SUPPORT_SD_LOCK if (cmd_idx == LOCK_UNLOCK) { if (lock_cmd_type == SD_ERASE) { sd_card->sd_erase_status = SD_UNDER_ERASING; scsi_set_resid(srb, 0); return TRANSPORT_GOOD; } rtsx_init_cmd(chip); if (CHECK_PID(chip, 0x5209)) { rtsx_add_cmd(chip, CHECK_REG_CMD, SD_BUS_STAT, SD_DAT0_STATUS, SD_DAT0_STATUS); } else { rtsx_add_cmd(chip, CHECK_REG_CMD, 0xFD30, 0x02, 0x02); } rtsx_send_cmd(chip, SD_CARD, 250); retval = sd_update_lock_status(chip); if (retval != STATUS_SUCCESS) { RTSX_DEBUGP("Lock command fail!\n"); lock_cmd_fail = 1; } } #endif /* SUPPORT_SD_LOCK */ if (standby) { retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } if (send_cmd12) { retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, SD_RSP_TYPE_R1b, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, 
SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, NULL, 0, 0); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04)) { cmd13_checkbit = 1; } for (i = 0; i < 3; i++) { retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, cmd13_checkbit); if (retval == STATUS_SUCCESS) { break; } } if (retval != STATUS_SUCCESS) { TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } #ifdef SUPPORT_SD_LOCK if (cmd_idx == LOCK_UNLOCK) { if (!lock_cmd_fail) { RTSX_DEBUGP("lock_cmd_type = 0x%x\n", lock_cmd_type); if (lock_cmd_type & SD_CLR_PWD) { sd_card->sd_lock_status &= ~SD_PWD_EXIST; } if (lock_cmd_type & SD_SET_PWD) { sd_card->sd_lock_status |= SD_PWD_EXIST; } } RTSX_DEBUGP("sd_lock_state = 0x%x, sd_card->sd_lock_status = 0x%x\n", sd_lock_state, sd_card->sd_lock_status); if (sd_lock_state ^ (sd_card->sd_lock_status & SD_LOCKED)) { sd_card->sd_lock_notify = 1; if (sd_lock_state) { if (sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) { sd_card->sd_lock_status |= (SD_UNLOCK_POW_ON | SD_SDR_RST); if (CHK_SD(sd_card)) { retval = reset_sd(chip); if (retval != STATUS_SUCCESS) { sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST); TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed); } } sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST); } } } } if (lock_cmd_fail) { scsi_set_resid(srb, 0); set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); TRACE_RET(chip, TRANSPORT_FAILED); } #endif /* SUPPORT_SD_LOCK */ scsi_set_resid(srb, 0); return TRANSPORT_GOOD; SD_Execute_Write_Cmd_Failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); if (write_err) { set_sense_type(chip, lun, 
SENSE_TYPE_MEDIA_WRITE_ERR); } release_sd_card(chip); do_reset_sd_card(chip); if (!(chip->card_ready & SD_CARD)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); } TRACE_RET(chip, TRANSPORT_FAILED); } int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int count; u16 data_len; if (!sd_card->sd_pass_thru_en) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } data_len = ((u16)srb->cmnd[7] << 8) | srb->cmnd[8]; if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) { count = (data_len < 17) ? data_len : 17; } else { count = (data_len < 6) ? data_len : 6; } rtsx_stor_set_xfer_buf(sd_card->rsp, count, srb); RTSX_DEBUGP("Response length: %d\n", data_len); RTSX_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]); scsi_set_resid(srb, 0); return TRANSPORT_GOOD; } int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); unsigned int lun = SCSI_LUN(srb); int retval; if (!sd_card->sd_pass_thru_en) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); TRACE_RET(chip, TRANSPORT_FAILED); } if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) || (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) || (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) || (0x64 != srb->cmnd[8])) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } switch 
(srb->cmnd[1] & 0x0F) { case 0: #ifdef SUPPORT_SD_LOCK if (0x64 == srb->cmnd[9]) { sd_card->sd_lock_status |= SD_SDR_RST; } #endif retval = reset_sd_card(chip); if (retval != STATUS_SUCCESS) { #ifdef SUPPORT_SD_LOCK sd_card->sd_lock_status &= ~SD_SDR_RST; #endif set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); sd_card->pre_cmd_err = 1; TRACE_RET(chip, TRANSPORT_FAILED); } #ifdef SUPPORT_SD_LOCK sd_card->sd_lock_status &= ~SD_SDR_RST; #endif break; case 1: retval = soft_reset_sd_card(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); sd_card->pre_cmd_err = 1; TRACE_RET(chip, TRANSPORT_FAILED); } break; default: set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, TRANSPORT_FAILED); } scsi_set_resid(srb, 0); return TRANSPORT_GOOD; } #endif void sd_cleanup_work(struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); if (sd_card->seq_mode) { RTSX_DEBUGP("SD: stop transmission\n"); sd_stop_seq_mode(chip); sd_card->cleanup_counter = 0; } } int sd_power_off_card3v3(struct rtsx_chip *chip) { int retval; retval = disable_card_clock(chip, SD_CARD); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, 0); if (!chip->ft2_fast_mode) { retval = card_power_off(chip, SD_CARD); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } wait_timeout(50); } if (chip->asic_code) { retval = sd_pull_ctl_disable(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } else { RTSX_WRITE_REG(chip, FPGA_PULL_CTL, FPGA_SD_PULL_CTL_BIT | 0x20, FPGA_SD_PULL_CTL_BIT); } return STATUS_SUCCESS; } int release_sd_card(struct rtsx_chip *chip) { struct sd_info *sd_card = &(chip->sd_card); int retval; RTSX_DEBUGP("release_sd_card\n"); chip->card_ready &= ~SD_CARD; chip->card_fail &= ~SD_CARD; chip->card_wp &= ~SD_CARD; chip->sd_io = 0; chip->sd_int = 0; #ifdef SUPPORT_SD_LOCK sd_card->sd_lock_status = 0; sd_card->sd_erase_status = 0; 
#endif memset(sd_card->raw_csd, 0, 16); memset(sd_card->raw_scr, 0, 8); retval = sd_power_off_card3v3(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (CHECK_PID(chip, 0x5209)) { retval = sd_change_bank_voltage(chip, SD_IO_3V3); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (CHK_SD30_SPEED(sd_card)) { RTSX_WRITE_REG(chip, SD30_DRIVE_SEL, 0x07, chip->sd30_drive_sel_3v3); } RTSX_WRITE_REG(chip, OCPPARA2, SD_OCP_THD_MASK, chip->sd_400mA_ocp_thd); } return STATUS_SUCCESS; }
gpl-2.0
anoane/ville-4.2.2-sense5-evitaul_porting
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
5633
6992
/* * x86_energy_perf_policy -- set the energy versus performance * policy preference bias on recent X86 processors. */ /* * Copyright (c) 2010, Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <stdlib.h> #include <string.h> unsigned int verbose; /* set with -v */ unsigned int read_only; /* set with -r */ char *progname; unsigned long long new_bias; int cpu = -1; /* * Usage: * * -c cpu: limit action to a single CPU (default is all CPUs) * -v: verbose output (can invoke more than once) * -r: read-only, don't change any settings * * performance * Performance is paramount. * Unwilling to sacrifice any performance * for the sake of energy saving. (hardware default) * * normal * Can tolerate minor performance compromise * for potentially significant energy savings. * (reasonable default for most desktops and servers) * * powersave * Can tolerate significant performance hit * to maximize energy savings. * * n * a numerical value to write to the underlying MSR. 
*/ void usage(void) { printf("%s: [-c cpu] [-v] " "(-r | 'performance' | 'normal' | 'powersave' | n)\n", progname); exit(1); } #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 #define BIAS_PERFORMANCE 0 #define BIAS_BALANCE 6 #define BIAS_POWERSAVE 15 void cmdline(int argc, char **argv) { int opt; progname = argv[0]; while ((opt = getopt(argc, argv, "+rvc:")) != -1) { switch (opt) { case 'c': cpu = atoi(optarg); break; case 'r': read_only = 1; break; case 'v': verbose++; break; default: usage(); } } /* if -r, then should be no additional optind */ if (read_only && (argc > optind)) usage(); /* * if no -r , then must be one additional optind */ if (!read_only) { if (argc != optind + 1) { printf("must supply -r or policy param\n"); usage(); } if (!strcmp("performance", argv[optind])) { new_bias = BIAS_PERFORMANCE; } else if (!strcmp("normal", argv[optind])) { new_bias = BIAS_BALANCE; } else if (!strcmp("powersave", argv[optind])) { new_bias = BIAS_POWERSAVE; } else { char *endptr; new_bias = strtoull(argv[optind], &endptr, 0); if (endptr == argv[optind] || new_bias > BIAS_POWERSAVE) { fprintf(stderr, "invalid value: %s\n", argv[optind]); usage(); } } } } /* * validate_cpuid() * returns on success, quietly exits on failure (make verbose with -v) */ void validate_cpuid(void) { unsigned int eax, ebx, ecx, edx, max_level; unsigned int fms, family, model, stepping; eax = ebx = ecx = edx = 0; asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0)); if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) { if (verbose) fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel", (char *)&ebx, (char *)&edx, (char *)&ecx); exit(1); } asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx"); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; if (verbose > 1) printf("CPUID %d levels family:model:stepping " "0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, 
stepping, family, model, stepping); if (!(edx & (1 << 5))) { if (verbose) printf("CPUID: no MSR\n"); exit(1); } /* * Support for MSR_IA32_ENERGY_PERF_BIAS * is indicated by CPUID.06H.ECX.bit3 */ asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6)); if (verbose) printf("CPUID.06H.ECX: 0x%x\n", ecx); if (!(ecx & (1 << 3))) { if (verbose) printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n"); exit(1); } return; /* success */ } unsigned long long get_msr(int cpu, int offset) { unsigned long long msr; char msr_path[32]; int retval; int fd; sprintf(msr_path, "/dev/cpu/%d/msr", cpu); fd = open(msr_path, O_RDONLY); if (fd < 0) { printf("Try \"# modprobe msr\"\n"); perror(msr_path); exit(1); } retval = pread(fd, &msr, sizeof msr, offset); if (retval != sizeof msr) { printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } close(fd); return msr; } unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) { unsigned long long old_msr; char msr_path[32]; int retval; int fd; sprintf(msr_path, "/dev/cpu/%d/msr", cpu); fd = open(msr_path, O_RDWR); if (fd < 0) { perror(msr_path); exit(1); } retval = pread(fd, &old_msr, sizeof old_msr, offset); if (retval != sizeof old_msr) { perror("pwrite"); printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } retval = pwrite(fd, &new_msr, sizeof new_msr, offset); if (retval != sizeof new_msr) { perror("pwrite"); printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } close(fd); return old_msr; } void print_msr(int cpu) { printf("cpu%d: 0x%016llx\n", cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS)); } void update_msr(int cpu) { unsigned long long previous_msr; previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS); if (verbose) printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n", cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias); return; } char *proc_stat = "/proc/stat"; /* * run func() on every cpu in /dev/cpu */ void for_every_cpu(void (func)(int)) { FILE *fp; int 
retval; fp = fopen(proc_stat, "r"); if (fp == NULL) { perror(proc_stat); exit(1); } retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); if (retval != 0) { perror("/proc/stat format"); exit(1); } while (1) { int cpu; retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu); if (retval != 1) return; func(cpu); } fclose(fp); } int main(int argc, char **argv) { cmdline(argc, argv); if (verbose > 1) printf("x86_energy_perf_policy Nov 24, 2010" " - Len Brown <lenb@kernel.org>\n"); if (verbose > 1 && !read_only) printf("new_bias %lld\n", new_bias); validate_cpuid(); if (cpu != -1) { if (read_only) print_msr(cpu); else update_msr(cpu); } else { if (read_only) for_every_cpu(print_msr); else for_every_cpu(update_msr); } return 0; }
gpl-2.0
yatto/xteam31
arch/sparc/kernel/us2e_cpufreq.c
7425
9847
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support * * Copyright (C) 2003 David S. Miller (davem@redhat.com) * * Many thanks to Dominik Brodowski for fixing up the cpufreq * infrastructure in order to make this driver easier to implement. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/cpufreq.h> #include <linux/threads.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <asm/asi.h> #include <asm/timer.h> static struct cpufreq_driver *cpufreq_us2e_driver; struct us2e_freq_percpu_info { struct cpufreq_frequency_table table[6]; }; /* Indexed by cpu number. */ static struct us2e_freq_percpu_info *us2e_freq_table; #define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL #define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL /* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled * in the ESTAR mode control register. */ #define ESTAR_MODE_DIV_1 0x0000000000000000UL #define ESTAR_MODE_DIV_2 0x0000000000000001UL #define ESTAR_MODE_DIV_4 0x0000000000000003UL #define ESTAR_MODE_DIV_6 0x0000000000000002UL #define ESTAR_MODE_DIV_8 0x0000000000000004UL #define ESTAR_MODE_DIV_MASK 0x0000000000000007UL #define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL #define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL #define MCTRL0_REFR_COUNT_SHIFT 8 #define MCTRL0_REFR_INTERVAL 7800 #define MCTRL0_REFR_CLKS_P_CNT 64 static unsigned long read_hbreg(unsigned long addr) { unsigned long ret; __asm__ __volatile__("ldxa [%1] %2, %0" : "=&r" (ret) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); return ret; } static void write_hbreg(unsigned long addr, unsigned long val) { __asm__ __volatile__("stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) : "memory"); if (addr == HBIRD_ESTAR_MODE_ADDR) { /* Need to wait 16 clock cycles for the PLL to lock. 
*/ udelay(1); } } static void self_refresh_ctl(int enable) { unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (enable) mctrl |= MCTRL0_SREFRESH_ENAB; else mctrl &= ~MCTRL0_SREFRESH_ENAB; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); } static void frob_mem_refresh(int cpu_slowing_down, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long old_refr_count, refr_count, mctrl; refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) >> MCTRL0_REFR_COUNT_SHIFT; mctrl &= ~MCTRL0_REFR_COUNT_MASK; mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { unsigned long usecs; /* We have to wait for both refresh counts (old * and new) to go to zero. */ usecs = (MCTRL0_REFR_CLKS_P_CNT * (refr_count + old_refr_count) * 1000000UL * old_divisor) / clock_tick; udelay(usecs + 1UL); } } static void us2e_transition(unsigned long estar, unsigned long new_bits, unsigned long clock_tick, unsigned long old_divisor, unsigned long divisor) { unsigned long flags; local_irq_save(flags); estar &= ~ESTAR_MODE_DIV_MASK; /* This is based upon the state transition diagram in the IIe manual. 
*/ if (old_divisor == 2 && divisor == 1) { self_refresh_ctl(0); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(0, clock_tick, old_divisor, divisor); } else if (old_divisor == 1 && divisor == 2) { frob_mem_refresh(1, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); self_refresh_ctl(1); } else if (old_divisor == 1 && divisor > 2) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, 1, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor > 2 && divisor == 1) { us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, old_divisor, 2); us2e_transition(estar, new_bits, clock_tick, 2, divisor); } else if (old_divisor < divisor) { frob_mem_refresh(0, clock_tick, old_divisor, divisor); write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); } else if (old_divisor > divisor) { write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); frob_mem_refresh(1, clock_tick, old_divisor, divisor); } else { BUG(); } local_irq_restore(flags); } static unsigned long index_to_estar_mode(unsigned int index) { switch (index) { case 0: return ESTAR_MODE_DIV_1; case 1: return ESTAR_MODE_DIV_2; case 2: return ESTAR_MODE_DIV_4; case 3: return ESTAR_MODE_DIV_6; case 4: return ESTAR_MODE_DIV_8; default: BUG(); } } static unsigned long index_to_divisor(unsigned int index) { switch (index) { case 0: return 1; case 1: return 2; case 2: return 4; case 3: return 6; case 4: return 8; default: BUG(); } } static unsigned long estar_to_divisor(unsigned long estar) { unsigned long ret; switch (estar & ESTAR_MODE_DIV_MASK) { case ESTAR_MODE_DIV_1: ret = 1; break; case ESTAR_MODE_DIV_2: ret = 2; break; case ESTAR_MODE_DIV_4: ret = 4; break; case ESTAR_MODE_DIV_6: ret = 6; break; case ESTAR_MODE_DIV_8: ret = 8; break; default: BUG(); } return ret; } static unsigned int us2e_freq_get(unsigned int cpu) { cpumask_t cpus_allowed; unsigned long clock_tick, estar; if (!cpu_online(cpu)) return 0; cpumask_copy(&cpus_allowed, 
tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); set_cpus_allowed_ptr(current, &cpus_allowed); return clock_tick / estar_to_divisor(estar); } static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) { unsigned long new_bits, new_freq; unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; struct cpufreq_freqs freqs; if (!cpu_online(cpu)) return; cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; new_bits = index_to_estar_mode(index); divisor = index_to_divisor(index); new_freq /= divisor; estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); old_divisor = estar_to_divisor(estar); freqs.old = clock_tick / old_divisor; freqs.new = new_freq; freqs.cpu = cpu; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); if (old_divisor != divisor) us2e_transition(estar, new_bits, clock_tick * 1000, old_divisor, divisor); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); } static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int new_index = 0; if (cpufreq_frequency_table_target(policy, &us2e_freq_table[policy->cpu].table[0], target_freq, relation, &new_index)) return -EINVAL; us2e_set_cpu_divider_index(policy->cpu, new_index); return 0; } static int us2e_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &us2e_freq_table[policy->cpu].table[0]); } static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; struct cpufreq_frequency_table *table = &us2e_freq_table[cpu].table[0]; table[0].index = 0; table[0].frequency = clock_tick / 1; table[1].index = 1; 
table[1].frequency = clock_tick / 2; table[2].index = 2; table[2].frequency = clock_tick / 4; table[2].index = 3; table[2].frequency = clock_tick / 6; table[2].index = 4; table[2].frequency = clock_tick / 8; table[2].index = 5; table[3].frequency = CPUFREQ_TABLE_END; policy->cpuinfo.transition_latency = 0; policy->cur = clock_tick; return cpufreq_frequency_table_cpuinfo(policy, table); } static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us2e_driver) us2e_set_cpu_divider_index(policy->cpu, 0); return 0; } static int __init us2e_freq_init(void) { unsigned long manuf, impl, ver; int ret; if (tlb_type != spitfire) return -ENODEV; __asm__("rdpr %%ver, %0" : "=r" (ver)); manuf = ((ver >> 48) & 0xffff); impl = ((ver >> 32) & 0xffff); if (manuf == 0x17 && impl == 0x13) { struct cpufreq_driver *driver; ret = -ENOMEM; driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); if (!driver) goto err_out; us2e_freq_table = kzalloc( (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), GFP_KERNEL); if (!us2e_freq_table) goto err_out; driver->init = us2e_freq_cpu_init; driver->verify = us2e_freq_verify; driver->target = us2e_freq_target; driver->get = us2e_freq_get; driver->exit = us2e_freq_cpu_exit; driver->owner = THIS_MODULE, strcpy(driver->name, "UltraSPARC-IIe"); cpufreq_us2e_driver = driver; ret = cpufreq_register_driver(driver); if (ret) goto err_out; return 0; err_out: if (driver) { kfree(driver); cpufreq_us2e_driver = NULL; } kfree(us2e_freq_table); us2e_freq_table = NULL; return ret; } return -ENODEV; } static void __exit us2e_freq_exit(void) { if (cpufreq_us2e_driver) { cpufreq_unregister_driver(cpufreq_us2e_driver); kfree(cpufreq_us2e_driver); cpufreq_us2e_driver = NULL; kfree(us2e_freq_table); us2e_freq_table = NULL; } } MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); MODULE_LICENSE("GPL"); module_init(us2e_freq_init); module_exit(us2e_freq_exit);
gpl-2.0
Fusion-Devices/android_kernel_samsung_jf
drivers/spi/spi-pxa2xx.c
7937
48160
/* * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/delay.h> MODULE_AUTHOR("Stephen Street"); MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-spi"); #define MAX_BUSES 3 #define TIMOUT_DFLT 1000 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) #define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) #define MAX_DMA_LEN 8191 #define DMA_ALIGNMENT 8 /* * for testing SSCR1 changes that require SSP restart, basically * everything except the service and interrupt enables, the pxa270 developer * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this * list, but the PXA255 dev man says all bits without really meaning the * service and interrupt enables */ #define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \ | 
SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \ | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) #define DEFINE_SSP_REG(reg, off) \ static inline u32 read_##reg(void const __iomem *p) \ { return __raw_readl(p + (off)); } \ \ static inline void write_##reg(u32 v, void __iomem *p) \ { __raw_writel(v, p + (off)); } DEFINE_SSP_REG(SSCR0, 0x00) DEFINE_SSP_REG(SSCR1, 0x04) DEFINE_SSP_REG(SSSR, 0x08) DEFINE_SSP_REG(SSITR, 0x0c) DEFINE_SSP_REG(SSDR, 0x10) DEFINE_SSP_REG(SSTO, 0x28) DEFINE_SSP_REG(SSPSP, 0x2c) #define START_STATE ((void*)0) #define RUNNING_STATE ((void*)1) #define DONE_STATE ((void*)2) #define ERROR_STATE ((void*)-1) #define QUEUE_RUNNING 0 #define QUEUE_STOPPED 1 struct driver_data { /* Driver model hookup */ struct platform_device *pdev; /* SSP Info */ struct ssp_device *ssp; /* SPI framework hookup */ enum pxa_ssp_type ssp_type; struct spi_master *master; /* PXA hookup */ struct pxa2xx_spi_master *master_info; /* DMA setup stuff */ int rx_channel; int tx_channel; u32 *null_dma_buf; /* SSP register addresses */ void __iomem *ioaddr; u32 ssdr_physical; /* SSP masks*/ u32 dma_cr1; u32 int_cr1; u32 clear_sr; u32 mask_sr; /* Driver message queue */ struct workqueue_struct *workqueue; struct work_struct pump_messages; spinlock_t lock; struct list_head queue; int busy; int run; /* Message Transfer pump */ struct tasklet_struct pump_transfers; /* Current message transfer state info */ struct spi_message* cur_msg; struct spi_transfer* cur_transfer; struct chip_data *cur_chip; size_t len; void *tx; void *tx_end; void *rx; void *rx_end; int dma_mapped; dma_addr_t rx_dma; dma_addr_t tx_dma; size_t rx_map_len; size_t tx_map_len; u8 n_bytes; u32 dma_width; int (*write)(struct driver_data *drv_data); int (*read)(struct driver_data *drv_data); irqreturn_t (*transfer_handler)(struct driver_data *drv_data); void (*cs_control)(u32 command); }; struct chip_data { u32 
cr0; u32 cr1; u32 psp; u32 timeout; u8 n_bytes; u32 dma_width; u32 dma_burst_size; u32 threshold; u32 dma_threshold; u8 enable_dma; u8 bits_per_word; u32 speed_hz; union { int gpio_cs; unsigned int frm; }; int gpio_cs_inverted; int (*write)(struct driver_data *drv_data); int (*read)(struct driver_data *drv_data); void (*cs_control)(u32 command); }; static void pump_messages(struct work_struct *work); static void cs_assert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; if (drv_data->ssp_type == CE4100_SSP) { write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); return; } if (chip->cs_control) { chip->cs_control(PXA2XX_CS_ASSERT); return; } if (gpio_is_valid(chip->gpio_cs)) gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); } static void cs_deassert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; if (drv_data->ssp_type == CE4100_SSP) return; if (chip->cs_control) { chip->cs_control(PXA2XX_CS_DEASSERT); return; } if (gpio_is_valid(chip->gpio_cs)) gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); } static void write_SSSR_CS(struct driver_data *drv_data, u32 val) { void __iomem *reg = drv_data->ioaddr; if (drv_data->ssp_type == CE4100_SSP) val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; write_SSSR(val, reg); } static int pxa25x_ssp_comp(struct driver_data *drv_data) { if (drv_data->ssp_type == PXA25x_SSP) return 1; if (drv_data->ssp_type == CE4100_SSP) return 1; return 0; } static int flush(struct driver_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; void __iomem *reg = drv_data->ioaddr; do { while (read_SSSR(reg) & SSSR_RNE) { read_SSDR(reg); } } while ((read_SSSR(reg) & SSSR_BSY) && --limit); write_SSSR_CS(drv_data, SSSR_ROR); return limit; } static int null_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; u8 n_bytes = drv_data->n_bytes; if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; write_SSDR(0, reg); 
drv_data->tx += n_bytes; return 1; } static int null_reader(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; u8 n_bytes = drv_data->n_bytes; while ((read_SSSR(reg) & SSSR_RNE) && (drv_data->rx < drv_data->rx_end)) { read_SSDR(reg); drv_data->rx += n_bytes; } return drv_data->rx == drv_data->rx_end; } static int u8_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; write_SSDR(*(u8 *)(drv_data->tx), reg); ++drv_data->tx; return 1; } static int u8_reader(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; while ((read_SSSR(reg) & SSSR_RNE) && (drv_data->rx < drv_data->rx_end)) { *(u8 *)(drv_data->rx) = read_SSDR(reg); ++drv_data->rx; } return drv_data->rx == drv_data->rx_end; } static int u16_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; write_SSDR(*(u16 *)(drv_data->tx), reg); drv_data->tx += 2; return 1; } static int u16_reader(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; while ((read_SSSR(reg) & SSSR_RNE) && (drv_data->rx < drv_data->rx_end)) { *(u16 *)(drv_data->rx) = read_SSDR(reg); drv_data->rx += 2; } return drv_data->rx == drv_data->rx_end; } static int u32_writer(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) || (drv_data->tx == drv_data->tx_end)) return 0; write_SSDR(*(u32 *)(drv_data->tx), reg); drv_data->tx += 4; return 1; } static int u32_reader(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; while ((read_SSSR(reg) & SSSR_RNE) && (drv_data->rx < drv_data->rx_end)) { *(u32 *)(drv_data->rx) = read_SSDR(reg); drv_data->rx += 4; } return drv_data->rx == drv_data->rx_end; } static void *next_transfer(struct driver_data *drv_data) { 
struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; /* Move to next transfer */ if (trans->transfer_list.next != &msg->transfers) { drv_data->cur_transfer = list_entry(trans->transfer_list.next, struct spi_transfer, transfer_list); return RUNNING_STATE; } else return DONE_STATE; } static int map_dma_buffers(struct driver_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct device *dev = &msg->spi->dev; if (!drv_data->cur_chip->enable_dma) return 0; if (msg->is_dma_mapped) return drv_data->rx_dma && drv_data->tx_dma; if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) return 0; /* Modify setup if rx buffer is null */ if (drv_data->rx == NULL) { *drv_data->null_dma_buf = 0; drv_data->rx = drv_data->null_dma_buf; drv_data->rx_map_len = 4; } else drv_data->rx_map_len = drv_data->len; /* Modify setup if tx buffer is null */ if (drv_data->tx == NULL) { *drv_data->null_dma_buf = 0; drv_data->tx = drv_data->null_dma_buf; drv_data->tx_map_len = 4; } else drv_data->tx_map_len = drv_data->len; /* Stream map the tx buffer. Always do DMA_TO_DEVICE first * so we flush the cache *before* invalidating it, in case * the tx and rx buffers overlap. 
*/ drv_data->tx_dma = dma_map_single(dev, drv_data->tx, drv_data->tx_map_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, drv_data->tx_dma)) return 0; /* Stream map the rx buffer */ drv_data->rx_dma = dma_map_single(dev, drv_data->rx, drv_data->rx_map_len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, drv_data->rx_dma)) { dma_unmap_single(dev, drv_data->tx_dma, drv_data->tx_map_len, DMA_TO_DEVICE); return 0; } return 1; } static void unmap_dma_buffers(struct driver_data *drv_data) { struct device *dev; if (!drv_data->dma_mapped) return; if (!drv_data->cur_msg->is_dma_mapped) { dev = &drv_data->cur_msg->spi->dev; dma_unmap_single(dev, drv_data->rx_dma, drv_data->rx_map_len, DMA_FROM_DEVICE); dma_unmap_single(dev, drv_data->tx_dma, drv_data->tx_map_len, DMA_TO_DEVICE); } drv_data->dma_mapped = 0; } /* caller already set message->status; dma and pio irqs are blocked */ static void giveback(struct driver_data *drv_data) { struct spi_transfer* last_transfer; unsigned long flags; struct spi_message *msg; spin_lock_irqsave(&drv_data->lock, flags); msg = drv_data->cur_msg; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); last_transfer = list_entry(msg->transfers.prev, struct spi_transfer, transfer_list); /* Delay if requested before any change in chip select */ if (last_transfer->delay_usecs) udelay(last_transfer->delay_usecs); /* Drop chip select UNLESS cs_change is true or we are returning * a message with an error, or next message is for another chip */ if (!last_transfer->cs_change) cs_deassert(drv_data); else { struct spi_message *next_msg; /* Holding of cs was hinted, but we need to make sure * the next message is for the same chip. Don't waste * time with the following tests unless this was hinted. 
*
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		spin_lock_irqsave(&drv_data->lock, flags);
		if (list_empty(&drv_data->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
		spin_unlock_irqrestore(&drv_data->lock, flags);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);

	drv_data->cur_chip = NULL;
}

/* Busy-wait until the SSP receiver goes idle; returns 0 on timeout. */
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

/* Busy-wait until the DMA channel reaches stop state; 0 on timeout. */
static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

/*
 * Abort a failed DMA transfer: reset both DMA channels, quiesce and
 * flush the SSP, unmap the buffers, and reschedule pump_transfers with
 * the message flagged ERROR_STATE so it is given back to the caller.
 */
static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

/*
 * Finish a DMA transfer: stop the channels, collect any trailing bytes
 * left in the RX FIFO via PIO, account the bytes actually received, and
 * schedule the next transfer of the message.
 */
static void dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels*/
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	unmap_dma_buffers(drv_data);

	/* update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* read trailing data from fifo, it does not matter how many
	 * bytes are in the fifo just read until buffer is full
	 * or fifo is empty, which ever occurs first */
	drv_data->read(drv_data);

	/* return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

/*
 * DMA channel interrupt handler: abort on bus errors; on PXA25x (which
 * has no SSP timeout interrupt) treat TX-channel completion plus an RX
 * stall as end of transfer.
 */
static void dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on tx channel");
		else
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on rx channel");
		return;
	}

	/* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);
	}
}

/* SSP interrupt handler used while the current transfer runs via DMA. */
static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;
irq_status = read_SSSR(reg) & drv_data->mask_sr; if (irq_status & SSSR_ROR) { dma_error_stop(drv_data, "dma_transfer: fifo overrun"); return IRQ_HANDLED; } /* Check for false positive timeout */ if ((irq_status & SSSR_TINT) && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { write_SSSR(SSSR_TINT, reg); return IRQ_HANDLED; } if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { /* Clear and disable timeout interrupt, do the rest in * dma_transfer_complete */ if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); /* finish this transfer, start the next */ dma_transfer_complete(drv_data); return IRQ_HANDLED; } /* Opps problem detected */ return IRQ_NONE; } static void reset_sccr1(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; struct chip_data *chip = drv_data->cur_chip; u32 sccr1_reg; sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; sccr1_reg &= ~SSCR1_RFT; sccr1_reg |= chip->threshold; write_SSCR1(sccr1_reg, reg); } static void int_error_stop(struct driver_data *drv_data, const char* msg) { void __iomem *reg = drv_data->ioaddr; /* Stop and reset SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); reset_sccr1(drv_data); if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); flush(drv_data); write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); dev_err(&drv_data->pdev->dev, "%s\n", msg); drv_data->cur_msg->state = ERROR_STATE; tasklet_schedule(&drv_data->pump_transfers); } static void int_transfer_complete(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; /* Stop SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); reset_sccr1(drv_data); if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); /* Update total byte transferred return count actual bytes read */ drv_data->cur_msg->actual_length += drv_data->len - (drv_data->rx_end - drv_data->rx); /* Transfer delays and chip select release are * handled in pump_transfers or giveback */ /* Move to next transfer */ drv_data->cur_msg->state = next_transfer(drv_data); /* Schedule transfer 
tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

/* SSP interrupt handler used while the current transfer runs via PIO. */
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	/* Only watch TX-FIFO-service when transmit interrupts are enabled */
	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			bytes_left = drv_data->rx_end - drv_data->rx;
			/* convert remaining bytes to remaining FIFO words */
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
				/* fall through */
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

/*
 * Top-level (shared) SSP interrupt handler: mask out status bits we are
 * not waiting for, reject spurious interrupts, quiesce the port if an
 * interrupt arrives with no message in flight, otherwise dispatch to
 * the current PIO or DMA transfer handler.
 */
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg = read_SSCR1(reg);
	u32 mask = drv_data->mask_sr;
	u32 status;

	status = read_SSSR(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev, "bad message state "
			"in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * Pick a DMA burst size and matching RX/TX FIFO thresholds for the
 * given word size.  Returns non-zero when the requested burst had to be
 * reduced so a burst still fits in half the FIFO.
 */
static int set_dma_burst_and_threshold(struct chip_data *chip,
				struct spi_device *spi,
				u8 bits_per_word, u32 *burst_code,
				u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).
The computation below * is (burst_size rounded up to nearest 8 byte, word or long word) * divided by (bytes/register); the tx threshold is the inverse of * the rx, so that there will always be enough data in the rx fifo * to satisfy a burst, and there will always be enough space in the * tx fifo to accept a burst (a tx burst will overwrite the fifo if * there is not enough space), there must always remain enough empty * space in the rx fifo for any data loaded to the tx fifo. * Whenever burst_size (in bytes) equals bits/word, the fifo threshold * will be 8, or half the fifo; * The threshold can only be set to 2, 4 or 8, but not 16, because * to burst 16 to the tx fifo, the fifo would have to be empty; * however, the minimum fifo trigger level is 1, and the tx will * request service when the fifo is at this level, with only 15 spaces. */ /* find bytes/word */ if (bits_per_word <= 8) bytes_per_word = 1; else if (bits_per_word <= 16) bytes_per_word = 2; else bytes_per_word = 4; /* use struct pxa2xx_spi_chip->dma_burst_size if available */ if (chip_info) req_burst_size = chip_info->dma_burst_size; else { switch (chip->dma_burst_size) { default: /* if the default burst size is not set, * do it now */ chip->dma_burst_size = DCMD_BURST8; case DCMD_BURST8: req_burst_size = 8; break; case DCMD_BURST16: req_burst_size = 16; break; case DCMD_BURST32: req_burst_size = 32; break; } } if (req_burst_size <= 8) { *burst_code = DCMD_BURST8; burst_bytes = 8; } else if (req_burst_size <= 16) { if (bytes_per_word == 1) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST8; burst_bytes = 8; retval = 1; } else { *burst_code = DCMD_BURST16; burst_bytes = 16; } } else { if (bytes_per_word == 1) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST8; burst_bytes = 8; retval = 1; } else if (bytes_per_word == 2) { /* don't burst more than 1/2 the fifo */ *burst_code = DCMD_BURST16; burst_bytes = 16; retval = 1; } else { *burst_code = DCMD_BURST32; burst_bytes = 
32; } } thresh_words = burst_bytes / bytes_per_word; /* thresh_words will be between 2 and 8 */ *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); return retval; } static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) { unsigned long ssp_clk = clk_get_rate(ssp->clk); if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; else return ((ssp_clk / rate - 1) & 0xfff) << 8; } static void pump_transfers(unsigned long data) { struct driver_data *drv_data = (struct driver_data *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct chip_data *chip = NULL; struct ssp_device *ssp = drv_data->ssp; void __iomem *reg = drv_data->ioaddr; u32 clk_div = 0; u8 bits = 0; u32 speed = 0; u32 cr0; u32 cr1; u32 dma_thresh = drv_data->cur_chip->dma_threshold; u32 dma_burst = drv_data->cur_chip->dma_burst_size; /* Get current state information */ message = drv_data->cur_msg; transfer = drv_data->cur_transfer; chip = drv_data->cur_chip; /* Handle for abort */ if (message->state == ERROR_STATE) { message->status = -EIO; giveback(drv_data); return; } /* Handle end of message */ if (message->state == DONE_STATE) { message->status = 0; giveback(drv_data); return; } /* Delay if requested at end of transfer before CS change */ if (message->state == RUNNING_STATE) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); /* Drop chip select only if cs_change is requested */ if (previous->cs_change) cs_deassert(drv_data); } /* Check for transfers that need multiple DMA segments */ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { /* reject already-mapped transfers; PIO won't always work */ if (message->is_dma_mapped || transfer->rx_dma || transfer->tx_dma) { dev_err(&drv_data->pdev->dev, "pump_transfers: mapped 
transfer length " "of %u is greater than %d\n", transfer->len, MAX_DMA_LEN); message->status = -EINVAL; giveback(drv_data); return; } /* warn ... we force this to PIO mode */ if (printk_ratelimit()) dev_warn(&message->spi->dev, "pump_transfers: " "DMA disabled for transfer length %ld " "greater than %d\n", (long)drv_data->len, MAX_DMA_LEN); } /* Setup the transfer state based on the type of transfer */ if (flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; giveback(drv_data); return; } drv_data->n_bytes = chip->n_bytes; drv_data->dma_width = chip->dma_width; drv_data->tx = (void *)transfer->tx_buf; drv_data->tx_end = drv_data->tx + transfer->len; drv_data->rx = transfer->rx_buf; drv_data->rx_end = drv_data->rx + transfer->len; drv_data->rx_dma = transfer->rx_dma; drv_data->tx_dma = transfer->tx_dma; drv_data->len = transfer->len & DCMD_LENGTH; drv_data->write = drv_data->tx ? chip->write : null_writer; drv_data->read = drv_data->rx ? chip->read : null_reader; /* Change speed and bit per word on a per transfer */ cr0 = chip->cr0; if (transfer->speed_hz || transfer->bits_per_word) { bits = chip->bits_per_word; speed = chip->speed_hz; if (transfer->speed_hz) speed = transfer->speed_hz; if (transfer->bits_per_word) bits = transfer->bits_per_word; clk_div = ssp_get_clk_div(ssp, speed); if (bits <= 8) { drv_data->n_bytes = 1; drv_data->dma_width = DCMD_WIDTH1; drv_data->read = drv_data->read != null_reader ? u8_reader : null_reader; drv_data->write = drv_data->write != null_writer ? u8_writer : null_writer; } else if (bits <= 16) { drv_data->n_bytes = 2; drv_data->dma_width = DCMD_WIDTH2; drv_data->read = drv_data->read != null_reader ? u16_reader : null_reader; drv_data->write = drv_data->write != null_writer ? u16_writer : null_writer; } else if (bits <= 32) { drv_data->n_bytes = 4; drv_data->dma_width = DCMD_WIDTH4; drv_data->read = drv_data->read != null_reader ? 
u32_reader : null_reader; drv_data->write = drv_data->write != null_writer ? u32_writer : null_writer; } /* if bits/word is changed in dma mode, then must check the * thresholds and burst also */ if (chip->enable_dma) { if (set_dma_burst_and_threshold(chip, message->spi, bits, &dma_burst, &dma_thresh)) if (printk_ratelimit()) dev_warn(&message->spi->dev, "pump_transfers: " "DMA burst size reduced to " "match bits_per_word\n"); } cr0 = clk_div | SSCR0_Motorola | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) | SSCR0_SSE | (bits > 16 ? SSCR0_EDSS : 0); } message->state = RUNNING_STATE; /* Try to map dma buffer and do a dma transfer if successful, but * only if the length is non-zero and less than MAX_DMA_LEN. * * Zero-length non-descriptor DMA is illegal on PXA2xx; force use * of PIO instead. Care is needed above because the transfer may * have have been passed with buffers that are already dma mapped. * A zero-length transfer in PIO mode will not try to write/read * to/from the buffers * * REVISIT large transfers are exactly where we most want to be * using DMA. If this happens much, split those transfers into * multiple DMA segments rather than forcing PIO. 
*/ drv_data->dma_mapped = 0; if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) drv_data->dma_mapped = map_dma_buffers(drv_data); if (drv_data->dma_mapped) { /* Ensure we have the correct interrupt handler */ drv_data->transfer_handler = dma_transfer; /* Setup rx DMA Channel */ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; DTADR(drv_data->rx_channel) = drv_data->rx_dma; if (drv_data->rx == drv_data->null_dma_buf) /* No target address increment */ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | drv_data->dma_width | dma_burst | drv_data->len; else DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | DCMD_FLOWSRC | drv_data->dma_width | dma_burst | drv_data->len; /* Setup tx DMA Channel */ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; DSADR(drv_data->tx_channel) = drv_data->tx_dma; DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; if (drv_data->tx == drv_data->null_dma_buf) /* No source address increment */ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | drv_data->dma_width | dma_burst | drv_data->len; else DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | DCMD_FLOWTRG | drv_data->dma_width | dma_burst | drv_data->len; /* Enable dma end irqs on SSP to detect end of transfer */ if (drv_data->ssp_type == PXA25x_SSP) DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; /* Clear status and start DMA engine */ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; write_SSSR(drv_data->clear_sr, reg); DCSR(drv_data->rx_channel) |= DCSR_RUN; DCSR(drv_data->tx_channel) |= DCSR_RUN; } else { /* Ensure we have the correct interrupt handler */ drv_data->transfer_handler = interrupt_transfer; /* Clear status */ cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; write_SSSR_CS(drv_data, drv_data->clear_sr); } /* see if we need to reload the config registers */ if ((read_SSCR0(reg) != cr0) || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != (cr1 & SSCR1_CHANGE_MASK)) { /* stop the SSP, and update the other bits */ write_SSCR0(cr0 & ~SSCR0_SSE, 
reg); if (!pxa25x_ssp_comp(drv_data)) write_SSTO(chip->timeout, reg); /* first set CR1 without interrupt and service enables */ write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); /* restart the SSP */ write_SSCR0(cr0, reg); } else { if (!pxa25x_ssp_comp(drv_data)) write_SSTO(chip->timeout, reg); } cs_assert(drv_data); /* after chip select, release the data by enabling service * requests and interrupts, without changing any mode bits */ write_SSCR1(cr1, reg); } static void pump_messages(struct work_struct *work) { struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages); unsigned long flags; /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Make sure we are not already running a message */ if (drv_data->cur_msg) { spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Extract head of queue */ drv_data->cur_msg = list_entry(drv_data->queue.next, struct spi_message, queue); list_del_init(&drv_data->cur_msg->queue); /* Initial message state*/ drv_data->cur_msg->state = START_STATE; drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, struct spi_transfer, transfer_list); /* prepare to setup the SSP, in pump_transfers, using the per * chip configuration */ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); /* Mark as busy and launch transfers */ tasklet_schedule(&drv_data->pump_transfers); drv_data->busy = 1; spin_unlock_irqrestore(&drv_data->lock, flags); } static int transfer(struct spi_device *spi, struct spi_message *msg) { struct driver_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); if (drv_data->run == QUEUE_STOPPED) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; msg->state = 
START_STATE; list_add_tail(&msg->queue, &drv_data->queue); if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); return 0; } static int setup_cs(struct spi_device *spi, struct chip_data *chip, struct pxa2xx_spi_chip *chip_info) { int err = 0; if (chip == NULL || chip_info == NULL) return 0; /* NOTE: setup() can be called multiple times, possibly with * different chip_info, release previously requested GPIO */ if (gpio_is_valid(chip->gpio_cs)) gpio_free(chip->gpio_cs); /* If (*cs_control) is provided, ignore GPIO chip select */ if (chip_info->cs_control) { chip->cs_control = chip_info->cs_control; return 0; } if (gpio_is_valid(chip_info->gpio_cs)) { err = gpio_request(chip_info->gpio_cs, "SPI_CS"); if (err) { dev_err(&spi->dev, "failed to request chip select " "GPIO%d\n", chip_info->gpio_cs); return err; } chip->gpio_cs = chip_info->gpio_cs; chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH; err = gpio_direction_output(chip->gpio_cs, !chip->gpio_cs_inverted); } return err; } static int setup(struct spi_device *spi) { struct pxa2xx_spi_chip *chip_info = NULL; struct chip_data *chip; struct driver_data *drv_data = spi_master_get_devdata(spi->master); struct ssp_device *ssp = drv_data->ssp; unsigned int clk_div; uint tx_thres = TX_THRESH_DFLT; uint rx_thres = RX_THRESH_DFLT; if (!pxa25x_ssp_comp(drv_data) && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " "b/w not 4-32 for type non-PXA25x_SSP\n", drv_data->ssp_type, spi->bits_per_word); return -EINVAL; } else if (pxa25x_ssp_comp(drv_data) && (spi->bits_per_word < 4 || spi->bits_per_word > 16)) { dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " "b/w not 4-16 for type PXA25x_SSP\n", drv_data->ssp_type, spi->bits_per_word); return -EINVAL; } /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (!chip) { chip = 
kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) { dev_err(&spi->dev, "failed setup: can't allocate chip data\n"); return -ENOMEM; } if (drv_data->ssp_type == CE4100_SSP) { if (spi->chip_select > 4) { dev_err(&spi->dev, "failed setup: " "cs number must not be > 4.\n"); kfree(chip); return -EINVAL; } chip->frm = spi->chip_select; } else chip->gpio_cs = -1; chip->enable_dma = 0; chip->timeout = TIMOUT_DFLT; chip->dma_burst_size = drv_data->master_info->enable_dma ? DCMD_BURST8 : 0; } /* protocol drivers may change the chip settings, so... * if chip_info exists, use it */ chip_info = spi->controller_data; /* chip_info isn't always needed */ chip->cr1 = 0; if (chip_info) { if (chip_info->timeout) chip->timeout = chip_info->timeout; if (chip_info->tx_threshold) tx_thres = chip_info->tx_threshold; if (chip_info->rx_threshold) rx_thres = chip_info->rx_threshold; chip->enable_dma = drv_data->master_info->enable_dma; chip->dma_threshold = 0; if (chip_info->enable_loopback) chip->cr1 = SSCR1_LBM; } chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); /* set dma burst and threshold outside of chip_info path so that if * chip_info goes away after setting chip->enable_dma, the * burst and threshold can still respond to changes in bits_per_word */ if (chip->enable_dma) { /* set up legal burst and threshold for dma */ if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, &chip->dma_burst_size, &chip->dma_threshold)) { dev_warn(&spi->dev, "in setup: DMA burst size reduced " "to match bits_per_word\n"); } } clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz); chip->speed_hz = spi->max_speed_hz; chip->cr0 = clk_div | SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ? spi->bits_per_word - 16 : spi->bits_per_word) | SSCR0_SSE | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH); chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) | (((spi->mode & SPI_CPOL) != 0) ? 
SSCR1_SPO : 0); /* NOTE: PXA25x_SSP _could_ use external clocking ... */ if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&spi->dev, "%ld Hz actual, %s\n", clk_get_rate(ssp->clk) / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), chip->enable_dma ? "DMA" : "PIO"); else dev_dbg(&spi->dev, "%ld Hz actual, %s\n", clk_get_rate(ssp->clk) / 2 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), chip->enable_dma ? "DMA" : "PIO"); if (spi->bits_per_word <= 8) { chip->n_bytes = 1; chip->dma_width = DCMD_WIDTH1; chip->read = u8_reader; chip->write = u8_writer; } else if (spi->bits_per_word <= 16) { chip->n_bytes = 2; chip->dma_width = DCMD_WIDTH2; chip->read = u16_reader; chip->write = u16_writer; } else if (spi->bits_per_word <= 32) { chip->cr0 |= SSCR0_EDSS; chip->n_bytes = 4; chip->dma_width = DCMD_WIDTH4; chip->read = u32_reader; chip->write = u32_writer; } else { dev_err(&spi->dev, "invalid wordsize\n"); return -ENODEV; } chip->bits_per_word = spi->bits_per_word; spi_set_ctldata(spi, chip); if (drv_data->ssp_type == CE4100_SSP) return 0; return setup_cs(spi, chip, chip_info); } static void cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); struct driver_data *drv_data = spi_master_get_devdata(spi->master); if (!chip) return; if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) gpio_free(chip->gpio_cs); kfree(chip); } static int __devinit init_queue(struct driver_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); spin_lock_init(&drv_data->lock); drv_data->run = QUEUE_STOPPED; drv_data->busy = 0; tasklet_init(&drv_data->pump_transfers, pump_transfers, (unsigned long)drv_data); INIT_WORK(&drv_data->pump_messages, pump_messages); drv_data->workqueue = create_singlethread_workqueue( dev_name(drv_data->master->dev.parent)); if (drv_data->workqueue == NULL) return -EBUSY; return 0; } static int start_queue(struct driver_data *drv_data) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); if (drv_data->run == QUEUE_RUNNING || 
drv_data->busy) { spin_unlock_irqrestore(&drv_data->lock, flags); return -EBUSY; } drv_data->run = QUEUE_RUNNING; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; spin_unlock_irqrestore(&drv_data->lock, flags); queue_work(drv_data->workqueue, &drv_data->pump_messages); return 0; } static int stop_queue(struct driver_data *drv_data) { unsigned long flags; unsigned limit = 500; int status = 0; spin_lock_irqsave(&drv_data->lock, flags); /* This is a bit lame, but is optimized for the common execution path. * A wait_queue on the drv_data->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ drv_data->run = QUEUE_STOPPED; while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); spin_lock_irqsave(&drv_data->lock, flags); } if (!list_empty(&drv_data->queue) || drv_data->busy) status = -EBUSY; spin_unlock_irqrestore(&drv_data->lock, flags); return status; } static int destroy_queue(struct driver_data *drv_data) { int status; status = stop_queue(drv_data); /* we are unloading the module or failing to load (only two calls * to this routine), and neither call can handle a return value. * However, destroy_workqueue calls flush_workqueue, and that will * block until all work is done. If the reason that stop_queue * timed out is that the work will never finish, then it does no * good to call destroy_workqueue, so return anyway. 
*/ if (status != 0) return status; destroy_workqueue(drv_data->workqueue); return 0; } static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pxa2xx_spi_master *platform_info; struct spi_master *master; struct driver_data *drv_data; struct ssp_device *ssp; int status; platform_info = dev->platform_data; ssp = pxa_ssp_request(pdev->id, pdev->name); if (ssp == NULL) { dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); return -ENODEV; } /* Allocate master with space for drv_data and null dma buffer */ master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); if (!master) { dev_err(&pdev->dev, "cannot alloc spi_master\n"); pxa_ssp_free(ssp); return -ENOMEM; } drv_data = spi_master_get_devdata(master); drv_data->master = master; drv_data->master_info = platform_info; drv_data->pdev = pdev; drv_data->ssp = ssp; master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->dma_alignment = DMA_ALIGNMENT; master->cleanup = cleanup; master->setup = setup; master->transfer = transfer; drv_data->ssp_type = ssp->type; drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data + sizeof(struct driver_data)), 8); drv_data->ioaddr = ssp->mmio_base; drv_data->ssdr_physical = ssp->phys_base + SSDR; if (pxa25x_ssp_comp(drv_data)) { drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; drv_data->dma_cr1 = 0; drv_data->clear_sr = SSSR_ROR; drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; } else { drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; drv_data->clear_sr = SSSR_ROR | SSSR_TINT; drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; } status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), drv_data); if (status < 0) { 
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {

		/* Get two DMA channels	(rx and tx) */
		drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
						       DMA_PRIO_HIGH,
						       dma_handler,
						       drv_data);
		if (drv_data->rx_channel < 0) {
			dev_err(dev, "problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = -ENODEV;
			goto out_error_irq_alloc;
		}
		drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
						       DMA_PRIO_MEDIUM,
						       dma_handler,
						       drv_data);
		if (drv_data->tx_channel < 0) {
			dev_err(dev, "problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = -ENODEV;
			goto out_error_dma_alloc;
		}

		/* Route the SSP's DMA requests to the channels we got. */
		DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
		DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
	}

	/* Enable SOC clock */
	clk_enable(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
		    SSCR1_TxTresh(TX_THRESH_DFLT),
		    drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
		    | SSCR0_Motorola
		    | SSCR0_DataSize(8),
		    drv_data->ioaddr);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	/* Initial and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem initializing queue\n");
		goto out_error_clock_enabled;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem starting queue\n");
		goto out_error_clock_enabled;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_queue_alloc;
	}

	return status;

	/* Error path: unwind in reverse order of acquisition. */
out_error_queue_alloc:
	destroy_queue(drv_data);

out_error_clock_enabled:
	clk_disable(ssp->clk);

out_error_dma_alloc:
	if (drv_data->tx_channel != -1)
		pxa_free_dma(drv_data->tx_channel);
	if
	    (drv_data->rx_channel != -1)
		pxa_free_dma(drv_data->rx_channel);

out_error_irq_alloc:
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

/*
 * pxa2xx_spi_remove - release everything probe acquired.
 *
 * Drains the queue, quiesces the SSP, then frees DMA channels, the IRQ,
 * the SSP port and finally unregisters from the SPI core.  Always
 * returns 0 (see comment below on why errors are not propagated).
 */
static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;
	int status = 0;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0)
		/* the kernel does not check the return status of
		 * this routine (mod->exit, within the kernel). Therefore
		 * nothing is gained by returning from here, the module is
		 * going away regardless, and we should not leave any more
		 * resources allocated than necessary. We cannot free the
		 * message memory in drv_data->queue, but we can release the
		 * resources below. I think the kernel should honor -EBUSY
		 * returns but... */
		dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		DRCMR(ssp->drcmr_rx) = 0;
		DRCMR(ssp->drcmr_tx) = 0;
		pxa_free_dma(drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
	}

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

/* Power the controller down cleanly at system shutdown/reboot. */
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM
/*
 * pxa2xx_spi_suspend - drain the queue, quiesce the SSP, gate its clock.
 * Returns 0 on success or -EBUSY if the queue would not drain.
 */
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;
	write_SSCR0(0,
		    drv_data->ioaddr);
	clk_disable(ssp->clk);

	return 0;
}

/*
 * pxa2xx_spi_resume - re-route DMA requests, ungate the SSP clock and
 * restart the message queue after a system resume.
 */
static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	/* Re-establish the DMA request mappings lost across suspend. */
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;

	/* Enable the SSP clock */
	clk_enable(ssp->clk);

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	.suspend	= pxa2xx_spi_suspend,
	.resume		= pxa2xx_spi_resume,
};
#endif

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxa2xx_spi_pm_ops,
#endif
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
/* Registered at subsys_initcall time so SPI slaves on this bus are
 * available before ordinary device-driver initcalls run. */
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
gpl-2.0
bilalliberty/depricated-kernel-villec2--3.4-
sound/oss/vwsnd.c
8193
97691
/* * Sound driver for Silicon Graphics 320 and 540 Visual Workstations' * onboard audio. See notes in Documentation/sound/oss/vwsnd . * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef VWSND_DEBUG /* define for debugging */ /* * XXX to do - * * External sync. * Rename swbuf, hwbuf, u&i, hwptr&swptr to something rational. * Bug - if select() called before read(), pcm_setup() not called. * Bug - output doesn't stop soon enough if process killed. */ /* * Things to test - * * Will readv/writev work? Write a test. * * insmod/rmmod 100 million times. * * Run I/O until int ptrs wrap around (roughly 6.2 hours @ DAT * rate). * * Concurrent threads banging on mixer simultaneously, both UP * and SMP kernels. Especially, watch for thread A changing * OUTSRC while thread B changes gain -- both write to the same * ad1843 register. * * What happens if a client opens /dev/audio then forks? * Do two procs have /dev/audio open? Test. * * Pump audio through the CD, MIC and line inputs and verify that * they mix/mute into the output. * * Apps: * amp * mpg123 * x11amp * mxv * kmedia * esound * need more input apps * * Run tests while bombarding with signals. setitimer(2) will do it... */ /* * This driver is organized in nine sections. 
* The nine sections are: * * debug stuff * low level lithium access * high level lithium access * AD1843 access * PCM I/O * audio driver * mixer driver * probe/attach/unload * initialization and loadable kernel module interface * * That is roughly the order of increasing abstraction, so forward * dependencies are minimal. */ /* * Locking Notes * * INC_USE_COUNT and DEC_USE_COUNT keep track of the number of * open descriptors to this driver. They store it in vwsnd_use_count. * The global device list, vwsnd_dev_list, is immutable when the IN_USE * is true. * * devc->open_lock is a semaphore that is used to enforce the * single reader/single writer rule for /dev/audio. The rule is * that each device may have at most one reader and one writer. * Open will block until the previous client has closed the * device, unless O_NONBLOCK is specified. * * The semaphore devc->io_mutex serializes PCM I/O syscalls. This * is unnecessary in Linux 2.2, because the kernel lock * serializes read, write, and ioctl globally, but it's there, * ready for the brave, new post-kernel-lock world. * * Locking between interrupt and baselevel is handled by the * "lock" spinlock in vwsnd_port (one lock each for read and * write). Each half holds the lock just long enough to see what * area it owns and update its pointers. See pcm_output() and * pcm_input() for most of the gory stuff. * * devc->mix_mutex serializes all mixer ioctls. This is also * redundant because of the kernel lock. * * The lowest level lock is lith->lithium_lock. It is a * spinlock which is held during the two-register tango of * reading/writing an AD1843 register. See * li_{read,write}_ad1843_reg(). */ /* * Sample Format Notes * * Lithium's DMA engine has two formats: 16-bit 2's complement * and 8-bit unsigned . 16-bit transfers the data unmodified, 2 * bytes per sample. 8-bit unsigned transfers 1 byte per sample * and XORs each byte with 0x80. Lithium can input or output * either mono or stereo in either format. 
 *
 * The AD1843 has four formats: 16-bit 2's complement, 8-bit
 * unsigned, 8-bit mu-Law and 8-bit A-Law.
 *
 * This driver supports five formats: AFMT_S8, AFMT_U8,
 * AFMT_MU_LAW, AFMT_A_LAW, and AFMT_S16_LE.
 *
 * For AFMT_U8 output, we keep the AD1843 in 16-bit mode, and
 * rely on Lithium's XOR to translate between U8 and S8.
 *
 * For AFMT_S8, AFMT_MU_LAW and AFMT_A_LAW output, we have to XOR
 * the 0x80 bit in software to compensate for Lithium's XOR.
 * This happens in pcm_copy_{in,out}().
 *
 * Changes:
 * 11-10-2000	Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
 *		Added some __init/__exit
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <asm/visws/cobalt.h>

#include "sound_config.h"

/*****************************************************************************/
/* debug stuff */

#ifdef VWSND_DEBUG

static DEFINE_MUTEX(vwsnd_mutex);
/* shut_up != 0 silences the verbose (DBG*V) debug macros below. */
static int shut_up = 1;

/*
 * dbgassert - called when an assertion fails.
 *
 * In interrupt context the only option is to panic; at base level,
 * print the failing expression and dereference NULL so the offending
 * process dies with a backtrace.
 */
static void dbgassert(const char *fcn, int line, const char *expr)
{
	if (in_interrupt())
		panic("ASSERTION FAILED IN INTERRUPT, %s:%s:%d %s\n",
		      __FILE__, fcn, line, expr);
	else {
		int x;
		printk(KERN_ERR "ASSERTION FAILED, %s:%s:%d %s\n",
		       __FILE__, fcn, line, expr);
		x = * (volatile int *) 0; /* force proc to exit */
	}
}

/*
 * Bunch of useful debug macros:
 *
 *	ASSERT	- print unless e nonzero (panic if in interrupt)
 *	DBGDO	- include arbitrary code if debugging
 *	DBGX	- debug print raw (w/o function name)
 *	DBGP	- debug print w/ function name
 *	DBGE	- debug print function entry
 *	DBGC	- debug print function call
 *	DBGR	- debug print function return
 *	DBGXV	- debug print raw when verbose
 *	DBGPV	- debug print when verbose
 *	DBGEV	- debug print function entry when verbose
 *	DBGRV	- debug print function return when verbose
 */

#define ASSERT(e)	((e) ? \
			 (void) 0 : dbgassert(__func__, __LINE__, #e))
#define DBGDO(x)            x
#define DBGX(fmt, args...)  (in_interrupt() ? 0 : printk(KERN_ERR fmt, ##args))
#define DBGP(fmt, args...)  (DBGX("%s: " fmt, __func__ , ##args))
#define DBGE(fmt, args...)  (DBGX("%s" fmt, __func__ , ##args))
#define DBGC(rtn)           (DBGP("calling %s\n", rtn))
#define DBGR()              (DBGP("returning\n"))
#define DBGXV(fmt, args...) (shut_up ? 0 : DBGX(fmt, ##args))
#define DBGPV(fmt, args...) (shut_up ? 0 : DBGP(fmt, ##args))
#define DBGEV(fmt, args...) (shut_up ? 0 : DBGE(fmt, ##args))
#define DBGCV(rtn)          (shut_up ? 0 : DBGC(rtn))
#define DBGRV()             (shut_up ? 0 : DBGR())

#else /* !VWSND_DEBUG */

/* Non-debug build: all debug macros compile to nothing. */
#define ASSERT(e)           ((void) 0)
#define DBGDO(x)            /* don't */
#define DBGX(fmt, args...)  ((void) 0)
#define DBGP(fmt, args...)  ((void) 0)
#define DBGE(fmt, args...)  ((void) 0)
#define DBGC(rtn)           ((void) 0)
#define DBGR()              ((void) 0)
#define DBGPV(fmt, args...) ((void) 0)
#define DBGXV(fmt, args...) ((void) 0)
#define DBGEV(fmt, args...) ((void) 0)
#define DBGCV(rtn)          ((void) 0)
#define DBGRV()             ((void) 0)

#endif /* !VWSND_DEBUG */

/*****************************************************************************/
/* low level lithium access */

/*
 * We need to talk to Lithium registers on three pages.  Here are
 * the pages' offsets from the base address (0xFF001000).
 */

enum {
	LI_PAGE0_OFFSET = 0x01000 - 0x1000,	/* FF001000 */
	LI_PAGE1_OFFSET = 0x0F000 - 0x1000,	/* FF00F000 */
	LI_PAGE2_OFFSET = 0x10000 - 0x1000,	/* FF010000 */
};

/* low-level lithium data */

typedef struct lithium {
	void *		page0;	/* virtual addresses */
	void *		page1;
	void *		page2;
	spinlock_t	lock;	/* protects codec and UST/MSC access */
} lithium_t;

/*
 * li_destroy destroys the lithium_t structure and vm mappings.
 */

static void li_destroy(lithium_t *lith)
{
	/* Unmap only the pages that were successfully mapped, so this is
	 * safe to call on a partially initialized lithium_t. */
	if (lith->page0) {
		iounmap(lith->page0);
		lith->page0 = NULL;
	}
	if (lith->page1) {
		iounmap(lith->page1);
		lith->page1 = NULL;
	}
	if (lith->page2) {
		iounmap(lith->page2);
		lith->page2 = NULL;
	}
}

/*
 * li_create initializes the lithium_t structure and sets up vm mappings
 * to access the registers.
 * Returns 0 on success, -errno on failure.
 */
static int __init li_create(lithium_t *lith, unsigned long baseaddr)
{
	spin_lock_init(&lith->lock);
	lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE);
	lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE);
	lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE);
	if (!lith->page0 || !lith->page1 || !lith->page2) {
		/* Partial failure: release whatever did map. */
		li_destroy(lith);
		return -ENOMEM;
	}
	return 0;
}

/*
 * basic register accessors - read/write long/byte
 */

static __inline__ unsigned long li_readl(lithium_t *lith, int off)
{
	return * (volatile unsigned long *) (lith->page0 + off);
}

static __inline__ unsigned char li_readb(lithium_t *lith, int off)
{
	return * (volatile unsigned char *) (lith->page0 + off);
}

static __inline__ void li_writel(lithium_t *lith, int off, unsigned long val)
{
	* (volatile unsigned long *) (lith->page0 + off) = val;
}

static __inline__ void li_writeb(lithium_t *lith, int off, unsigned char val)
{
	* (volatile unsigned char *) (lith->page0 + off) = val;
}

/*****************************************************************************/
/* High Level Lithium Access */

/*
 * Lithium DMA Notes
 *
 * Lithium has two dedicated DMA channels for audio.  They are known
 * as comm1 and comm2 (communication areas 1 and 2).  Comm1 is for
 * input, and comm2 is for output.  Each is controlled by three
 * registers: BASE (base address), CFG (config) and CCTL
 * (config/control).
 *
 * Each DMA channel points to a physically contiguous ring buffer in
 * main memory of up to 8 Kbytes.  (This driver always uses 8 Kb.)
 * There are three pointers into the ring buffer: read, write, and
 * trigger.  The pointers are 8 bits each.  Each pointer points to
 * 32-byte "chunks" of data.  The DMA engine moves 32 bytes at a time,
 * so there is no finer-granularity control.
 *
 * In comm1, the hardware updates the write ptr, and software updates
 * the read ptr.  In comm2, it's the opposite: hardware updates the
 * read ptr, and software updates the write ptr.  I designate the
 * hardware-updated ptr as the hwptr, and the software-updated ptr as
 * the swptr.
 *
 * The trigger ptr and trigger mask are used to trigger interrupts.
 * From the Lithium spec, section 5.6.8, revision of 12/15/1998:
 *
 *	Trigger Mask Value
 *
 *	A three bit wide field that represents a power of two mask
 *	that is used whenever the trigger pointer is compared to its
 *	respective read or write pointer.  A value of zero here
 *	implies a mask of 0xFF and a value of seven implies a mask
 *	0x01.  This value can be used to sub-divide the ring buffer
 *	into pie sections so that interrupts monitor the progress of
 *	hardware from section to section.
 *
 * My interpretation of that is, whenever the hw ptr is updated, it is
 * compared with the trigger ptr, and the result is masked by the
 * trigger mask.  (Actually, by the complement of the trigger mask.)
 * If the result is zero, an interrupt is triggered.  I.e., interrupt
 * if ((hwptr & ~mask) == (trptr & ~mask)).  The mask is formed from
 * the trigger register value as mask = (1 << (8 - tmreg)) - 1.
 *
 * In yet different words, setting tmreg to 0 causes an interrupt after
 * every 256 DMA chunks (8192 bytes) or once per traversal of the
 * ring buffer.  Setting it to 7 causes an interrupt every 2 DMA chunks
 * (64 bytes) or 128 times per traversal of the ring buffer.
 */

/* Lithium register offsets and bit definitions */

#define LI_HOST_CONTROLLER	0x000
# define LI_HC_RESET		 0x00008000
# define LI_HC_LINK_ENABLE	 0x00004000
# define LI_HC_LINK_FAILURE	 0x00000004
# define LI_HC_LINK_CODEC	 0x00000002
# define LI_HC_LINK_READY	 0x00000001

#define LI_INTR_STATUS		0x010
#define LI_INTR_MASK		0x014
# define LI_INTR_LINK_ERR	 0x00008000
# define LI_INTR_COMM2_TRIG	 0x00000008
# define LI_INTR_COMM2_UNDERFLOW 0x00000004
# define LI_INTR_COMM1_TRIG	 0x00000002
# define LI_INTR_COMM1_OVERFLOW	 0x00000001

#define LI_CODEC_COMMAND	0x018
# define LI_CC_BUSY		 0x00008000
# define LI_CC_DIR		 0x00000080
# define LI_CC_DIR_RD		 LI_CC_DIR
# define LI_CC_DIR_WR		 (!LI_CC_DIR)
# define LI_CC_ADDR_MASK	 0x0000007F

#define LI_CODEC_DATA		0x01C

#define LI_COMM1_BASE		0x100
#define LI_COMM1_CTL		0x104
# define LI_CCTL_RESET		 0x80000000
# define LI_CCTL_SIZE		 0x70000000
# define LI_CCTL_DMA_ENABLE	 0x08000000
# define LI_CCTL_TMASK		 0x07000000	/* trigger mask */
# define LI_CCTL_TPTR		 0x00FF0000	/* trigger pointer */
# define LI_CCTL_RPTR		 0x0000FF00
# define LI_CCTL_WPTR		 0x000000FF
#define LI_COMM1_CFG		0x108
# define LI_CCFG_LOCK		 0x00008000
# define LI_CCFG_SLOT		 0x00000070
# define LI_CCFG_DIRECTION	 0x00000008
# define LI_CCFG_DIR_IN		 (!LI_CCFG_DIRECTION)
# define LI_CCFG_DIR_OUT	 LI_CCFG_DIRECTION
# define LI_CCFG_MODE		 0x00000004
# define LI_CCFG_MODE_MONO	 (!LI_CCFG_MODE)
# define LI_CCFG_MODE_STEREO	 LI_CCFG_MODE
# define LI_CCFG_FORMAT		 0x00000003
# define LI_CCFG_FMT_8BIT	 0x00000000
# define LI_CCFG_FMT_16BIT	 0x00000001
#define LI_COMM2_BASE		0x10C
#define LI_COMM2_CTL		0x110
	/* bit definitions are the same as LI_COMM1_CTL */
#define LI_COMM2_CFG		0x114
	/* bit definitions are the same as LI_COMM1_CFG */

#define LI_UST_LOW		0x200	/* 64-bit Unadjusted System Time is */
#define LI_UST_HIGH		0x204	/* microseconds since boot */

#define LI_AUDIO1_UST		0x300	/* UST-MSC pairs */
#define LI_AUDIO1_MSC		0x304	/* MSC (Media Stream Counter) */
#define LI_AUDIO2_UST		0x308	/* counts samples
					   actually */
#define LI_AUDIO2_MSC		0x30C	/* processed as of time UST */

/*
 * Lithium's DMA engine operates on chunks of 32 bytes.  We call that
 * a DMACHUNK.
 */

#define DMACHUNK_SHIFT 5
#define DMACHUNK_SIZE (1 << DMACHUNK_SHIFT)
#define BYTES_TO_CHUNKS(bytes) ((bytes) >> DMACHUNK_SHIFT)
#define CHUNKS_TO_BYTES(chunks) ((chunks) << DMACHUNK_SHIFT)

/*
 * Two convenient macros to shift bitfields into/out of position.
 *
 * Observe that (mask & -mask) is (1 << low_set_bit_of(mask)).
 * As long as mask is constant, we trust the compiler will change the
 * multiply and divide into shifts.
 */

#define SHIFT_FIELD(val, mask) (((val) * ((mask) & -(mask))) & (mask))
#define UNSHIFT_FIELD(val, mask) (((val) & (mask)) / ((mask) & -(mask)))

/*
 * dma_chan_desc is invariant information about a Lithium
 * DMA channel.  There are two instances, li_comm1 and li_comm2.
 *
 * Note that the CCTL register fields are write ptr and read ptr, but what
 * we care about are which pointer is updated by software and which by
 * hardware.
 */

typedef struct dma_chan_desc {
	int basereg;			/* BASE register offset */
	int cfgreg;			/* CFG register offset */
	int ctlreg;			/* CCTL register offset */
	int hwptrreg;			/* offset of the hardware-updated ptr */
	int swptrreg;			/* offset of the software-updated ptr */
	int ustreg;			/* UST register offset */
	int mscreg;			/* MSC register offset */
	unsigned long swptrmask;	/* sw ptr field within ctlval */
	int ad1843_slot;		/* AD1843 serial slot */
	int direction;			/* LI_CCTL_DIR_IN/OUT */
} dma_chan_desc_t;

static const dma_chan_desc_t li_comm1 = {
	LI_COMM1_BASE,			/* base register offset */
	LI_COMM1_CFG,			/* config register offset */
	LI_COMM1_CTL,			/* control register offset */
	LI_COMM1_CTL + 0,		/* hw ptr reg offset (write ptr) */
	LI_COMM1_CTL + 1,		/* sw ptr reg offset (read ptr) */
	LI_AUDIO1_UST,			/* ust reg offset */
	LI_AUDIO1_MSC,			/* msc reg offset */
	LI_CCTL_RPTR,			/* sw ptr bitmask in ctlval */
	2,				/* ad1843 serial slot */
	LI_CCFG_DIR_IN			/* direction */
};

static const dma_chan_desc_t li_comm2 = {
	LI_COMM2_BASE,			/* base register offset */
	LI_COMM2_CFG,			/* config register offset */
	LI_COMM2_CTL,			/* control register offset */
	LI_COMM2_CTL + 1,		/* hw ptr reg offset (read ptr) */
	LI_COMM2_CTL + 0,		/* sw ptr reg offset (write ptr) */
	LI_AUDIO2_UST,			/* ust reg offset */
	LI_AUDIO2_MSC,			/* msc reg offset */
	LI_CCTL_WPTR,			/* sw ptr bitmask in ctlval */
	2,				/* ad1843 serial slot */
	LI_CCFG_DIR_OUT			/* direction */
};

/*
 * dma_chan is variable information about a Lithium DMA channel.
 *
 * The desc field points to invariant information.
 * The lith field points to a lithium_t which is passed
 * to li_read* and li_write* to access the registers.
 * The *val fields shadow the lithium registers' contents.
 */

typedef struct dma_chan {
	const dma_chan_desc_t *desc;
	lithium_t	*lith;
	unsigned long	baseval;
	unsigned long	cfgval;
	unsigned long	ctlval;
} dma_chan_t;

/*
 * ustmsc is a UST/MSC pair (Unadjusted System Time/Media Stream Counter).
 * UST is time in microseconds since the system booted, and MSC is a
 * counter that increments with every audio sample.
 */

typedef struct ustmsc {
	unsigned long long ust;
	unsigned long msc;
} ustmsc_t;

/*
 * li_ad1843_wait waits until lithium says the AD1843 register
 * exchange is not busy.  Returns 0 on success, -EBUSY on timeout.
 *
 * Locking: must be called with lithium_lock held.
 */
static int li_ad1843_wait(lithium_t *lith)
{
	/* Busy-wait for at most ~2 jiffies before giving up. */
	unsigned long later = jiffies + 2;
	while (li_readl(lith, LI_CODEC_COMMAND) & LI_CC_BUSY)
		if (time_after_eq(jiffies, later))
			return -EBUSY;
	return 0;
}

/*
 * li_read_ad1843_reg returns the current contents of a 16 bit AD1843 register.
 *
 * Returns unsigned register value on success, -errno on failure.
 */
static int li_read_ad1843_reg(lithium_t *lith, int reg)
{
	int val;

	ASSERT(!in_interrupt());
	spin_lock(&lith->lock);
	{
		/* Wait for idle, issue the read command, wait for it to
		 * complete, then fetch the data word. */
		val = li_ad1843_wait(lith);
		if (val == 0) {
			li_writel(lith, LI_CODEC_COMMAND, LI_CC_DIR_RD | reg);
			val = li_ad1843_wait(lith);
		}
		if (val == 0)
			val = li_readl(lith, LI_CODEC_DATA);
	}
	spin_unlock(&lith->lock);

	DBGXV("li_read_ad1843_reg(lith=0x%p, reg=%d) returns 0x%04x\n",
	      lith, reg, val);

	return val;
}

/*
 * li_write_ad1843_reg writes the specified value to a 16 bit AD1843 register.
 */
static void li_write_ad1843_reg(lithium_t *lith, int reg, int newval)
{
	spin_lock(&lith->lock);
	{
		/* The data word must be staged before issuing the write
		 * command.  A wait timeout silently drops the write. */
		if (li_ad1843_wait(lith) == 0) {
			li_writel(lith, LI_CODEC_DATA, newval);
			li_writel(lith, LI_CODEC_COMMAND, LI_CC_DIR_WR | reg);
		}
	}
	spin_unlock(&lith->lock);
}

/*
 * li_setup_dma calculates all the register settings for DMA in a particular
 * mode.  It takes too many arguments.
 */
static void li_setup_dma(dma_chan_t *chan,
			 const dma_chan_desc_t *desc,
			 lithium_t *lith,
			 unsigned long buffer_paddr,
			 int bufshift,
			 int fragshift,
			 int channels,
			 int sampsize)
{
	unsigned long mode, format;
	unsigned long size, tmask;

	DBGEV("(chan=0x%p, desc=0x%p, lith=0x%p, buffer_paddr=0x%lx, "
	      "bufshift=%d, fragshift=%d, channels=%d, sampsize=%d)\n",
	      chan, desc, lith, buffer_paddr,
	      bufshift, fragshift, channels, sampsize);

	/* Reset the channel first.
	 */

	li_writel(lith, desc->ctlreg, LI_CCTL_RESET);

	ASSERT(channels == 1 || channels == 2);
	if (channels == 2)
		mode = LI_CCFG_MODE_STEREO;
	else
		mode = LI_CCFG_MODE_MONO;
	ASSERT(sampsize == 1 || sampsize == 2);
	if (sampsize == 2)
		format = LI_CCFG_FMT_16BIT;
	else
		format = LI_CCFG_FMT_8BIT;
	chan->desc = desc;
	chan->lith = lith;

	/*
	 * Lithium DMA address register takes a 40-bit physical
	 * address, right-shifted by 8 so it fits in 32 bits.  Bit 37
	 * must be set -- it enables cache coherence.
	 */

	ASSERT(!(buffer_paddr & 0xFF));
	chan->baseval = (buffer_paddr >> 8) | 1 << (37 - 8);

	chan->cfgval = ((chan->cfgval & ~LI_CCFG_LOCK) |
			SHIFT_FIELD(desc->ad1843_slot, LI_CCFG_SLOT) |
			desc->direction |
			mode |
			format);

	size = bufshift - 6;
	tmask = 13 - fragshift;		/* See Lithium DMA Notes above. */
	ASSERT(size >= 2 && size <= 7);
	ASSERT(tmask >= 1 && tmask <= 7);
	chan->ctlval = ((chan->ctlval & ~LI_CCTL_RESET) |
			SHIFT_FIELD(size, LI_CCTL_SIZE) |
			(chan->ctlval & ~LI_CCTL_DMA_ENABLE) |
			SHIFT_FIELD(tmask, LI_CCTL_TMASK) |
			SHIFT_FIELD(0, LI_CCTL_TPTR));

	DBGPV("basereg 0x%x = 0x%lx\n", desc->basereg, chan->baseval);
	DBGPV("cfgreg 0x%x = 0x%lx\n", desc->cfgreg, chan->cfgval);
	DBGPV("ctlreg 0x%x = 0x%lx\n", desc->ctlreg, chan->ctlval);

	/* Push all three shadow values to the hardware. */
	li_writel(lith, desc->basereg, chan->baseval);
	li_writel(lith, desc->cfgreg, chan->cfgval);
	li_writel(lith, desc->ctlreg, chan->ctlval);

	DBGRV();
}

/* Disable DMA on a channel and work around a hardware quirk (below). */
static void li_shutdown_dma(dma_chan_t *chan)
{
	lithium_t *lith = chan->lith;
	void * lith1 = lith->page1;

	DBGEV("(chan=0x%p)\n", chan);

	chan->ctlval &= ~LI_CCTL_DMA_ENABLE;
	DBGPV("ctlreg 0x%x = 0x%lx\n", chan->desc->ctlreg, chan->ctlval);
	li_writel(lith, chan->desc->ctlreg, chan->ctlval);

	/*
	 * Offset 0x500 on Lithium page 1 is an undocumented,
	 * unsupported register that holds the zero sample value.
	 * Lithium is supposed to output zero samples when DMA is
	 * inactive, and repeat the last sample when DMA underflows.
	 * But it has a bug, where, after underflow occurs, the zero
	 * sample is not reset.
	 *
	 * I expect this to break in a future rev of Lithium.
	 */
	if (lith1 && chan->desc->direction == LI_CCFG_DIR_OUT)
		* (volatile unsigned long *) (lith1 + 0x500) = 0;
}

/*
 * li_activate_dma always starts dma at the beginning of the buffer.
 *
 * N.B., these may be called from interrupt.
 */
static __inline__ void li_activate_dma(dma_chan_t *chan)
{
	chan->ctlval |= LI_CCTL_DMA_ENABLE;
	DBGPV("ctlval = 0x%lx\n", chan->ctlval);
	li_writel(chan->lith, chan->desc->ctlreg, chan->ctlval);
}

static void li_deactivate_dma(dma_chan_t *chan)
{
	lithium_t *lith = chan->lith;
	void * lith2 = lith->page2;

	/* Clear the enable bit and both ring pointers in the shadow copy,
	 * then push it to the hardware. */
	chan->ctlval &= ~(LI_CCTL_DMA_ENABLE | LI_CCTL_RPTR | LI_CCTL_WPTR);
	DBGPV("ctlval = 0x%lx\n", chan->ctlval);
	DBGPV("ctlreg 0x%x = 0x%lx\n", chan->desc->ctlreg, chan->ctlval);
	li_writel(lith, chan->desc->ctlreg, chan->ctlval);

	/*
	 * Offsets 0x98 and 0x9C on Lithium page 2 are undocumented,
	 * unsupported registers that are internal copies of the DMA
	 * read and write pointers.  Because of a Lithium bug, these
	 * registers aren't zeroed correctly when DMA is shut off.  So
	 * we whack them directly.
	 *
	 * I expect this to break in a future rev of Lithium.
	 */
	if (lith2 && chan->desc->direction == LI_CCFG_DIR_OUT) {
		* (volatile unsigned long *) (lith2 + 0x98) = 0;
		* (volatile unsigned long *) (lith2 + 0x9C) = 0;
	}
}

/*
 * read/write the ring buffer pointers.  These routines' arguments and results
 * are byte offsets from the beginning of the ring buffer.
 */

static __inline__ int li_read_swptr(dma_chan_t *chan)
{
	const unsigned long mask = chan->desc->swptrmask;

	/* The sw ptr is read from the shadow, not the hardware. */
	return CHUNKS_TO_BYTES(UNSHIFT_FIELD(chan->ctlval, mask));
}

static __inline__ int li_read_hwptr(dma_chan_t *chan)
{
	return CHUNKS_TO_BYTES(li_readb(chan->lith,
					chan->desc->hwptrreg));
}

static __inline__ void li_write_swptr(dma_chan_t *chan, int val)
{
	const unsigned long mask = chan->desc->swptrmask;

	/* val must be chunk-aligned and fit in the 8-bit pointer field. */
	ASSERT(!(val & ~CHUNKS_TO_BYTES(0xFF)));
	val = BYTES_TO_CHUNKS(val);

	/* Update the shadow copy and the hardware register together. */
	chan->ctlval = (chan->ctlval & ~mask) | SHIFT_FIELD(val, mask);
	li_writeb(chan->lith, chan->desc->swptrreg, val);
}

/* li_read_USTMSC() returns a UST/MSC pair for the given channel. */

static void li_read_USTMSC(dma_chan_t *chan, ustmsc_t *ustmsc)
{
	lithium_t *lith = chan->lith;
	const dma_chan_desc_t *desc = chan->desc;
	unsigned long now_low, now_high0, now_high1, chan_ust;

	spin_lock(&lith->lock);
	{
		/*
		 * retry until we do all five reads without the
		 * high word changing.  (High word increments
		 * every 2^32 microseconds, i.e., not often)
		 */
		do {
			now_high0 = li_readl(lith, LI_UST_HIGH);
			/* NOTE(review): now_low is read but never used
			 * below — presumably kept to complete the five-read
			 * sequence described above; confirm. */
			now_low = li_readl(lith, LI_UST_LOW);

			/*
			 * Lithium guarantees these two reads will be
			 * atomic -- ust will not increment after msc
			 * is read.
			 */

			ustmsc->msc = li_readl(lith, desc->mscreg);
			chan_ust = li_readl(lith, desc->ustreg);

			now_high1 = li_readl(lith, LI_UST_HIGH);
		} while (now_high0 != now_high1);
	}
	spin_unlock(&lith->lock);
	ustmsc->ust = ((unsigned long long) now_high0 << 32 | chan_ust);
}

static void li_enable_interrupts(lithium_t *lith, unsigned int mask)
{
	DBGEV("(lith=0x%p, mask=0x%x)\n", lith, mask);

	/* clear any already-pending interrupts. */
	li_writel(lith, LI_INTR_STATUS, mask);

	/* enable the interrupts.
	 */
	mask |= li_readl(lith, LI_INTR_MASK);
	li_writel(lith, LI_INTR_MASK, mask);
}

static void li_disable_interrupts(lithium_t *lith, unsigned int mask)
{
	unsigned int keepmask;

	DBGEV("(lith=0x%p, mask=0x%x)\n", lith, mask);

	/* disable the interrupts */
	keepmask = li_readl(lith, LI_INTR_MASK) & ~mask;
	li_writel(lith, LI_INTR_MASK, keepmask);

	/* clear any pending interrupts. */
	li_writel(lith, LI_INTR_STATUS, mask);
}

/* Get the interrupt status and clear all pending interrupts. */

static unsigned int li_get_clear_intr_status(lithium_t *lith)
{
	unsigned int status;

	status = li_readl(lith, LI_INTR_STATUS);
	li_writel(lith, LI_INTR_STATUS, ~0);
	/* Report only the interrupts that are actually enabled. */
	return status & li_readl(lith, LI_INTR_MASK);
}

/* Bring the Lithium/AD1843 link out of reset.  Always returns 0. */
static int li_init(lithium_t *lith)
{
	/* 1. System power supplies stabilize. */

	/* 2. Assert the ~RESET signal. */

	li_writel(lith, LI_HOST_CONTROLLER, LI_HC_RESET);
	udelay(1);

	/* 3. Deassert the ~RESET signal and enter a wait period to allow
	   the AD1843 internal clocks and the external crystal oscillator
	   to stabilize. */

	li_writel(lith, LI_HOST_CONTROLLER, LI_HC_LINK_ENABLE);
	udelay(1);

	return 0;
}

/*****************************************************************************/
/* AD1843 access */

/*
 * AD1843 bitfield definitions.  All are named as in the AD1843 data
 * sheet, with ad1843_ prepended and individual bit numbers removed.
 *
 * E.g., bits LSS0 through LSS2 become ad1843_LSS.
 *
 * Only the bitfields we need are defined.
 */

typedef struct ad1843_bitfield {
	char reg;		/* AD1843 register number */
	char lo_bit;		/* lowest bit of the field */
	char nbits;		/* width of the field in bits */
} ad1843_bitfield_t;

static const ad1843_bitfield_t
	ad1843_PDNO   = {  0, 14,  1 },	/* Converter Power-Down Flag */
	ad1843_INIT   = {  0, 15,  1 },	/* Clock Initialization Flag */
	ad1843_RIG    = {  2,  0,  4 },	/* Right ADC Input Gain */
	ad1843_RMGE   = {  2,  4,  1 },	/* Right ADC Mic Gain Enable */
	ad1843_RSS    = {  2,  5,  3 },	/* Right ADC Source Select */
	ad1843_LIG    = {  2,  8,  4 },	/* Left ADC Input Gain */
	ad1843_LMGE   = {  2, 12,  1 },	/* Left ADC Mic Gain Enable */
	ad1843_LSS    = {  2, 13,  3 },	/* Left ADC Source Select */
	ad1843_RX1M   = {  4,  0,  5 },	/* Right Aux 1 Mix Gain/Atten */
	ad1843_RX1MM  = {  4,  7,  1 },	/* Right Aux 1 Mix Mute */
	ad1843_LX1M   = {  4,  8,  5 },	/* Left Aux 1 Mix Gain/Atten */
	ad1843_LX1MM  = {  4, 15,  1 },	/* Left Aux 1 Mix Mute */
	ad1843_RX2M   = {  5,  0,  5 },	/* Right Aux 2 Mix Gain/Atten */
	ad1843_RX2MM  = {  5,  7,  1 },	/* Right Aux 2 Mix Mute */
	ad1843_LX2M   = {  5,  8,  5 },	/* Left Aux 2 Mix Gain/Atten */
	ad1843_LX2MM  = {  5, 15,  1 },	/* Left Aux 2 Mix Mute */
	ad1843_RMCM   = {  7,  0,  5 },	/* Right Mic Mix Gain/Atten */
	ad1843_RMCMM  = {  7,  7,  1 },	/* Right Mic Mix Mute */
	ad1843_LMCM   = {  7,  8,  5 },	/* Left Mic Mix Gain/Atten */
	ad1843_LMCMM  = {  7, 15,  1 },	/* Left Mic Mix Mute */
	ad1843_HPOS   = {  8,  4,  1 },	/* Headphone Output Voltage Swing */
	ad1843_HPOM   = {  8,  5,  1 },	/* Headphone Output Mute */
	ad1843_RDA1G  = {  9,  0,  6 },	/* Right DAC1 Analog/Digital Gain */
	ad1843_RDA1GM = {  9,  7,  1 },	/* Right DAC1 Analog Mute */
	ad1843_LDA1G  = {  9,  8,  6 },	/* Left DAC1 Analog/Digital Gain */
	ad1843_LDA1GM = {  9, 15,  1 },	/* Left DAC1 Analog Mute */
	ad1843_RDA1AM = { 11,  7,  1 },	/* Right DAC1 Digital Mute */
	ad1843_LDA1AM = { 11, 15,  1 },	/* Left DAC1 Digital Mute */
	ad1843_ADLC   = { 15,  0,  2 },	/* ADC Left Sample Rate Source */
	ad1843_ADRC   = { 15,  2,  2 },	/* ADC Right Sample Rate Source */
	ad1843_DA1C   = { 15,  8,  2 },	/* DAC1 Sample Rate Source */
	ad1843_C1C    = { 17,  0, 16 },	/* Clock 1 Sample Rate Select */
	ad1843_C2C    = { 20,  0, 16 },	/* Clock 2 Sample Rate Select */
	ad1843_DAADL  = { 25,  4,  2 },	/* Digital ADC Left Source Select */
	ad1843_DAADR  = { 25,  6,  2 },	/* Digital ADC Right Source Select */
	ad1843_DRSFLT = { 25, 15,  1 },	/* Digital Resampler Filter Mode */
	ad1843_ADLF   = { 26,  0,  2 },	/* ADC Left Channel Data Format */
	ad1843_ADRF   = { 26,  2,  2 },	/* ADC Right Channel Data Format */
	ad1843_ADTLK  = { 26,  4,  1 },	/* ADC Transmit Lock Mode Select */
	ad1843_SCF    = { 26,  7,  1 },	/* SCLK Frequency Select */
	ad1843_DA1F   = { 26,  8,  2 },	/* DAC1 Data Format Select */
	ad1843_DA1SM  = { 26, 14,  1 },	/* DAC1 Stereo/Mono Mode Select */
	ad1843_ADLEN  = { 27,  0,  1 },	/* ADC Left Channel Enable */
	ad1843_ADREN  = { 27,  1,  1 },	/* ADC Right Channel Enable */
	ad1843_AAMEN  = { 27,  4,  1 },	/* Analog to Analog Mix Enable */
	ad1843_ANAEN  = { 27,  7,  1 },	/* Analog Channel Enable */
	ad1843_DA1EN  = { 27,  8,  1 },	/* DAC1 Enable */
	ad1843_DA2EN  = { 27,  9,  1 },	/* DAC2 Enable */
	ad1843_C1EN   = { 28, 11,  1 },	/* Clock Generator 1 Enable */
	ad1843_C2EN   = { 28, 12,  1 },	/* Clock Generator 2 Enable */
	ad1843_PDNI   = { 28, 15,  1 };	/* Converter Power Down */

/*
 * The various registers of the AD1843 use three different formats for
 * specifying gain.  The ad1843_gain structure parameterizes the
 * formats.
 */

typedef struct ad1843_gain {
	int	negative;		/* nonzero if gain is negative. */
	const ad1843_bitfield_t *lfield;	/* left channel field */
	const ad1843_bitfield_t *rfield;	/* right channel field */
} ad1843_gain_t;

static const ad1843_gain_t ad1843_gain_RECLEV
	= { 0, &ad1843_LIG, &ad1843_RIG };
static const ad1843_gain_t ad1843_gain_LINE
	= { 1, &ad1843_LX1M, &ad1843_RX1M };
static const ad1843_gain_t ad1843_gain_CD
	= { 1, &ad1843_LX2M, &ad1843_RX2M };
static const ad1843_gain_t ad1843_gain_MIC
	= { 1, &ad1843_LMCM, &ad1843_RMCM };
static const ad1843_gain_t ad1843_gain_PCM
	= { 1, &ad1843_LDA1G, &ad1843_RDA1G };

/* read the current value of an AD1843 bitfield.
 */
static int ad1843_read_bits(lithium_t *lith, const ad1843_bitfield_t *field)
{
	int w = li_read_ad1843_reg(lith, field->reg);
	int val = w >> field->lo_bit & ((1 << field->nbits) - 1);

	DBGXV("ad1843_read_bits(lith=0x%p, field->{%d %d %d}) returns 0x%x\n",
	      lith, field->reg, field->lo_bit, field->nbits, val);

	return val;
}

/*
 * write a new value to an AD1843 bitfield and return the old value.
 */
static int ad1843_write_bits(lithium_t *lith,
			     const ad1843_bitfield_t *field,
			     int newval)
{
	int w = li_read_ad1843_reg(lith, field->reg);
	int mask = ((1 << field->nbits) - 1) << field->lo_bit;
	int oldval = (w & mask) >> field->lo_bit;
	int newbits = (newval << field->lo_bit) & mask;
	w = (w & ~mask) | newbits;
	(void) li_write_ad1843_reg(lith, field->reg, w);

	DBGXV("ad1843_write_bits(lith=0x%p, field->{%d %d %d}, val=0x%x) "
	      "returns 0x%x\n",
	      lith, field->reg, field->lo_bit, field->nbits, newval, oldval);

	return oldval;
}

/*
 * ad1843_read_multi reads multiple bitfields from the same AD1843
 * register.  It uses a single read cycle to do it.  (Reading the
 * ad1843 requires 256 bit times at 12.288 MHz, or nearly 20
 * microseconds.)
 *
 * Called like this.
 *
 *	ad1843_read_multi(lith, nfields,
 *			  &ad1843_FIELD1, &val1,
 *			  &ad1843_FIELD2, &val2, ...);
 *
 * All fields must live in the same register (asserted below).
 */
static void ad1843_read_multi(lithium_t *lith, int argcount, ...)
{
	va_list ap;
	const ad1843_bitfield_t *fp;
	int w = 0, mask, *value, reg = -1;

	va_start(ap, argcount);
	while (--argcount >= 0) {
		fp = va_arg(ap, const ad1843_bitfield_t *);
		value = va_arg(ap, int *);
		if (reg == -1) {
			/* First field determines the register; read it once. */
			reg = fp->reg;
			w = li_read_ad1843_reg(lith, reg);
		}
		ASSERT(reg == fp->reg);
		mask = (1 << fp->nbits) - 1;
		*value = w >> fp->lo_bit & mask;
	}
	va_end(ap);
}

/*
 * ad1843_write_multi stores multiple bitfields into the same AD1843
 * register.  It uses one read and one write cycle to do it.
 *
 * Called like this.
 *
 *	ad1843_write_multi(lith, nfields,
 *			   &ad1843_FIELD1, val1,
 *			   &ad1843_FIELD2, val2, ...);
 *
 * All fields must live in the same register (asserted below).
 */
static void ad1843_write_multi(lithium_t *lith, int argcount, ...)
{
	va_list ap;
	int reg;
	const ad1843_bitfield_t *fp;
	int value;
	int w, m, mask, bits;

	mask = 0;
	bits = 0;
	reg = -1;

	va_start(ap, argcount);
	while (--argcount >= 0) {
		fp = va_arg(ap, const ad1843_bitfield_t *);
		value = va_arg(ap, int);
		if (reg == -1)
			reg = fp->reg;
		ASSERT(fp->reg == reg);
		m = ((1 << fp->nbits) - 1) << fp->lo_bit;
		mask |= m;
		bits |= (value << fp->lo_bit) & m;
	}
	va_end(ap);
	ASSERT(!(bits & ~mask));

	/* Skip the (slow) read cycle when the fields cover the whole
	 * 16-bit register. */
	if (~mask & 0xFFFF)
		w = li_read_ad1843_reg(lith, reg);
	else
		w = 0;
	w = (w & ~mask) | bits;
	(void) li_write_ad1843_reg(lith, reg, w);
}

/*
 * ad1843_get_gain reads the specified register and extracts the gain value
 * using the supplied gain type.  It returns the gain in OSS format
 * (left level in bits 0..7, right level in bits 8..15, each 0..100).
 */
static int ad1843_get_gain(lithium_t *lith, const ad1843_gain_t *gp)
{
	int lg, rg;
	unsigned short mask = (1 << gp->lfield->nbits) - 1;

	ad1843_read_multi(lith, 2, gp->lfield, &lg, gp->rfield, &rg);
	if (gp->negative) {
		lg = mask - lg;
		rg = mask - rg;
	}
	/* Scale the hardware range onto 0..100, rounding to nearest. */
	lg = (lg * 100 + (mask >> 1)) / mask;
	rg = (rg * 100 + (mask >> 1)) / mask;
	return lg << 0 | rg << 8;
}

/*
 * Set an audio channel's gain.  Converts from OSS format to AD1843's
 * format.
 *
 * Returns the new gain, which may be lower than the old gain
 * (because of rounding to the hardware's resolution), or -EINVAL
 * if either channel level is outside 0..100.
 */
static int ad1843_set_gain(lithium_t *lith, const ad1843_gain_t *gp,
			   int newval)
{
	unsigned short mask = (1 << gp->lfield->nbits) - 1;
	int lg = newval >> 0 & 0xFF;
	int rg = newval >> 8;

	if (lg < 0 || lg > 100 || rg < 0 || rg > 100)
		return -EINVAL;
	/* Scale 0..100 onto the hardware range, rounding to nearest. */
	lg = (lg * mask + (mask >> 1)) / 100;
	rg = (rg * mask + (mask >> 1)) / 100;
	if (gp->negative) {
		lg = mask - lg;
		rg = mask - rg;
	}
	ad1843_write_multi(lith, 2, gp->lfield, lg, gp->rfield, rg);
	return ad1843_get_gain(lith, gp);
}

/*
 * Returns the current recording source, in OSS format.
 */
static int ad1843_get_recsrc(lithium_t *lith)
{
	int ls = ad1843_read_bits(lith, &ad1843_LSS);

	switch (ls) {
	case 1:
		return SOUND_MASK_MIC;
	case 2:
		return SOUND_MASK_LINE;
	case 3:
		return SOUND_MASK_CD;
	case 6:
		return SOUND_MASK_PCM;
	default:
		ASSERT(0);
		return -1;
	}
}

/*
 * Enable/disable digital resample mode in the AD1843.
 *
 * The AD1843 requires that ADL, ADR, DA1 and DA2 be powered down
 * while switching modes.  So we save DA1's state (DA2's state is not
 * interesting), power them down, switch into/out of resample mode,
 * power them up, and restore state.
 *
 * This will cause audible glitches if D/A or A/D is going on, so the
 * driver disallows that (in mixer_write_ioctl()).
 *
 * The open question is, is this worth doing?  I'm leaving it in,
 * because it's written, but...
 */
static void ad1843_set_resample_mode(lithium_t *lith, int onoff)
{
	/* Save DA1 mute and gain (addr 9 is DA1 analog gain/attenuation) */
	int save_da1 = li_read_ad1843_reg(lith, 9);

	/* Power down A/D and D/A. */
	ad1843_write_multi(lith, 4,
			   &ad1843_DA1EN, 0,
			   &ad1843_DA2EN, 0,
			   &ad1843_ADLEN, 0,
			   &ad1843_ADREN, 0);

	/* Switch mode */
	ASSERT(onoff == 0 || onoff == 1);
	ad1843_write_bits(lith, &ad1843_DRSFLT, onoff);

	/* Power up A/D and D/A. */
	ad1843_write_multi(lith, 3,
			   &ad1843_DA1EN, 1,
			   &ad1843_ADLEN, 1,
			   &ad1843_ADREN, 1);

	/* Restore DA1 mute and gain. */
	li_write_ad1843_reg(lith, 9, save_da1);
}

/*
 * Set recording source.  Arg newsrc specifies an OSS channel mask.
 *
 * The complication is that when we switch into/out of loopback mode
 * (i.e., src = SOUND_MASK_PCM), we change the AD1843 into/out of
 * digital resampling mode.
 *
 * Returns newsrc on success, -errno on failure.
 */
static int ad1843_set_recsrc(lithium_t *lith, int newsrc)
{
	int bits;
	int oldbits;

	switch (newsrc) {
	case SOUND_MASK_PCM:
		bits = 6;
		break;
	case SOUND_MASK_MIC:
		bits = 1;
		break;
	case SOUND_MASK_LINE:
		bits = 2;
		break;
	case SOUND_MASK_CD:
		bits = 3;
		break;
	default:
		return -EINVAL;
	}
	oldbits = ad1843_read_bits(lith, &ad1843_LSS);
	if (newsrc == SOUND_MASK_PCM && oldbits != 6) {
		DBGP("enabling digital resample mode\n");
		ad1843_set_resample_mode(lith, 1);
		ad1843_write_multi(lith, 2,
				   &ad1843_DAADL, 2,
				   &ad1843_DAADR, 2);
	} else if (newsrc != SOUND_MASK_PCM && oldbits == 6) {
		DBGP("disabling digital resample mode\n");
		ad1843_set_resample_mode(lith, 0);
		ad1843_write_multi(lith, 2,
				   &ad1843_DAADL, 0,
				   &ad1843_DAADR, 0);
	}
	ad1843_write_multi(lith, 2, &ad1843_LSS, bits, &ad1843_RSS, bits);
	return newsrc;
}

/*
 * Return current output sources, in OSS format.
 * A source is "on" when its left-channel mute bit is clear.
 */
static int ad1843_get_outsrc(lithium_t *lith)
{
	int pcm, line, mic, cd;

	pcm  = ad1843_read_bits(lith, &ad1843_LDA1GM) ? 0 : SOUND_MASK_PCM;
	line = ad1843_read_bits(lith, &ad1843_LX1MM)  ? 0 : SOUND_MASK_LINE;
	cd   = ad1843_read_bits(lith, &ad1843_LX2MM)  ? 0 : SOUND_MASK_CD;
	mic  = ad1843_read_bits(lith, &ad1843_LMCMM)  ? 0 : SOUND_MASK_MIC;

	return pcm | line | cd | mic;
}

/*
 * Set output sources.  Arg is a mask of active sources in OSS format.
 *
 * Returns source mask on success, -errno on failure.
 */
static int ad1843_set_outsrc(lithium_t *lith, int mask)
{
	int pcm, line, mic, cd;

	if (mask & ~(SOUND_MASK_PCM | SOUND_MASK_LINE |
		     SOUND_MASK_CD | SOUND_MASK_MIC))
		return -EINVAL;
	/* Mute bit is the inverse of "source active". */
	pcm  = (mask & SOUND_MASK_PCM)  ? 0 : 1;
	line = (mask & SOUND_MASK_LINE) ? 0 : 1;
	mic  = (mask & SOUND_MASK_MIC)  ? 0 : 1;
	cd   = (mask & SOUND_MASK_CD)   ? 0 : 1;

	ad1843_write_multi(lith, 2, &ad1843_LDA1GM, pcm, &ad1843_RDA1GM, pcm);
	ad1843_write_multi(lith, 2, &ad1843_LX1MM, line, &ad1843_RX1MM, line);
	ad1843_write_multi(lith, 2, &ad1843_LX2MM, cd,   &ad1843_RX2MM, cd);
	ad1843_write_multi(lith, 2, &ad1843_LMCMM, mic,  &ad1843_RMCMM, mic);

	return mask;
}

/* Setup ad1843 for D/A conversion. */
static void ad1843_setup_dac(lithium_t *lith,
			     int framerate,
			     int fmt,
			     int channels)
{
	int ad_fmt = 0, ad_mode = 0;

	DBGEV("(lith=0x%p, framerate=%d, fmt=%d, channels=%d)\n",
	      lith, framerate, fmt, channels);

	switch (fmt) {
	case AFMT_S8:     ad_fmt = 1; break;
	case AFMT_U8:     ad_fmt = 1; break;
	case AFMT_S16_LE: ad_fmt = 1; break;
	case AFMT_MU_LAW: ad_fmt = 2; break;
	case AFMT_A_LAW:  ad_fmt = 3; break;
	default:          ASSERT(0);
	}

	switch (channels) {
	case 2: ad_mode = 0; break;	/* stereo */
	case 1: ad_mode = 1; break;	/* mono */
	default: ASSERT(0);
	}

	DBGPV("ad_mode = %d, ad_fmt = %d\n", ad_mode, ad_fmt);
	ASSERT(framerate >= 4000 && framerate <= 49000);
	/* DAC1 is clocked from clock generator 1 (see ad1843_init). */
	ad1843_write_bits(lith, &ad1843_C1C, framerate);
	ad1843_write_multi(lith, 2,
			   &ad1843_DA1SM, ad_mode, &ad1843_DA1F, ad_fmt);
}

static void ad1843_shutdown_dac(lithium_t *lith)
{
	ad1843_write_bits(lith, &ad1843_DA1F, 1);
}

/* Setup ad1843 for A/D conversion. */
static void ad1843_setup_adc(lithium_t *lith, int framerate, int fmt, int channels)
{
	int da_fmt = 0;

	DBGEV("(lith=0x%p, framerate=%d, fmt=%d, channels=%d)\n",
	      lith, framerate, fmt, channels);

	switch (fmt) {
	case AFMT_S8:     da_fmt = 1; break;
	case AFMT_U8:     da_fmt = 1; break;
	case AFMT_S16_LE: da_fmt = 1; break;
	case AFMT_MU_LAW: da_fmt = 2; break;
	case AFMT_A_LAW:  da_fmt = 3; break;
	default:          ASSERT(0);
	}

	DBGPV("da_fmt = %d\n", da_fmt);
	ASSERT(framerate >= 4000 && framerate <= 49000);
	/* ADC is clocked from clock generator 2 (see ad1843_init). */
	ad1843_write_bits(lith, &ad1843_C2C, framerate);
	ad1843_write_multi(lith, 2,
			   &ad1843_ADLF, da_fmt, &ad1843_ADRF, da_fmt);
}

static void ad1843_shutdown_adc(lithium_t *lith)
{
	/* nothing to do */
}

/*
 * Fully initialize the ad1843.  As described in the AD1843 data
 * sheet, section "START-UP SEQUENCE".
 *  The numbered comments are
 * subsection headings from the data sheet.  See the data sheet, pages
 * 52-54, for more info.
 *
 * return 0 on success, -errno on failure.
 */
static int __init ad1843_init(lithium_t *lith)
{
	unsigned long later;
	int err;

	err = li_init(lith);
	if (err)
		return err;

	if (ad1843_read_bits(lith, &ad1843_INIT) != 0) {
		printk(KERN_ERR "vwsnd sound: AD1843 won't initialize\n");
		return -EIO;
	}

	ad1843_write_bits(lith, &ad1843_SCF, 1);

	/* 4. Put the conversion resources into standby. */
	ad1843_write_bits(lith, &ad1843_PDNI, 0);
	later = jiffies + HZ / 2;	/* roughly half a second */
	DBGDO(shut_up++);
	/* Poll PDNO until the converters have powered up (bounded wait). */
	while (ad1843_read_bits(lith, &ad1843_PDNO)) {
		if (time_after(jiffies, later)) {
			printk(KERN_ERR
			       "vwsnd audio: AD1843 won't power up\n");
			return -EIO;
		}
		schedule();
	}
	DBGDO(shut_up--);

	/* 5. Power up the clock generators and enable clock output pins. */
	ad1843_write_multi(lith, 2, &ad1843_C1EN, 1, &ad1843_C2EN, 1);

	/* 6. Configure conversion resources while they are in standby. */

	/* DAC1 uses clock 1 as source, ADC uses clock 2.  Always. */
	ad1843_write_multi(lith, 3,
			   &ad1843_DA1C, 1,
			   &ad1843_ADLC, 2,
			   &ad1843_ADRC, 2);

	/* 7. Enable conversion resources. */
	ad1843_write_bits(lith, &ad1843_ADTLK, 1);
	ad1843_write_multi(lith, 5,
			   &ad1843_ANAEN, 1,
			   &ad1843_AAMEN, 1,
			   &ad1843_DA1EN, 1,
			   &ad1843_ADLEN, 1,
			   &ad1843_ADREN, 1);

	/* 8. Configure conversion resources while they are enabled. */
	ad1843_write_bits(lith, &ad1843_DA1C, 1);

	/* Unmute all channels. */
	ad1843_set_outsrc(lith,
			  (SOUND_MASK_PCM | SOUND_MASK_LINE |
			   SOUND_MASK_MIC | SOUND_MASK_CD));
	ad1843_write_multi(lith, 2, &ad1843_LDA1AM, 0, &ad1843_RDA1AM, 0);

	/* Set default recording source to Line In and set
	 * mic gain to +20 dB.
	 */
	ad1843_set_recsrc(lith, SOUND_MASK_LINE);
	ad1843_write_multi(lith, 2, &ad1843_LMGE, 1, &ad1843_RMGE, 1);

	/* Set Speaker Out level to +/- 4V and unmute it. */
	ad1843_write_multi(lith, 2, &ad1843_HPOS, 1, &ad1843_HPOM, 0);

	return 0;
}

/*****************************************************************************/
/* PCM I/O */

#define READ_INTR_MASK  (LI_INTR_COMM1_TRIG | LI_INTR_COMM1_OVERFLOW)
#define WRITE_INTR_MASK (LI_INTR_COMM2_TRIG | LI_INTR_COMM2_UNDERFLOW)

typedef enum vwsnd_port_swstate {	/* software state */
	SW_OFF,
	SW_INITIAL,
	SW_RUN,
	SW_DRAIN,
} vwsnd_port_swstate_t;

typedef enum vwsnd_port_hwstate {	/* hardware state */
	HW_STOPPED,
	HW_RUNNING,
} vwsnd_port_hwstate_t;

/*
 * These flags are read by ISR, but only written at baseline.
 */
typedef enum vwsnd_port_flags {
	DISABLED = 1 << 0,
	ERFLOWN  = 1 << 1,		/* overflown or underflown */
	HW_BUSY  = 1 << 2,
} vwsnd_port_flags_t;

/*
 * vwsnd_port is the per-port data structure.  Each device has two
 * ports, one for input and one for output.
 *
 * Locking:
 *
 *	port->lock protects: hwstate, flags, swb_[iu]_avail.
 *
 *	devc->io_mutex protects: swstate, sw_*, swb_[iu]_idx.
 *
 *	everything else is only written by open/release or
 *	pcm_{setup,shutdown}(), which are serialized by a
 *	combination of devc->open_mutex and devc->io_mutex.
 */
typedef struct vwsnd_port {
	spinlock_t	lock;		/* see Locking comment above */
	wait_queue_head_t queue;	/* woken on state/buffer changes */
	vwsnd_port_swstate_t swstate;
	vwsnd_port_hwstate_t hwstate;
	vwsnd_port_flags_t flags;

	/* Parameters set by mode-setting ioctls. */
	int		sw_channels;
	int		sw_samplefmt;
	int		sw_framerate;
	int		sample_size;
	int		frame_size;
	unsigned int	zero_word;	/* zero for the sample format */

	int		sw_fragshift;
	int		sw_fragcount;
	int		sw_subdivshift;

	unsigned int	hw_fragshift;
	unsigned int	hw_fragsize;
	unsigned int	hw_fragcount;

	/* Hardware (Lithium) DMA buffer. */
	int		hwbuf_size;
	unsigned long	hwbuf_paddr;
	unsigned long	hwbuf_vaddr;
	void	       *hwbuf;		/* hwbuf == hwbuf_vaddr */
	int		hwbuf_max;	/* max bytes to preload */

	/* Software ring buffer shared with interrupt level. */
	void	       *swbuf;
	unsigned int	swbuf_size;	/* size in bytes */
	unsigned int	swb_u_idx;	/* index of next user byte */
	unsigned int	swb_i_idx;	/* index of next intr byte */
	unsigned int	swb_u_avail;	/* # bytes avail to user */
	unsigned int	swb_i_avail;	/* # bytes avail to intr */

	dma_chan_t	chan;

	/* Accounting */

	int		byte_count;
	int		frag_count;
	int		MSC_offset;
} vwsnd_port_t;

/* vwsnd_dev is the per-device data structure. */
typedef struct vwsnd_dev {
	struct vwsnd_dev *next_dev;
	int		audio_minor;	/* minor number of audio device */
	int		mixer_minor;	/* minor number of mixer device */

	struct mutex	open_mutex;
	struct mutex	io_mutex;
	struct mutex	mix_mutex;
	fmode_t		open_mode;
	wait_queue_head_t open_wait;

	lithium_t	lith;

	vwsnd_port_t	rport;		/* capture (read) port */
	vwsnd_port_t	wport;		/* playback (write) port */
} vwsnd_dev_t;

static vwsnd_dev_t *vwsnd_dev_list;	/* linked list of all devices */

static atomic_t vwsnd_use_count = ATOMIC_INIT(0);

# define INC_USE_COUNT (atomic_inc(&vwsnd_use_count))
# define DEC_USE_COUNT (atomic_dec(&vwsnd_use_count))
# define IN_USE        (atomic_read(&vwsnd_use_count) != 0)

/*
 * Lithium can only DMA multiples of 32 bytes.  Its DMA buffer may
 * be up to 8 Kb.  This driver always uses 8 Kb.
 *
 * Memory bug workaround -- I'm not sure what's going on here, but
 * somehow pcm_copy_out() was triggering segv's going on to the next
 * page of the hw buffer.  So, I make the hw buffer one size bigger
 * than we actually use.  That way, the following page is allocated
 * and mapped, and no error.  I suspect that something is broken
 * in Cobalt, but haven't really investigated.  HBO is the actual
 * size of the buffer, and HWBUF_ORDER is what we allocate.
 */

#define HWBUF_SHIFT 13
#define HWBUF_SIZE (1 << HWBUF_SHIFT)
# define HBO         (HWBUF_SHIFT > PAGE_SHIFT ? HWBUF_SHIFT - PAGE_SHIFT : 0)
# define HWBUF_ORDER (HBO + 1)		/* next size bigger */

#define MIN_SPEED 4000
#define MAX_SPEED 49000

#define MIN_FRAGSHIFT			(DMACHUNK_SHIFT + 1)
#define MAX_FRAGSHIFT			(PAGE_SHIFT)
#define MIN_FRAGSIZE			(1 << MIN_FRAGSHIFT)
#define MAX_FRAGSIZE			(1 << MAX_FRAGSHIFT)
#define MIN_FRAGCOUNT(fragsize)		3
#define MAX_FRAGCOUNT(fragsize)		(32 * PAGE_SIZE / (fragsize))
#define DEFAULT_FRAGSHIFT		12
#define DEFAULT_FRAGCOUNT		16
#define DEFAULT_SUBDIVSHIFT		0

/*
 * The software buffer (swbuf) is a ring buffer shared between user
 * level and interrupt level.  Each level owns some of the bytes in
 * the buffer, and may give bytes away by calling swb_inc_{u,i}().
 * User level calls _u for user, and interrupt level calls _i for
 * interrupt.
 *
 * port->swb_{u,i}_avail is the number of bytes available to that level.
 *
 * port->swb_{u,i}_idx is the index of the first available byte in the
 * buffer.
 *
 * Each level calls swb_inc_{u,i}() to atomically increment its index,
 * recalculate the number of bytes available for both sides, and
 * return the number of bytes available.  Since each side can only
 * give away bytes, the other side can only increase the number of
 * bytes available to this side.  Each side updates its own index
 * variable, swb_{u,i}_idx, so no lock is needed to read it.
 *
 * To query the number of bytes available, call swb_inc_{u,i} with an
 * increment of zero.
 */
static __inline__ unsigned int __swb_inc_u(vwsnd_port_t *port, int inc)
{
	if (inc) {
		port->swb_u_idx += inc;
		port->swb_u_idx %= port->swbuf_size;
		port->swb_u_avail -= inc;
		port->swb_i_avail += inc;
	}
	return port->swb_u_avail;
}

/* Locked wrapper around __swb_inc_u(). */
static __inline__ unsigned int swb_inc_u(vwsnd_port_t *port, int inc)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&port->lock, flags);
	{
		ret = __swb_inc_u(port, inc);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}

static __inline__ unsigned int __swb_inc_i(vwsnd_port_t *port, int inc)
{
	if (inc) {
		port->swb_i_idx += inc;
		port->swb_i_idx %= port->swbuf_size;
		port->swb_i_avail -= inc;
		port->swb_u_avail += inc;
	}
	return port->swb_i_avail;
}

/* Locked wrapper around __swb_inc_i(). */
static __inline__ unsigned int swb_inc_i(vwsnd_port_t *port, int inc)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&port->lock, flags);
	{
		ret = __swb_inc_i(port, inc);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}

/*
 * pcm_setup - this routine initializes all port state after
 * mode-setting ioctls have been done, but before the first I/O is
 * done.
 *
 * Locking: called with devc->io_mutex held.
 *
 * Returns 0 on success, -errno on failure.
 */
static int pcm_setup(vwsnd_dev_t *devc,
		     vwsnd_port_t *rport,
		     vwsnd_port_t *wport)
{
	vwsnd_port_t *aport = rport ? rport : wport;
	int sample_size;
	unsigned int zero_word;

	DBGEV("(devc=0x%p, rport=0x%p, wport=0x%p)\n", devc, rport, wport);

	ASSERT(aport != NULL);
	if (aport->swbuf != NULL)
		return 0;
	/* Derive sample size and the "silence" fill word from the format. */
	switch (aport->sw_samplefmt) {
	case AFMT_MU_LAW:
		sample_size = 1;
		zero_word = 0xFFFFFFFF ^ 0x80808080;
		break;
	case AFMT_A_LAW:
		sample_size = 1;
		zero_word = 0xD5D5D5D5 ^ 0x80808080;
		break;
	case AFMT_U8:
		sample_size = 1;
		zero_word = 0x80808080;
		break;
	case AFMT_S8:
		sample_size = 1;
		zero_word = 0x00000000;
		break;
	case AFMT_S16_LE:
		sample_size = 2;
		zero_word = 0x00000000;
		break;
	default:
		sample_size = 0;	/* prevent compiler warning */
		zero_word = 0;
		ASSERT(0);
	}
	aport->sample_size = sample_size;
	aport->zero_word = zero_word;
	aport->frame_size = aport->sw_channels * aport->sample_size;
	aport->hw_fragshift = aport->sw_fragshift - aport->sw_subdivshift;
	aport->hw_fragsize = 1 << aport->hw_fragshift;
	aport->hw_fragcount = aport->sw_fragcount << aport->sw_subdivshift;
	ASSERT(aport->hw_fragsize >= MIN_FRAGSIZE);
	ASSERT(aport->hw_fragsize <= MAX_FRAGSIZE);
	ASSERT(aport->hw_fragcount >= MIN_FRAGCOUNT(aport->hw_fragsize));
	ASSERT(aport->hw_fragcount <= MAX_FRAGCOUNT(aport->hw_fragsize));
	/* Split fragments between hardware and software buffers. */
	if (rport) {
		int hwfrags, swfrags;
		rport->hwbuf_max = aport->hwbuf_size - DMACHUNK_SIZE;
		hwfrags = rport->hwbuf_max >> aport->hw_fragshift;
		swfrags = aport->hw_fragcount - hwfrags;
		if (swfrags < 2)
			swfrags = 2;
		rport->swbuf_size = swfrags * aport->hw_fragsize;
		DBGPV("hwfrags = %d, swfrags = %d\n", hwfrags, swfrags);
		DBGPV("read hwbuf_max = %d, swbuf_size = %d\n",
		      rport->hwbuf_max, rport->swbuf_size);
	}
	if (wport) {
		int hwfrags, swfrags;
		int total_bytes = aport->hw_fragcount * aport->hw_fragsize;
		wport->hwbuf_max = aport->hwbuf_size - DMACHUNK_SIZE;
		if (wport->hwbuf_max > total_bytes)
			wport->hwbuf_max = total_bytes;
		hwfrags = wport->hwbuf_max >> aport->hw_fragshift;
		DBGPV("hwfrags = %d\n", hwfrags);
		swfrags = aport->hw_fragcount - hwfrags;
		if (swfrags < 2)
			swfrags = 2;
		wport->swbuf_size = swfrags * aport->hw_fragsize;
		DBGPV("hwfrags = %d, swfrags = %d\n", hwfrags, swfrags);
		DBGPV("write hwbuf_max = %d, swbuf_size = %d\n",
		      wport->hwbuf_max, wport->swbuf_size);
	}

	aport->swb_u_idx = 0;
	aport->swb_i_idx = 0;
	aport->byte_count = 0;

	/*
	 * Is this a Cobalt bug?  We need to make this buffer extend
	 * one page further than we actually use -- somehow memcpy
	 * causes an exception otherwise.  I suspect there's a bug in
	 * Cobalt (or somewhere) where it's generating a fault on a
	 * speculative load or something.  Obviously, I haven't taken
	 * the time to track it down.
	 */

	aport->swbuf = vmalloc(aport->swbuf_size + PAGE_SIZE);
	if (!aport->swbuf)
		return -ENOMEM;
	if (rport && wport) {
		/* Full duplex: mirror the read port's parameters onto
		 * the write port. */
		ASSERT(aport == rport);
		ASSERT(wport->swbuf == NULL);
		/* One extra page - see comment above. */
		wport->swbuf = vmalloc(aport->swbuf_size + PAGE_SIZE);
		if (!wport->swbuf) {
			vfree(aport->swbuf);
			aport->swbuf = NULL;
			return -ENOMEM;
		}
		wport->sample_size = rport->sample_size;
		wport->zero_word = rport->zero_word;
		wport->frame_size = rport->frame_size;
		wport->hw_fragshift = rport->hw_fragshift;
		wport->hw_fragsize = rport->hw_fragsize;
		wport->hw_fragcount = rport->hw_fragcount;
		wport->swbuf_size = rport->swbuf_size;
		wport->hwbuf_max = rport->hwbuf_max;
		wport->swb_u_idx = rport->swb_u_idx;
		wport->swb_i_idx = rport->swb_i_idx;
		wport->byte_count = rport->byte_count;
	}
	if (rport) {
		rport->swb_u_avail = 0;
		rport->swb_i_avail = rport->swbuf_size;
		rport->swstate = SW_RUN;
		li_setup_dma(&rport->chan,
			     &li_comm1,
			     &devc->lith,
			     rport->hwbuf_paddr,
			     HWBUF_SHIFT,
			     rport->hw_fragshift,
			     rport->sw_channels,
			     rport->sample_size);
		ad1843_setup_adc(&devc->lith,
				 rport->sw_framerate,
				 rport->sw_samplefmt,
				 rport->sw_channels);
		li_enable_interrupts(&devc->lith, READ_INTR_MASK);
		if (!(rport->flags & DISABLED)) {
			ustmsc_t ustmsc;
			rport->hwstate = HW_RUNNING;
			li_activate_dma(&rport->chan);
			li_read_USTMSC(&rport->chan, &ustmsc);
			rport->MSC_offset = ustmsc.msc;
		}
	}
	if (wport) {
		if (wport->hwbuf_max > wport->swbuf_size)
			wport->hwbuf_max = wport->swbuf_size;
		wport->flags &= ~ERFLOWN;
		wport->swb_u_avail = wport->swbuf_size;
		wport->swb_i_avail = 0;
		wport->swstate = SW_RUN;
		li_setup_dma(&wport->chan,
			     &li_comm2,
			     &devc->lith,
			     wport->hwbuf_paddr,
			     HWBUF_SHIFT,
			     wport->hw_fragshift,
			     wport->sw_channels,
			     wport->sample_size);
		ad1843_setup_dac(&devc->lith,
				 wport->sw_framerate,
				 wport->sw_samplefmt,
				 wport->sw_channels);
		li_enable_interrupts(&devc->lith, WRITE_INTR_MASK);
	}
	DBGRV();
	return 0;
}

/*
 * pcm_shutdown_port - shut down one port (direction) for PCM I/O.
 * Only called from pcm_shutdown.
 *
 * Sleeps (uninterruptibly) until the port's hardware state reaches
 * HW_STOPPED, then disables interrupts, shuts down the codec and DMA
 * channel, and frees the software buffer.
 */
static void pcm_shutdown_port(vwsnd_dev_t *devc,
			      vwsnd_port_t *aport,
			      unsigned int mask)
{
	unsigned long flags;
	vwsnd_port_hwstate_t hwstate;
	DECLARE_WAITQUEUE(wait, current);

	aport->swstate = SW_INITIAL;
	add_wait_queue(&aport->queue, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&aport->lock, flags);
		{
			hwstate = aport->hwstate;
		}
		spin_unlock_irqrestore(&aport->lock, flags);
		if (hwstate == HW_STOPPED)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&aport->queue, &wait);
	li_disable_interrupts(&devc->lith, mask);
	if (aport == &devc->rport)
		ad1843_shutdown_adc(&devc->lith);
	else			/* aport == &devc->wport */
		ad1843_shutdown_dac(&devc->lith);
	li_shutdown_dma(&aport->chan);
	vfree(aport->swbuf);
	aport->swbuf = NULL;
	aport->byte_count = 0;
}

/*
 * pcm_shutdown undoes what pcm_setup did.
 * Also sets the ports' swstate to newstate.
 */
static void pcm_shutdown(vwsnd_dev_t *devc,
			 vwsnd_port_t *rport,
			 vwsnd_port_t *wport)
{
	DBGEV("(devc=0x%p, rport=0x%p, wport=0x%p)\n", devc, rport, wport);

	if (rport && rport->swbuf) {
		DBGPV("shutting down rport\n");
		pcm_shutdown_port(devc, rport, READ_INTR_MASK);
	}
	if (wport && wport->swbuf) {
		DBGPV("shutting down wport\n");
		pcm_shutdown_port(devc, wport, WRITE_INTR_MASK);
	}
	DBGRV();
}

/*
 * pcm_copy_in - copy nb bytes from the hardware DMA buffer (at hwidx)
 * into the software ring buffer (at swidx), flipping the sign bit of
 * each byte for the formats that need it.
 */
static void pcm_copy_in(vwsnd_port_t *rport, int swidx, int hwidx, int nb)
{
	char *src = rport->hwbuf + hwidx;
	char *dst = rport->swbuf + swidx;
	int fmt = rport->sw_samplefmt;

	DBGPV("swidx = %d, hwidx = %d\n", swidx, hwidx);
	ASSERT(rport->hwbuf != NULL);
	ASSERT(rport->swbuf != NULL);
	ASSERT(nb > 0 && (nb % 32) == 0);
	ASSERT(swidx % 32 == 0 && hwidx % 32 == 0);
	ASSERT(swidx >= 0 && swidx + nb <= rport->swbuf_size);
	ASSERT(hwidx >= 0 && hwidx + nb <= rport->hwbuf_size);

	if (fmt == AFMT_MU_LAW || fmt == AFMT_A_LAW || fmt == AFMT_S8) {
		/* See Sample Format Notes above. */
		char *end = src + nb;
		while (src < end)
			*dst++ = *src++ ^ 0x80;
	} else
		memcpy(dst, src, nb);
}

/*
 * pcm_copy_out - copy nb bytes from the software ring buffer (at
 * swidx) into the hardware DMA buffer (at hwidx); mirror image of
 * pcm_copy_in.
 */
static void pcm_copy_out(vwsnd_port_t *wport, int swidx, int hwidx, int nb)
{
	char *src = wport->swbuf + swidx;
	char *dst = wport->hwbuf + hwidx;
	int fmt = wport->sw_samplefmt;

	ASSERT(nb > 0 && (nb % 32) == 0);
	ASSERT(wport->hwbuf != NULL);
	ASSERT(wport->swbuf != NULL);
	ASSERT(swidx % 32 == 0 && hwidx % 32 == 0);
	ASSERT(swidx >= 0 && swidx + nb <= wport->swbuf_size);
	ASSERT(hwidx >= 0 && hwidx + nb <= wport->hwbuf_size);

	if (fmt == AFMT_MU_LAW || fmt == AFMT_A_LAW || fmt == AFMT_S8) {
		/* See Sample Format Notes above. */
		char *end = src + nb;
		while (src < end)
			*dst++ = *src++ ^ 0x80;
	} else
		memcpy(dst, src, nb);
}

/*
 * pcm_output() is called both from baselevel and from interrupt level.
 * This is where audio frames are copied into the hardware-accessible
 * ring buffer.
 *
 * Locking note: The part of this routine that figures out what to do
 * holds wport->lock.
 *  The longer part releases wport->lock, but sets
 * wport->flags & HW_BUSY.  Afterward, it reacquires wport->lock, and
 * checks for more work to do.
 *
 * If another thread calls pcm_output() while HW_BUSY is set, it
 * returns immediately, knowing that the thread that set HW_BUSY will
 * look for more work to do before returning.
 *
 * This has the advantage that port->lock is held for several short
 * periods instead of one long period.  Also, when pcm_output is
 * called from base level, it reenables interrupts.
 */
static void pcm_output(vwsnd_dev_t *devc, int erflown, int nb)
{
	vwsnd_port_t *wport = &devc->wport;
	const int hwmax = wport->hwbuf_max;
	const int hwsize = wport->hwbuf_size;
	const int swsize = wport->swbuf_size;
	const int fragsize = wport->hw_fragsize;
	unsigned long iflags;

	DBGEV("(devc=0x%p, erflown=%d, nb=%d)\n", devc, erflown, nb);
	spin_lock_irqsave(&wport->lock, iflags);
	if (erflown)
		wport->flags |= ERFLOWN;
	(void) __swb_inc_u(wport, nb);
	if (wport->flags & HW_BUSY) {
		/* Another thread holds HW_BUSY; it will see our bytes. */
		spin_unlock_irqrestore(&wport->lock, iflags);
		DBGPV("returning: HW BUSY\n");
		return;
	}
	if (wport->flags & DISABLED) {
		spin_unlock_irqrestore(&wport->lock, iflags);
		DBGPV("returning: DISABLED\n");
		return;
	}
	wport->flags |= HW_BUSY;
	while (1) {
		int swptr, hwptr, hw_avail, sw_avail, swidx;
		vwsnd_port_hwstate_t hwstate = wport->hwstate;
		vwsnd_port_swstate_t swstate = wport->swstate;
		int hw_unavail;
		ustmsc_t ustmsc;
		hwptr = li_read_hwptr(&wport->chan);
		swptr = li_read_swptr(&wport->chan);
		hw_unavail = (swptr - hwptr + hwsize) % hwsize;
		hw_avail = (hwmax - hw_unavail) & -fragsize;
		sw_avail = wport->swb_i_avail & -fragsize;
		if (sw_avail && swstate == SW_RUN) {
			if (wport->flags & ERFLOWN) {
				wport->flags &= ~ERFLOWN;
			}
		} else if (swstate == SW_INITIAL ||
			   swstate == SW_OFF ||
			   (swstate == SW_DRAIN &&
			    !sw_avail &&
			    (wport->flags & ERFLOWN))) {
			/* Drained (or shut down): stop the hardware. */
			DBGP("stopping.  hwstate = %d\n", hwstate);
			if (hwstate != HW_STOPPED) {
				li_deactivate_dma(&wport->chan);
				wport->hwstate = HW_STOPPED;
			}
			wake_up(&wport->queue);
			break;
		}
		if (!sw_avail || !hw_avail)
			break;
		spin_unlock_irqrestore(&wport->lock, iflags);

		/*
		 * We gave up the port lock, but we have the HW_BUSY flag.
		 * Proceed without accessing any nonlocal state.
		 * Do not exit the loop -- must check for more work.
		 */

		swidx = wport->swb_i_idx;
		nb = hw_avail;
		if (nb > sw_avail)
			nb = sw_avail;
		if (nb > hwsize - swptr)
			nb = hwsize - swptr; /* don't overflow hwbuf */
		if (nb > swsize - swidx)
			nb = swsize - swidx; /* don't overflow swbuf */
		ASSERT(nb > 0);
		if (nb % fragsize) {
			DBGP("nb = %d, fragsize = %d\n", nb, fragsize);
			DBGP("hw_avail = %d\n", hw_avail);
			DBGP("sw_avail = %d\n", sw_avail);
			DBGP("hwsize = %d, swptr = %d\n", hwsize, swptr);
			DBGP("swsize = %d, swidx = %d\n", swsize, swidx);
		}
		ASSERT(!(nb % fragsize));
		DBGPV("copying swb[%d..%d] to hwb[%d..%d]\n",
		      swidx, swidx + nb, swptr, swptr + nb);
		pcm_copy_out(wport, swidx, swptr, nb);
		li_write_swptr(&wport->chan, (swptr + nb) % hwsize);
		spin_lock_irqsave(&wport->lock, iflags);
		if (hwstate == HW_STOPPED) {
			DBGPV("starting\n");
			li_activate_dma(&wport->chan);
			wport->hwstate = HW_RUNNING;
			li_read_USTMSC(&wport->chan, &ustmsc);
			ASSERT(wport->byte_count % wport->frame_size == 0);
			wport->MSC_offset = ustmsc.msc -
				wport->byte_count / wport->frame_size;
		}
		__swb_inc_i(wport, nb);
		wport->byte_count += nb;
		wport->frag_count += nb / fragsize;
		ASSERT(nb % fragsize == 0);
		wake_up(&wport->queue);
	}
	wport->flags &= ~HW_BUSY;
	spin_unlock_irqrestore(&wport->lock, iflags);
	DBGRV();
}

/*
 * pcm_input() is called both from baselevel and from interrupt level.
 * This is where audio frames are copied out of the hardware-accessible
 * ring buffer.
 *
 * Locking note: The part of this routine that figures out what to do
 * holds rport->lock.  The longer part releases rport->lock, but sets
 * rport->flags & HW_BUSY.
 *  Afterward, it reacquires rport->lock, and
 * checks for more work to do.
 *
 * If another thread calls pcm_input() while HW_BUSY is set, it
 * returns immediately, knowing that the thread that set HW_BUSY will
 * look for more work to do before returning.
 *
 * This has the advantage that port->lock is held for several short
 * periods instead of one long period.  Also, when pcm_input is
 * called from base level, it reenables interrupts.
 */
static void pcm_input(vwsnd_dev_t *devc, int erflown, int nb)
{
	vwsnd_port_t *rport = &devc->rport;
	const int hwmax = rport->hwbuf_max;
	const int hwsize = rport->hwbuf_size;
	const int swsize = rport->swbuf_size;
	const int fragsize = rport->hw_fragsize;
	unsigned long iflags;

	DBGEV("(devc=0x%p, erflown=%d, nb=%d)\n", devc, erflown, nb);

	spin_lock_irqsave(&rport->lock, iflags);
	if (erflown)
		rport->flags |= ERFLOWN;
	(void) __swb_inc_u(rport, nb);
	if (rport->flags & HW_BUSY || !rport->swbuf) {
		spin_unlock_irqrestore(&rport->lock, iflags);
		DBGPV("returning: HW BUSY or !swbuf\n");
		return;
	}
	if (rport->flags & DISABLED) {
		spin_unlock_irqrestore(&rport->lock, iflags);
		DBGPV("returning: DISABLED\n");
		return;
	}
	rport->flags |= HW_BUSY;
	while (1) {
		int swptr, hwptr, hw_avail, sw_avail, swidx;
		vwsnd_port_hwstate_t hwstate = rport->hwstate;
		vwsnd_port_swstate_t swstate = rport->swstate;
		hwptr = li_read_hwptr(&rport->chan);
		swptr = li_read_swptr(&rport->chan);
		hw_avail = (hwptr - swptr + hwsize) % hwsize & -fragsize;
		if (hw_avail > hwmax)
			hw_avail = hwmax;
		sw_avail = rport->swb_i_avail & -fragsize;
		if (swstate != SW_RUN) {
			DBGP("stopping.  hwstate = %d\n", hwstate);
			if (hwstate != HW_STOPPED) {
				li_deactivate_dma(&rport->chan);
				rport->hwstate = HW_STOPPED;
			}
			wake_up(&rport->queue);
			break;
		}
		if (!sw_avail || !hw_avail)
			break;
		spin_unlock_irqrestore(&rport->lock, iflags);

		/*
		 * We gave up the port lock, but we have the HW_BUSY flag.
		 * Proceed without accessing any nonlocal state.
		 * Do not exit the loop -- must check for more work.
		 */

		swidx = rport->swb_i_idx;
		nb = hw_avail;
		if (nb > sw_avail)
			nb = sw_avail;
		if (nb > hwsize - swptr)
			nb = hwsize - swptr; /* don't overflow hwbuf */
		if (nb > swsize - swidx)
			nb = swsize - swidx; /* don't overflow swbuf */
		ASSERT(nb > 0);
		if (nb % fragsize) {
			DBGP("nb = %d, fragsize = %d\n", nb, fragsize);
			DBGP("hw_avail = %d\n", hw_avail);
			DBGP("sw_avail = %d\n", sw_avail);
			DBGP("hwsize = %d, swptr = %d\n", hwsize, swptr);
			DBGP("swsize = %d, swidx = %d\n", swsize, swidx);
		}
		ASSERT(!(nb % fragsize));
		DBGPV("copying hwb[%d..%d] to swb[%d..%d]\n",
		      swptr, swptr + nb, swidx, swidx + nb);
		pcm_copy_in(rport, swidx, swptr, nb);
		li_write_swptr(&rport->chan, (swptr + nb) % hwsize);
		spin_lock_irqsave(&rport->lock, iflags);
		__swb_inc_i(rport, nb);
		rport->byte_count += nb;
		rport->frag_count += nb / fragsize;
		ASSERT(nb % fragsize == 0);
		wake_up(&rport->queue);
	}
	rport->flags &= ~HW_BUSY;
	spin_unlock_irqrestore(&rport->lock, iflags);
	DBGRV();
}

/*
 * pcm_flush_frag() writes zero samples to fill the current fragment,
 * then flushes it to the hardware.
 *
 * It is only meaningful to flush output, not input.
 */
static void pcm_flush_frag(vwsnd_dev_t *devc)
{
	vwsnd_port_t *wport = &devc->wport;

	DBGPV("swstate = %d\n", wport->swstate);
	if (wport->swstate == SW_RUN) {
		int idx = wport->swb_u_idx;
		/* Round the user index up to the next fragment boundary. */
		int end = (idx + wport->hw_fragsize - 1)
			>> wport->hw_fragshift
			<< wport->hw_fragshift;
		int nb = end - idx;
		DBGPV("clearing %d bytes\n", nb);
		if (nb)
			memset(wport->swbuf + idx,
			       (char) wport->zero_word,
			       nb);
		wport->swstate = SW_DRAIN;
		pcm_output(devc, 0, nb);
	}
	DBGRV();
}

/*
 * Wait for output to drain.  This sleeps uninterruptibly because
 * there is nothing intelligent we can do if interrupted.  This
 * means the process will be delayed in responding to the signal.
 */
static void pcm_write_sync(vwsnd_dev_t *devc)
{
	vwsnd_port_t *wport = &devc->wport;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	vwsnd_port_hwstate_t hwstate;

	DBGEV("(devc=0x%p)\n", devc);
	add_wait_queue(&wport->queue, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&wport->lock, flags);
		{
			hwstate = wport->hwstate;
		}
		spin_unlock_irqrestore(&wport->lock, flags);
		if (hwstate == HW_STOPPED)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&wport->queue, &wait);
	DBGPV("swstate = %d, hwstate = %d\n", wport->swstate, wport->hwstate);
	DBGRV();
}

/*****************************************************************************/
/* audio driver */

/*
 * seek on an audio device always fails.
 */

/* Dispatch a read-side (capture) interrupt to pcm_input(). */
static void vwsnd_audio_read_intr(vwsnd_dev_t *devc, unsigned int status)
{
	int overflown = status & LI_INTR_COMM1_OVERFLOW;

	if (status & READ_INTR_MASK)
		pcm_input(devc, overflown, 0);
}

/* Dispatch a write-side (playback) interrupt to pcm_output(). */
static void vwsnd_audio_write_intr(vwsnd_dev_t *devc, unsigned int status)
{
	int underflown = status & LI_INTR_COMM2_UNDERFLOW;

	if (status & WRITE_INTR_MASK)
		pcm_output(devc, underflown, 0);
}

/* Top-level interrupt handler: read-and-clear status, fan out. */
static irqreturn_t vwsnd_audio_intr(int irq, void *dev_id)
{
	vwsnd_dev_t *devc = dev_id;
	unsigned int status;

	DBGEV("(irq=%d, dev_id=0x%p)\n", irq, dev_id);

	status = li_get_clear_intr_status(&devc->lith);
	vwsnd_audio_read_intr(devc, status);
	vwsnd_audio_write_intr(devc, status);
	return IRQ_HANDLED;
}

/*
 * vwsnd_audio_do_read - read() implementation, called with
 * devc->io_mutex held by vwsnd_audio_read().  Blocks (unless
 * O_NONBLOCK) until at least one byte is available, then copies
 * available data from the software ring buffer to userspace.
 */
static ssize_t vwsnd_audio_do_read(struct file *file,
				   char *buffer,
				   size_t count,
				   loff_t *ppos)
{
	vwsnd_dev_t *devc = file->private_data;
	vwsnd_port_t *rport = ((file->f_mode & FMODE_READ) ?
			       &devc->rport : NULL);
	int ret, nb;

	DBGEV("(file=0x%p, buffer=0x%p, count=%d, ppos=0x%p)\n",
	      file, buffer, count, ppos);

	if (!rport)
		return -EINVAL;

	if (rport->swbuf == NULL) {
		/* First I/O on this fd: finish port initialization. */
		vwsnd_port_t *wport = (file->f_mode & FMODE_WRITE) ?
			&devc->wport : NULL;
		ret = pcm_setup(devc, rport, wport);
		if (ret < 0)
			return ret;
	}

	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;

	ret = 0;
	while (count) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(&rport->queue, &wait);
		while ((nb = swb_inc_u(rport, 0)) == 0) {
			DBGPV("blocking\n");
			set_current_state(TASK_INTERRUPTIBLE);
			if (rport->flags & DISABLED ||
			    file->f_flags & O_NONBLOCK) {
				current->state = TASK_RUNNING;
				remove_wait_queue(&rport->queue, &wait);
				return ret ? ret : -EAGAIN;
			}
			schedule();
			if (signal_pending(current)) {
				current->state = TASK_RUNNING;
				remove_wait_queue(&rport->queue, &wait);
				return ret ? ret : -ERESTARTSYS;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&rport->queue, &wait);
		pcm_input(devc, 0, 0);
		/* nb bytes are available in userbuf. */
		if (nb > count)
			nb = count;
		DBGPV("nb = %d\n", nb);
		if (copy_to_user(buffer, rport->swbuf + rport->swb_u_idx, nb))
			return -EFAULT;
		(void) swb_inc_u(rport, nb);
		buffer += nb;
		count -= nb;
		ret += nb;
	}
	DBGPV("returning %d\n", ret);
	return ret;
}

/* read() entry point: serialize on io_mutex, then do the real work. */
static ssize_t vwsnd_audio_read(struct file *file,
				char *buffer,
				size_t count,
				loff_t *ppos)
{
	vwsnd_dev_t *devc = file->private_data;
	ssize_t ret;

	mutex_lock(&devc->io_mutex);
	ret = vwsnd_audio_do_read(file, buffer, count, ppos);
	mutex_unlock(&devc->io_mutex);
	return ret;
}

/*
 * vwsnd_audio_do_write - write() implementation, called with
 * devc->io_mutex held by vwsnd_audio_write().  Mirror image of
 * vwsnd_audio_do_read().
 */
static ssize_t vwsnd_audio_do_write(struct file *file,
				    const char *buffer,
				    size_t count,
				    loff_t *ppos)
{
	vwsnd_dev_t *devc = file->private_data;
	vwsnd_port_t *wport = ((file->f_mode & FMODE_WRITE) ?
			       &devc->wport : NULL);
	int ret, nb;

	DBGEV("(file=0x%p, buffer=0x%p, count=%d, ppos=0x%p)\n",
	      file, buffer, count, ppos);

	if (!wport)
		return -EINVAL;

	if (wport->swbuf == NULL) {
		/* First I/O on this fd: finish port initialization. */
		vwsnd_port_t *rport = (file->f_mode & FMODE_READ) ?
&devc->rport : NULL; ret = pcm_setup(devc, rport, wport); if (ret < 0) return ret; } if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; ret = 0; while (count) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(&wport->queue, &wait); while ((nb = swb_inc_u(wport, 0)) == 0) { set_current_state(TASK_INTERRUPTIBLE); if (wport->flags & DISABLED || file->f_flags & O_NONBLOCK) { current->state = TASK_RUNNING; remove_wait_queue(&wport->queue, &wait); return ret ? ret : -EAGAIN; } schedule(); if (signal_pending(current)) { current->state = TASK_RUNNING; remove_wait_queue(&wport->queue, &wait); return ret ? ret : -ERESTARTSYS; } } current->state = TASK_RUNNING; remove_wait_queue(&wport->queue, &wait); /* nb bytes are available in userbuf. */ if (nb > count) nb = count; DBGPV("nb = %d\n", nb); if (copy_from_user(wport->swbuf + wport->swb_u_idx, buffer, nb)) return -EFAULT; pcm_output(devc, 0, nb); buffer += nb; count -= nb; ret += nb; } DBGPV("returning %d\n", ret); return ret; } static ssize_t vwsnd_audio_write(struct file *file, const char *buffer, size_t count, loff_t *ppos) { vwsnd_dev_t *devc = file->private_data; ssize_t ret; mutex_lock(&devc->io_mutex); ret = vwsnd_audio_do_write(file, buffer, count, ppos); mutex_unlock(&devc->io_mutex); return ret; } /* No kernel lock - fine */ static unsigned int vwsnd_audio_poll(struct file *file, struct poll_table_struct *wait) { vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; vwsnd_port_t *rport = (file->f_mode & FMODE_READ) ? &devc->rport : NULL; vwsnd_port_t *wport = (file->f_mode & FMODE_WRITE) ? 
&devc->wport : NULL; unsigned int mask = 0; DBGEV("(file=0x%p, wait=0x%p)\n", file, wait); ASSERT(rport || wport); if (rport) { poll_wait(file, &rport->queue, wait); if (swb_inc_u(rport, 0)) mask |= (POLLIN | POLLRDNORM); } if (wport) { poll_wait(file, &wport->queue, wait); if (wport->swbuf == NULL || swb_inc_u(wport, 0)) mask |= (POLLOUT | POLLWRNORM); } DBGPV("returning 0x%x\n", mask); return mask; } static int vwsnd_audio_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; vwsnd_port_t *rport = (file->f_mode & FMODE_READ) ? &devc->rport : NULL; vwsnd_port_t *wport = (file->f_mode & FMODE_WRITE) ? &devc->wport : NULL; vwsnd_port_t *aport = rport ? rport : wport; struct audio_buf_info buf_info; struct count_info info; unsigned long flags; int ival; DBGEV("(file=0x%p, cmd=0x%x, arg=0x%lx)\n", file, cmd, arg); switch (cmd) { case OSS_GETVERSION: /* _SIOR ('M', 118, int) */ DBGX("OSS_GETVERSION\n"); ival = SOUND_VERSION; return put_user(ival, (int *) arg); case SNDCTL_DSP_GETCAPS: /* _SIOR ('P',15, int) */ DBGX("SNDCTL_DSP_GETCAPS\n"); ival = DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER; return put_user(ival, (int *) arg); case SNDCTL_DSP_GETFMTS: /* _SIOR ('P',11, int) */ DBGX("SNDCTL_DSP_GETFMTS\n"); ival = (AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_U8 | AFMT_S8); return put_user(ival, (int *) arg); break; case SOUND_PCM_READ_RATE: /* _SIOR ('P', 2, int) */ DBGX("SOUND_PCM_READ_RATE\n"); ival = aport->sw_framerate; return put_user(ival, (int *) arg); case SOUND_PCM_READ_CHANNELS: /* _SIOR ('P', 6, int) */ DBGX("SOUND_PCM_READ_CHANNELS\n"); ival = aport->sw_channels; return put_user(ival, (int *) arg); case SNDCTL_DSP_SPEED: /* _SIOWR('P', 2, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_SPEED %d\n", ival); if (ival) { if (aport->swstate != SW_INITIAL) { DBGX("SNDCTL_DSP_SPEED failed: swstate = %d\n", aport->swstate); return -EINVAL; } if (ival < MIN_SPEED) 
ival = MIN_SPEED; if (ival > MAX_SPEED) ival = MAX_SPEED; if (rport) rport->sw_framerate = ival; if (wport) wport->sw_framerate = ival; } else ival = aport->sw_framerate; return put_user(ival, (int *) arg); case SNDCTL_DSP_STEREO: /* _SIOWR('P', 3, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_STEREO %d\n", ival); if (ival != 0 && ival != 1) return -EINVAL; if (aport->swstate != SW_INITIAL) return -EINVAL; if (rport) rport->sw_channels = ival + 1; if (wport) wport->sw_channels = ival + 1; return put_user(ival, (int *) arg); case SNDCTL_DSP_CHANNELS: /* _SIOWR('P', 6, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_CHANNELS %d\n", ival); if (ival != 1 && ival != 2) return -EINVAL; if (aport->swstate != SW_INITIAL) return -EINVAL; if (rport) rport->sw_channels = ival; if (wport) wport->sw_channels = ival; return put_user(ival, (int *) arg); case SNDCTL_DSP_GETBLKSIZE: /* _SIOWR('P', 4, int) */ ival = pcm_setup(devc, rport, wport); if (ival < 0) { DBGX("SNDCTL_DSP_GETBLKSIZE failed, errno %d\n", ival); return ival; } ival = 1 << aport->sw_fragshift; DBGX("SNDCTL_DSP_GETBLKSIZE returning %d\n", ival); return put_user(ival, (int *) arg); case SNDCTL_DSP_SETFRAGMENT: /* _SIOWR('P',10, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_SETFRAGMENT %d:%d\n", ival >> 16, ival & 0xFFFF); if (aport->swstate != SW_INITIAL) return -EINVAL; { int sw_fragshift = ival & 0xFFFF; int sw_subdivshift = aport->sw_subdivshift; int hw_fragshift = sw_fragshift - sw_subdivshift; int sw_fragcount = (ival >> 16) & 0xFFFF; int hw_fragsize; if (hw_fragshift < MIN_FRAGSHIFT) hw_fragshift = MIN_FRAGSHIFT; if (hw_fragshift > MAX_FRAGSHIFT) hw_fragshift = MAX_FRAGSHIFT; sw_fragshift = hw_fragshift + aport->sw_subdivshift; hw_fragsize = 1 << hw_fragshift; if (sw_fragcount < MIN_FRAGCOUNT(hw_fragsize)) sw_fragcount = MIN_FRAGCOUNT(hw_fragsize); if (sw_fragcount > MAX_FRAGCOUNT(hw_fragsize)) sw_fragcount = 
MAX_FRAGCOUNT(hw_fragsize); DBGPV("sw_fragshift = %d\n", sw_fragshift); DBGPV("rport = 0x%p, wport = 0x%p\n", rport, wport); if (rport) { rport->sw_fragshift = sw_fragshift; rport->sw_fragcount = sw_fragcount; } if (wport) { wport->sw_fragshift = sw_fragshift; wport->sw_fragcount = sw_fragcount; } ival = sw_fragcount << 16 | sw_fragshift; } DBGX("SNDCTL_DSP_SETFRAGMENT returns %d:%d\n", ival >> 16, ival & 0xFFFF); return put_user(ival, (int *) arg); case SNDCTL_DSP_SUBDIVIDE: /* _SIOWR('P', 9, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_SUBDIVIDE %d\n", ival); if (aport->swstate != SW_INITIAL) return -EINVAL; { int subdivshift; int hw_fragshift, hw_fragsize, hw_fragcount; switch (ival) { case 1: subdivshift = 0; break; case 2: subdivshift = 1; break; case 4: subdivshift = 2; break; default: return -EINVAL; } hw_fragshift = aport->sw_fragshift - subdivshift; if (hw_fragshift < MIN_FRAGSHIFT || hw_fragshift > MAX_FRAGSHIFT) return -EINVAL; hw_fragsize = 1 << hw_fragshift; hw_fragcount = aport->sw_fragcount >> subdivshift; if (hw_fragcount < MIN_FRAGCOUNT(hw_fragsize) || hw_fragcount > MAX_FRAGCOUNT(hw_fragsize)) return -EINVAL; if (rport) rport->sw_subdivshift = subdivshift; if (wport) wport->sw_subdivshift = subdivshift; } return 0; case SNDCTL_DSP_SETFMT: /* _SIOWR('P',5, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_SETFMT %d\n", ival); if (ival != AFMT_QUERY) { if (aport->swstate != SW_INITIAL) { DBGP("SETFMT failed, swstate = %d\n", aport->swstate); return -EINVAL; } switch (ival) { case AFMT_MU_LAW: case AFMT_A_LAW: case AFMT_U8: case AFMT_S8: case AFMT_S16_LE: if (rport) rport->sw_samplefmt = ival; if (wport) wport->sw_samplefmt = ival; break; default: return -EINVAL; } } ival = aport->sw_samplefmt; return put_user(ival, (int *) arg); case SNDCTL_DSP_GETOSPACE: /* _SIOR ('P',12, audio_buf_info) */ DBGXV("SNDCTL_DSP_GETOSPACE\n"); if (!wport) return -EINVAL; ival = pcm_setup(devc, rport, wport); if (ival 
< 0) return ival; ival = swb_inc_u(wport, 0); buf_info.fragments = ival >> wport->sw_fragshift; buf_info.fragstotal = wport->sw_fragcount; buf_info.fragsize = 1 << wport->sw_fragshift; buf_info.bytes = ival; DBGXV("SNDCTL_DSP_GETOSPACE returns { %d %d %d %d }\n", buf_info.fragments, buf_info.fragstotal, buf_info.fragsize, buf_info.bytes); if (copy_to_user((void *) arg, &buf_info, sizeof buf_info)) return -EFAULT; return 0; case SNDCTL_DSP_GETISPACE: /* _SIOR ('P',13, audio_buf_info) */ DBGX("SNDCTL_DSP_GETISPACE\n"); if (!rport) return -EINVAL; ival = pcm_setup(devc, rport, wport); if (ival < 0) return ival; ival = swb_inc_u(rport, 0); buf_info.fragments = ival >> rport->sw_fragshift; buf_info.fragstotal = rport->sw_fragcount; buf_info.fragsize = 1 << rport->sw_fragshift; buf_info.bytes = ival; DBGX("SNDCTL_DSP_GETISPACE returns { %d %d %d %d }\n", buf_info.fragments, buf_info.fragstotal, buf_info.fragsize, buf_info.bytes); if (copy_to_user((void *) arg, &buf_info, sizeof buf_info)) return -EFAULT; return 0; case SNDCTL_DSP_NONBLOCK: /* _SIO ('P',14) */ DBGX("SNDCTL_DSP_NONBLOCK\n"); spin_lock(&file->f_lock); file->f_flags |= O_NONBLOCK; spin_unlock(&file->f_lock); return 0; case SNDCTL_DSP_RESET: /* _SIO ('P', 0) */ DBGX("SNDCTL_DSP_RESET\n"); /* * Nothing special needs to be done for input. Input * samples sit in swbuf, but it will be reinitialized * to empty when pcm_setup() is called. 
*/ if (wport && wport->swbuf) { wport->swstate = SW_INITIAL; pcm_output(devc, 0, 0); pcm_write_sync(devc); } pcm_shutdown(devc, rport, wport); return 0; case SNDCTL_DSP_SYNC: /* _SIO ('P', 1) */ DBGX("SNDCTL_DSP_SYNC\n"); if (wport) { pcm_flush_frag(devc); pcm_write_sync(devc); } pcm_shutdown(devc, rport, wport); return 0; case SNDCTL_DSP_POST: /* _SIO ('P', 8) */ DBGX("SNDCTL_DSP_POST\n"); if (!wport) return -EINVAL; pcm_flush_frag(devc); return 0; case SNDCTL_DSP_GETIPTR: /* _SIOR ('P', 17, count_info) */ DBGX("SNDCTL_DSP_GETIPTR\n"); if (!rport) return -EINVAL; spin_lock_irqsave(&rport->lock, flags); { ustmsc_t ustmsc; if (rport->hwstate == HW_RUNNING) { ASSERT(rport->swstate == SW_RUN); li_read_USTMSC(&rport->chan, &ustmsc); info.bytes = ustmsc.msc - rport->MSC_offset; info.bytes *= rport->frame_size; } else { info.bytes = rport->byte_count; } info.blocks = rport->frag_count; info.ptr = 0; /* not implemented */ rport->frag_count = 0; } spin_unlock_irqrestore(&rport->lock, flags); if (copy_to_user((void *) arg, &info, sizeof info)) return -EFAULT; return 0; case SNDCTL_DSP_GETOPTR: /* _SIOR ('P',18, count_info) */ DBGX("SNDCTL_DSP_GETOPTR\n"); if (!wport) return -EINVAL; spin_lock_irqsave(&wport->lock, flags); { ustmsc_t ustmsc; if (wport->hwstate == HW_RUNNING) { ASSERT(wport->swstate == SW_RUN); li_read_USTMSC(&wport->chan, &ustmsc); info.bytes = ustmsc.msc - wport->MSC_offset; info.bytes *= wport->frame_size; } else { info.bytes = wport->byte_count; } info.blocks = wport->frag_count; info.ptr = 0; /* not implemented */ wport->frag_count = 0; } spin_unlock_irqrestore(&wport->lock, flags); if (copy_to_user((void *) arg, &info, sizeof info)) return -EFAULT; return 0; case SNDCTL_DSP_GETODELAY: /* _SIOR ('P', 23, int) */ DBGX("SNDCTL_DSP_GETODELAY\n"); if (!wport) return -EINVAL; spin_lock_irqsave(&wport->lock, flags); { int fsize = wport->frame_size; ival = wport->swb_i_avail / fsize; if (wport->hwstate == HW_RUNNING) { int swptr, hwptr, hwframes, hwbytes, 
hwsize; int totalhwbytes; ustmsc_t ustmsc; hwsize = wport->hwbuf_size; swptr = li_read_swptr(&wport->chan); li_read_USTMSC(&wport->chan, &ustmsc); hwframes = ustmsc.msc - wport->MSC_offset; totalhwbytes = hwframes * fsize; hwptr = totalhwbytes % hwsize; hwbytes = (swptr - hwptr + hwsize) % hwsize; ival += hwbytes / fsize; } } spin_unlock_irqrestore(&wport->lock, flags); return put_user(ival, (int *) arg); case SNDCTL_DSP_PROFILE: /* _SIOW ('P', 23, int) */ DBGX("SNDCTL_DSP_PROFILE\n"); /* * Thomas Sailer explains SNDCTL_DSP_PROFILE * (private email, March 24, 1999): * * This gives the sound driver a hint on what it * should do with partial fragments * (i.e. fragments partially filled with write). * This can direct the driver to zero them or * leave them alone. But don't ask me what this * is good for, my driver just zeroes the last * fragment before the receiver stops, no idea * what good for any other behaviour could * be. Implementing it as NOP seems safe. */ break; case SNDCTL_DSP_GETTRIGGER: /* _SIOR ('P',16, int) */ DBGX("SNDCTL_DSP_GETTRIGGER\n"); ival = 0; if (rport) { spin_lock_irqsave(&rport->lock, flags); { if (!(rport->flags & DISABLED)) ival |= PCM_ENABLE_INPUT; } spin_unlock_irqrestore(&rport->lock, flags); } if (wport) { spin_lock_irqsave(&wport->lock, flags); { if (!(wport->flags & DISABLED)) ival |= PCM_ENABLE_OUTPUT; } spin_unlock_irqrestore(&wport->lock, flags); } return put_user(ival, (int *) arg); case SNDCTL_DSP_SETTRIGGER: /* _SIOW ('P',16, int) */ if (get_user(ival, (int *) arg)) return -EFAULT; DBGX("SNDCTL_DSP_SETTRIGGER %d\n", ival); /* * If user is disabling I/O and port is not in initial * state, fail with EINVAL. 
*/ if (((rport && !(ival & PCM_ENABLE_INPUT)) || (wport && !(ival & PCM_ENABLE_OUTPUT))) && aport->swstate != SW_INITIAL) return -EINVAL; if (rport) { vwsnd_port_hwstate_t hwstate; spin_lock_irqsave(&rport->lock, flags); { hwstate = rport->hwstate; if (ival & PCM_ENABLE_INPUT) rport->flags &= ~DISABLED; else rport->flags |= DISABLED; } spin_unlock_irqrestore(&rport->lock, flags); if (hwstate != HW_RUNNING && ival & PCM_ENABLE_INPUT) { if (rport->swstate == SW_INITIAL) pcm_setup(devc, rport, wport); else li_activate_dma(&rport->chan); } } if (wport) { vwsnd_port_flags_t pflags; spin_lock_irqsave(&wport->lock, flags); { pflags = wport->flags; if (ival & PCM_ENABLE_OUTPUT) wport->flags &= ~DISABLED; else wport->flags |= DISABLED; } spin_unlock_irqrestore(&wport->lock, flags); if (pflags & DISABLED && ival & PCM_ENABLE_OUTPUT) { if (wport->swstate == SW_RUN) pcm_output(devc, 0, 0); } } return 0; default: DBGP("unknown ioctl 0x%x\n", cmd); return -EINVAL; } DBGP("unimplemented ioctl 0x%x\n", cmd); return -EINVAL; } static long vwsnd_audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; int ret; mutex_lock(&vwsnd_mutex); mutex_lock(&devc->io_mutex); ret = vwsnd_audio_do_ioctl(file, cmd, arg); mutex_unlock(&devc->io_mutex); mutex_unlock(&vwsnd_mutex); return ret; } /* No mmap. */ static int vwsnd_audio_mmap(struct file *file, struct vm_area_struct *vma) { DBGE("(file=0x%p, vma=0x%p)\n", file, vma); return -ENODEV; } /* * Open the audio device for read and/or write. * * Returns 0 on success, -errno on failure. 
*/ static int vwsnd_audio_open(struct inode *inode, struct file *file) { vwsnd_dev_t *devc; int minor = iminor(inode); int sw_samplefmt; DBGE("(inode=0x%p, file=0x%p)\n", inode, file); mutex_lock(&vwsnd_mutex); INC_USE_COUNT; for (devc = vwsnd_dev_list; devc; devc = devc->next_dev) if ((devc->audio_minor & ~0x0F) == (minor & ~0x0F)) break; if (devc == NULL) { DEC_USE_COUNT; mutex_unlock(&vwsnd_mutex); return -ENODEV; } mutex_lock(&devc->open_mutex); while (devc->open_mode & file->f_mode) { mutex_unlock(&devc->open_mutex); if (file->f_flags & O_NONBLOCK) { DEC_USE_COUNT; mutex_unlock(&vwsnd_mutex); return -EBUSY; } interruptible_sleep_on(&devc->open_wait); if (signal_pending(current)) { DEC_USE_COUNT; mutex_unlock(&vwsnd_mutex); return -ERESTARTSYS; } mutex_lock(&devc->open_mutex); } devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); mutex_unlock(&devc->open_mutex); /* get default sample format from minor number. */ sw_samplefmt = 0; if ((minor & 0xF) == SND_DEV_DSP) sw_samplefmt = AFMT_U8; else if ((minor & 0xF) == SND_DEV_AUDIO) sw_samplefmt = AFMT_MU_LAW; else if ((minor & 0xF) == SND_DEV_DSP16) sw_samplefmt = AFMT_S16_LE; else ASSERT(0); /* Initialize vwsnd_ports. 
*/ mutex_lock(&devc->io_mutex); { if (file->f_mode & FMODE_READ) { devc->rport.swstate = SW_INITIAL; devc->rport.flags = 0; devc->rport.sw_channels = 1; devc->rport.sw_samplefmt = sw_samplefmt; devc->rport.sw_framerate = 8000; devc->rport.sw_fragshift = DEFAULT_FRAGSHIFT; devc->rport.sw_fragcount = DEFAULT_FRAGCOUNT; devc->rport.sw_subdivshift = DEFAULT_SUBDIVSHIFT; devc->rport.byte_count = 0; devc->rport.frag_count = 0; } if (file->f_mode & FMODE_WRITE) { devc->wport.swstate = SW_INITIAL; devc->wport.flags = 0; devc->wport.sw_channels = 1; devc->wport.sw_samplefmt = sw_samplefmt; devc->wport.sw_framerate = 8000; devc->wport.sw_fragshift = DEFAULT_FRAGSHIFT; devc->wport.sw_fragcount = DEFAULT_FRAGCOUNT; devc->wport.sw_subdivshift = DEFAULT_SUBDIVSHIFT; devc->wport.byte_count = 0; devc->wport.frag_count = 0; } } mutex_unlock(&devc->io_mutex); file->private_data = devc; DBGRV(); mutex_unlock(&vwsnd_mutex); return 0; } /* * Release (close) the audio device. */ static int vwsnd_audio_release(struct inode *inode, struct file *file) { vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; vwsnd_port_t *wport = NULL, *rport = NULL; int err = 0; mutex_lock(&vwsnd_mutex); mutex_lock(&devc->io_mutex); { DBGEV("(inode=0x%p, file=0x%p)\n", inode, file); if (file->f_mode & FMODE_READ) rport = &devc->rport; if (file->f_mode & FMODE_WRITE) { wport = &devc->wport; pcm_flush_frag(devc); pcm_write_sync(devc); } pcm_shutdown(devc, rport, wport); if (rport) rport->swstate = SW_OFF; if (wport) wport->swstate = SW_OFF; } mutex_unlock(&devc->io_mutex); mutex_lock(&devc->open_mutex); { devc->open_mode &= ~file->f_mode; } mutex_unlock(&devc->open_mutex); wake_up(&devc->open_wait); DEC_USE_COUNT; DBGR(); mutex_unlock(&vwsnd_mutex); return err; } static const struct file_operations vwsnd_audio_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = vwsnd_audio_read, .write = vwsnd_audio_write, .poll = vwsnd_audio_poll, .unlocked_ioctl = vwsnd_audio_ioctl, .mmap = vwsnd_audio_mmap, .open 
= vwsnd_audio_open, .release = vwsnd_audio_release, }; /*****************************************************************************/ /* mixer driver */ /* open the mixer device. */ static int vwsnd_mixer_open(struct inode *inode, struct file *file) { vwsnd_dev_t *devc; DBGEV("(inode=0x%p, file=0x%p)\n", inode, file); INC_USE_COUNT; mutex_lock(&vwsnd_mutex); for (devc = vwsnd_dev_list; devc; devc = devc->next_dev) if (devc->mixer_minor == iminor(inode)) break; if (devc == NULL) { DEC_USE_COUNT; mutex_unlock(&vwsnd_mutex); return -ENODEV; } file->private_data = devc; mutex_unlock(&vwsnd_mutex); return 0; } /* release (close) the mixer device. */ static int vwsnd_mixer_release(struct inode *inode, struct file *file) { DBGEV("(inode=0x%p, file=0x%p)\n", inode, file); DEC_USE_COUNT; return 0; } /* mixer_read_ioctl handles all read ioctls on the mixer device. */ static int mixer_read_ioctl(vwsnd_dev_t *devc, unsigned int nr, void __user *arg) { int val = -1; DBGEV("(devc=0x%p, nr=0x%x, arg=0x%p)\n", devc, nr, arg); switch (nr) { case SOUND_MIXER_CAPS: val = SOUND_CAP_EXCL_INPUT; break; case SOUND_MIXER_DEVMASK: val = (SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_RECLEV); break; case SOUND_MIXER_STEREODEVS: val = (SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_RECLEV); break; case SOUND_MIXER_OUTMASK: val = (SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD); break; case SOUND_MIXER_RECMASK: val = (SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD); break; case SOUND_MIXER_PCM: val = ad1843_get_gain(&devc->lith, &ad1843_gain_PCM); break; case SOUND_MIXER_LINE: val = ad1843_get_gain(&devc->lith, &ad1843_gain_LINE); break; case SOUND_MIXER_MIC: val = ad1843_get_gain(&devc->lith, &ad1843_gain_MIC); break; case SOUND_MIXER_CD: val = ad1843_get_gain(&devc->lith, &ad1843_gain_CD); break; case SOUND_MIXER_RECLEV: val = ad1843_get_gain(&devc->lith, &ad1843_gain_RECLEV); break; case 
SOUND_MIXER_RECSRC: val = ad1843_get_recsrc(&devc->lith); break; case SOUND_MIXER_OUTSRC: val = ad1843_get_outsrc(&devc->lith); break; default: return -EINVAL; } return put_user(val, (int __user *) arg); } /* mixer_write_ioctl handles all write ioctls on the mixer device. */ static int mixer_write_ioctl(vwsnd_dev_t *devc, unsigned int nr, void __user *arg) { int val; int err; DBGEV("(devc=0x%p, nr=0x%x, arg=0x%p)\n", devc, nr, arg); err = get_user(val, (int __user *) arg); if (err) return -EFAULT; switch (nr) { case SOUND_MIXER_PCM: val = ad1843_set_gain(&devc->lith, &ad1843_gain_PCM, val); break; case SOUND_MIXER_LINE: val = ad1843_set_gain(&devc->lith, &ad1843_gain_LINE, val); break; case SOUND_MIXER_MIC: val = ad1843_set_gain(&devc->lith, &ad1843_gain_MIC, val); break; case SOUND_MIXER_CD: val = ad1843_set_gain(&devc->lith, &ad1843_gain_CD, val); break; case SOUND_MIXER_RECLEV: val = ad1843_set_gain(&devc->lith, &ad1843_gain_RECLEV, val); break; case SOUND_MIXER_RECSRC: if (devc->rport.swbuf || devc->wport.swbuf) return -EBUSY; /* can't change recsrc while running */ val = ad1843_set_recsrc(&devc->lith, val); break; case SOUND_MIXER_OUTSRC: val = ad1843_set_outsrc(&devc->lith, val); break; default: return -EINVAL; } if (val < 0) return val; return put_user(val, (int __user *) arg); } /* This is the ioctl entry to the mixer driver. 
*/ static long vwsnd_mixer_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; const unsigned int nrmask = _IOC_NRMASK << _IOC_NRSHIFT; const unsigned int nr = (cmd & nrmask) >> _IOC_NRSHIFT; int retval; DBGEV("(devc=0x%p, cmd=0x%x, arg=0x%lx)\n", devc, cmd, arg); mutex_lock(&vwsnd_mutex); mutex_lock(&devc->mix_mutex); { if ((cmd & ~nrmask) == MIXER_READ(0)) retval = mixer_read_ioctl(devc, nr, (void __user *) arg); else if ((cmd & ~nrmask) == MIXER_WRITE(0)) retval = mixer_write_ioctl(devc, nr, (void __user *) arg); else retval = -EINVAL; } mutex_unlock(&devc->mix_mutex); mutex_unlock(&vwsnd_mutex); return retval; } static const struct file_operations vwsnd_mixer_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = vwsnd_mixer_ioctl, .open = vwsnd_mixer_open, .release = vwsnd_mixer_release, }; /*****************************************************************************/ /* probe/attach/unload */ /* driver probe routine. Return nonzero if hardware is found. */ static int __init probe_vwsnd(struct address_info *hw_config) { lithium_t lith; int w; unsigned long later; DBGEV("(hw_config=0x%p)\n", hw_config); /* XXX verify lithium present (to prevent crash on non-vw) */ if (li_create(&lith, hw_config->io_base) != 0) { printk(KERN_WARNING "probe_vwsnd: can't map lithium\n"); return 0; } later = jiffies + 2; li_writel(&lith, LI_HOST_CONTROLLER, LI_HC_LINK_ENABLE); do { w = li_readl(&lith, LI_HOST_CONTROLLER); } while (w == LI_HC_LINK_ENABLE && time_before(jiffies, later)); li_destroy(&lith); DBGPV("HC = 0x%04x\n", w); if ((w == LI_HC_LINK_ENABLE) || (w & LI_HC_LINK_CODEC)) { /* This may indicate a beta machine with no audio, * or a future machine with different audio. 
* On beta-release 320 w/ no audio, HC == 0x4000 */ printk(KERN_WARNING "probe_vwsnd: audio codec not found\n"); return 0; } if (w & LI_HC_LINK_FAILURE) { printk(KERN_WARNING "probe_vwsnd: can't init audio codec\n"); return 0; } printk(KERN_INFO "vwsnd: lithium audio at mmio %#x irq %d\n", hw_config->io_base, hw_config->irq); return 1; } /* * driver attach routine. Initialize driver data structures and * initialize hardware. A new vwsnd_dev_t is allocated and put * onto the global list, vwsnd_dev_list. * * Return +minor_dev on success, -errno on failure. */ static int __init attach_vwsnd(struct address_info *hw_config) { vwsnd_dev_t *devc = NULL; int err = -ENOMEM; DBGEV("(hw_config=0x%p)\n", hw_config); devc = kmalloc(sizeof (vwsnd_dev_t), GFP_KERNEL); if (devc == NULL) goto fail0; err = li_create(&devc->lith, hw_config->io_base); if (err) goto fail1; init_waitqueue_head(&devc->open_wait); devc->rport.hwbuf_size = HWBUF_SIZE; devc->rport.hwbuf_vaddr = __get_free_pages(GFP_KERNEL, HWBUF_ORDER); if (!devc->rport.hwbuf_vaddr) goto fail2; devc->rport.hwbuf = (void *) devc->rport.hwbuf_vaddr; devc->rport.hwbuf_paddr = virt_to_phys(devc->rport.hwbuf); /* * Quote from the NT driver: * * // WARNING!!! HACK to setup output dma!!! * // This is required because even on output there is some data * // trickling into the input DMA channel. This is a bug in the * // Lithium microcode. * // --sde * * We set the input side's DMA base address here. It will remain * valid until the driver is unloaded. 
*/ li_writel(&devc->lith, LI_COMM1_BASE, devc->rport.hwbuf_paddr >> 8 | 1 << (37 - 8)); devc->wport.hwbuf_size = HWBUF_SIZE; devc->wport.hwbuf_vaddr = __get_free_pages(GFP_KERNEL, HWBUF_ORDER); if (!devc->wport.hwbuf_vaddr) goto fail3; devc->wport.hwbuf = (void *) devc->wport.hwbuf_vaddr; devc->wport.hwbuf_paddr = virt_to_phys(devc->wport.hwbuf); DBGP("wport hwbuf = 0x%p\n", devc->wport.hwbuf); DBGDO(shut_up++); err = ad1843_init(&devc->lith); DBGDO(shut_up--); if (err) goto fail4; /* install interrupt handler */ err = request_irq(hw_config->irq, vwsnd_audio_intr, 0, "vwsnd", devc); if (err) goto fail5; /* register this device's drivers. */ devc->audio_minor = register_sound_dsp(&vwsnd_audio_fops, -1); if ((err = devc->audio_minor) < 0) { DBGDO(printk(KERN_WARNING "attach_vwsnd: register_sound_dsp error %d\n", err)); goto fail6; } devc->mixer_minor = register_sound_mixer(&vwsnd_mixer_fops, devc->audio_minor >> 4); if ((err = devc->mixer_minor) < 0) { DBGDO(printk(KERN_WARNING "attach_vwsnd: register_sound_mixer error %d\n", err)); goto fail7; } /* Squirrel away device indices for unload routine. */ hw_config->slots[0] = devc->audio_minor; /* Initialize as much of *devc as possible */ mutex_init(&devc->open_mutex); mutex_init(&devc->io_mutex); mutex_init(&devc->mix_mutex); devc->open_mode = 0; spin_lock_init(&devc->rport.lock); init_waitqueue_head(&devc->rport.queue); devc->rport.swstate = SW_OFF; devc->rport.hwstate = HW_STOPPED; devc->rport.flags = 0; devc->rport.swbuf = NULL; spin_lock_init(&devc->wport.lock); init_waitqueue_head(&devc->wport.queue); devc->wport.swstate = SW_OFF; devc->wport.hwstate = HW_STOPPED; devc->wport.flags = 0; devc->wport.swbuf = NULL; /* Success. Link us onto the local device list. */ devc->next_dev = vwsnd_dev_list; vwsnd_dev_list = devc; return devc->audio_minor; /* So many ways to fail. Undo what we did. 
*/ fail7: unregister_sound_dsp(devc->audio_minor); fail6: free_irq(hw_config->irq, devc); fail5: fail4: free_pages(devc->wport.hwbuf_vaddr, HWBUF_ORDER); fail3: free_pages(devc->rport.hwbuf_vaddr, HWBUF_ORDER); fail2: li_destroy(&devc->lith); fail1: kfree(devc); fail0: return err; } static int __exit unload_vwsnd(struct address_info *hw_config) { vwsnd_dev_t *devc, **devcp; DBGE("()\n"); devcp = &vwsnd_dev_list; while ((devc = *devcp)) { if (devc->audio_minor == hw_config->slots[0]) { *devcp = devc->next_dev; break; } devcp = &devc->next_dev; } if (!devc) return -ENODEV; unregister_sound_mixer(devc->mixer_minor); unregister_sound_dsp(devc->audio_minor); free_irq(hw_config->irq, devc); free_pages(devc->wport.hwbuf_vaddr, HWBUF_ORDER); free_pages(devc->rport.hwbuf_vaddr, HWBUF_ORDER); li_destroy(&devc->lith); kfree(devc); return 0; } /*****************************************************************************/ /* initialization and loadable kernel module interface */ static struct address_info the_hw_config = { 0xFF001000, /* lithium phys addr */ CO_IRQ(CO_APIC_LI_AUDIO) /* irq */ }; MODULE_DESCRIPTION("SGI Visual Workstation sound module"); MODULE_AUTHOR("Bob Miller <kbob@sgi.com>"); MODULE_LICENSE("GPL"); static int __init init_vwsnd(void) { int err; DBGXV("\n"); DBGXV("sound::vwsnd::init_module()\n"); if (!probe_vwsnd(&the_hw_config)) return -ENODEV; err = attach_vwsnd(&the_hw_config); if (err < 0) return err; return 0; } static void __exit cleanup_vwsnd(void) { DBGX("sound::vwsnd::cleanup_module()\n"); unload_vwsnd(&the_hw_config); } module_init(init_vwsnd); module_exit(cleanup_vwsnd);
gpl-2.0
btolfa/kernel_tion_pro28
drivers/staging/wlan-ng/p80211wep.c
9217
9824
/* src/p80211/p80211wep.c * * WEP encode/decode for P80211. * * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. 
* * -------------------------------------------------------------------- */ /*================================================================*/ /* System Includes */ #include <linux/netdevice.h> #include <linux/wireless.h> #include <linux/random.h> #include <linux/kernel.h> /* #define WEP_DEBUG */ #include "p80211hdr.h" #include "p80211types.h" #include "p80211msg.h" #include "p80211conv.h" #include "p80211netdev.h" #define WEP_KEY(x) (((x) & 0xC0) >> 6) static const u32 wep_crc32_table[256] = { 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, 0x2d02ef8dL }; /* keylen in bytes! 
*/ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen) { if (keylen < 0) return -1; if (keylen >= MAX_KEYLEN) return -1; if (key == NULL) return -1; if (keynum < 0) return -1; if (keynum >= NUM_WEPKEYS) return -1; #ifdef WEP_DEBUG printk(KERN_DEBUG "WEP key %d len %d = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", keynum, keylen, key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7]); #endif wlandev->wep_keylens[keynum] = keylen; memcpy(wlandev->wep_keys[keynum], key, keylen); return 0; } /* 4-byte IV at start of buffer, 4-byte ICV at end of buffer. if successful, buf start is payload begin, length -= 8; */ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64], c_crc[4]; u8 keyidx; /* Needs to be at least 8 bytes of payload */ if (len <= 0) return -1; /* initialize the first bytes of the key from the IV */ key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; keyidx = WEP_KEY(iv[3]); if (key_override >= 0) keyidx = key_override; if (keyidx >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keyidx]; if (keylen == 0) return -3; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keyidx], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG printk(KERN_DEBUG "D %d: %02x %02x %02x (%d %d) %02x:%02x:%02x:%02x:%02x\n", len, key[0], key[1], key[2], keyidx, keylen, key[3], key[4], key[5], key[6], key[7]); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Apply the RC4 to the data, update the CRC32 */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); buf[k] ^= s[(s[i] + s[j]) & 0xff]; crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); } crc = ~crc; /* now let's check the crc */ c_crc[0] = crc; c_crc[1] = crc >> 8; c_crc[2] = crc >> 16; 
c_crc[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); if ((c_crc[k] ^ s[(s[i] + s[j]) & 0xff]) != icv[k]) return -(4 | (k << 4)); /* ICV mismatch */ } return 0; } /* encrypts in-place. */ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64]; /* no point in WEPping an empty frame */ if (len <= 0) return -1; /* we need to have a real key.. */ if (keynum >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keynum]; if (keylen <= 0) return -3; /* use a random IV. And skip known weak ones. */ get_random_bytes(iv, 3); while ((iv[1] == 0xff) && (iv[0] >= 3) && (iv[0] < keylen)) get_random_bytes(iv, 3); iv[3] = (keynum & 0x03) << 6; key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keynum], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG printk(KERN_DEBUG "E %d (%d/%d %d) %02x %02x %02x %02x:%02x:%02x:%02x:%02x\n", len, iv[3], keynum, keylen, key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7]); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Update CRC32 then apply RC4 to the data */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff]; } crc = ~crc; /* now let's encrypt the crc */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); icv[k] ^= s[(s[i] + s[j]) & 0xff]; } return 0; }
gpl-2.0
klquicksall/Ace-GB-DHD
arch/cris/arch-v32/mach-a3/cpufreq.c
9473
3532
#include <linux/init.h> #include <linux/module.h> #include <linux/cpufreq.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/clkgen_defs.h> #include <hwregs/ddr2_defs.h> static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data); static struct notifier_block cris_sdram_freq_notifier_block = { .notifier_call = cris_sdram_freq_notifier }; static struct cpufreq_frequency_table cris_freq_table[] = { {0x01, 6000}, {0x02, 200000}, {0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) { reg_clkgen_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); return clk_ctrl.pll ? 200000 : 6000; } static void cris_freq_set_cpu_state(unsigned int state) { int i = 0; struct cpufreq_freqs freqs; reg_clkgen_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); #ifdef CONFIG_SMP for_each_present_cpu(i) #endif { freqs.old = cris_freq_get_cpu_frequency(i); freqs.new = cris_freq_table[state].frequency; freqs.cpu = i; } cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_disable(); /* Even though we may be SMP they will share the same clock * so all settings are made on CPU0. 
*/ if (cris_freq_table[state].frequency == 200000) clk_ctrl.pll = 1; else clk_ctrl.pll = 0; REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); local_irq_enable(); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); }; static int cris_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); } static int cris_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; if (cpufreq_frequency_table_target(policy, cris_freq_table, target_freq, relation, &newstate)) return -EINVAL; cris_freq_set_cpu_state(newstate); return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { int result; /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = 1000000; /* 1ms */ policy->cur = cris_freq_get_cpu_frequency(0); result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); if (result) return (result); cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); return 0; } static int cris_freq_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *cris_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, .verify = cris_freq_verify, .target = cris_freq_target, .init = cris_freq_cpu_init, .exit = cris_freq_cpu_exit, .name = "cris_freq", .owner = THIS_MODULE, .attr = cris_freq_attr, }; static int __init cris_freq_init(void) { int ret; ret = cpufreq_register_driver(&cris_freq_driver); cpufreq_register_notifier(&cris_sdram_freq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return ret; } static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data) { int i; struct cpufreq_freqs *freqs = data; if (val == CPUFREQ_PRECHANGE) { reg_ddr2_rw_cfg cfg = REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg); cfg.ref_interval = (freqs->new == 
200000 ? 1560 : 46); if (freqs->new == 200000) for (i = 0; i < 50000; i++); REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); } return 0; } module_init(cris_freq_init);
gpl-2.0
AOKP/kernel_sony_msm8x60
drivers/ide/ide-cd_verbose.c
12545
13840
/* * Verbose error logging for ATAPI CD/DVD devices. * * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov> * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org> * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de> */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/cdrom.h> #include <scsi/scsi.h> #ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { /* Suppress printing unit attention and `in progress of becoming ready' errors when we're not being verbose. */ if (sense->sense_key == UNIT_ATTENTION || (sense->sense_key == NOT_READY && (sense->asc == 4 || sense->asc == 0x3a))) return; printk(KERN_ERR "%s: error code: 0x%02x sense_key: 0x%02x " "asc: 0x%02x ascq: 0x%02x\n", name, sense->error_code, sense->sense_key, sense->asc, sense->ascq); } #else /* The generic packet command opcodes for CD/DVD Logical Units, * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. 
*/ static const struct { unsigned short packet_command; const char * const text; } packet_command_texts[] = { { GPCMD_TEST_UNIT_READY, "Test Unit Ready" }, { GPCMD_REQUEST_SENSE, "Request Sense" }, { GPCMD_FORMAT_UNIT, "Format Unit" }, { GPCMD_INQUIRY, "Inquiry" }, { GPCMD_START_STOP_UNIT, "Start/Stop Unit" }, { GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, "Prevent/Allow Medium Removal" }, { GPCMD_READ_FORMAT_CAPACITIES, "Read Format Capacities" }, { GPCMD_READ_CDVD_CAPACITY, "Read Cd/Dvd Capacity" }, { GPCMD_READ_10, "Read 10" }, { GPCMD_WRITE_10, "Write 10" }, { GPCMD_SEEK, "Seek" }, { GPCMD_WRITE_AND_VERIFY_10, "Write and Verify 10" }, { GPCMD_VERIFY_10, "Verify 10" }, { GPCMD_FLUSH_CACHE, "Flush Cache" }, { GPCMD_READ_SUBCHANNEL, "Read Subchannel" }, { GPCMD_READ_TOC_PMA_ATIP, "Read Table of Contents" }, { GPCMD_READ_HEADER, "Read Header" }, { GPCMD_PLAY_AUDIO_10, "Play Audio 10" }, { GPCMD_GET_CONFIGURATION, "Get Configuration" }, { GPCMD_PLAY_AUDIO_MSF, "Play Audio MSF" }, { GPCMD_PLAYAUDIO_TI, "Play Audio TrackIndex" }, { GPCMD_GET_EVENT_STATUS_NOTIFICATION, "Get Event Status Notification" }, { GPCMD_PAUSE_RESUME, "Pause/Resume" }, { GPCMD_STOP_PLAY_SCAN, "Stop Play/Scan" }, { GPCMD_READ_DISC_INFO, "Read Disc Info" }, { GPCMD_READ_TRACK_RZONE_INFO, "Read Track Rzone Info" }, { GPCMD_RESERVE_RZONE_TRACK, "Reserve Rzone Track" }, { GPCMD_SEND_OPC, "Send OPC" }, { GPCMD_MODE_SELECT_10, "Mode Select 10" }, { GPCMD_REPAIR_RZONE_TRACK, "Repair Rzone Track" }, { GPCMD_MODE_SENSE_10, "Mode Sense 10" }, { GPCMD_CLOSE_TRACK, "Close Track" }, { GPCMD_BLANK, "Blank" }, { GPCMD_SEND_EVENT, "Send Event" }, { GPCMD_SEND_KEY, "Send Key" }, { GPCMD_REPORT_KEY, "Report Key" }, { GPCMD_LOAD_UNLOAD, "Load/Unload" }, { GPCMD_SET_READ_AHEAD, "Set Read-ahead" }, { GPCMD_READ_12, "Read 12" }, { GPCMD_GET_PERFORMANCE, "Get Performance" }, { GPCMD_SEND_DVD_STRUCTURE, "Send DVD Structure" }, { GPCMD_READ_DVD_STRUCTURE, "Read DVD Structure" }, { GPCMD_SET_STREAMING, "Set Streaming" }, { 
GPCMD_READ_CD_MSF, "Read CD MSF" }, { GPCMD_SCAN, "Scan" }, { GPCMD_SET_SPEED, "Set Speed" }, { GPCMD_PLAY_CD, "Play CD" }, { GPCMD_MECHANISM_STATUS, "Mechanism Status" }, { GPCMD_READ_CD, "Read CD" }, }; /* From Table 303 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const char * const sense_key_texts[16] = { "No sense data", "Recovered error", "Not ready", "Medium error", "Hardware error", "Illegal request", "Unit attention", "Data protect", "Blank check", "(reserved)", "(reserved)", "Aborted command", "(reserved)", "(reserved)", "Miscompare", "(reserved)", }; /* From Table 304 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const struct { unsigned long asc_ascq; const char * const text; } sense_data_texts[] = { { 0x000000, "No additional sense information" }, { 0x000011, "Play operation in progress" }, { 0x000012, "Play operation paused" }, { 0x000013, "Play operation successfully completed" }, { 0x000014, "Play operation stopped due to error" }, { 0x000015, "No current audio status to return" }, { 0x010c0a, "Write error - padding blocks added" }, { 0x011700, "Recovered data with no error correction applied" }, { 0x011701, "Recovered data with retries" }, { 0x011702, "Recovered data with positive head offset" }, { 0x011703, "Recovered data with negative head offset" }, { 0x011704, "Recovered data with retries and/or CIRC applied" }, { 0x011705, "Recovered data using previous sector ID" }, { 0x011800, "Recovered data with error correction applied" }, { 0x011801, "Recovered data with error correction and retries applied"}, { 0x011802, "Recovered data - the data was auto-reallocated" }, { 0x011803, "Recovered data with CIRC" }, { 0x011804, "Recovered data with L-EC" }, { 0x015d00, "Failure prediction threshold exceeded" " - Predicted logical unit failure" }, { 0x015d01, "Failure prediction threshold exceeded" " - Predicted media failure" }, { 0x015dff, "Failure prediction threshold exceeded - False" }, { 0x017301, "Power calibration area 
almost full" }, { 0x020400, "Logical unit not ready - cause not reportable" }, /* Following is misspelled in ATAPI 2.6, _and_ in Mt. Fuji */ { 0x020401, "Logical unit not ready" " - in progress [sic] of becoming ready" }, { 0x020402, "Logical unit not ready - initializing command required" }, { 0x020403, "Logical unit not ready - manual intervention required" }, { 0x020404, "Logical unit not ready - format in progress" }, { 0x020407, "Logical unit not ready - operation in progress" }, { 0x020408, "Logical unit not ready - long write in progress" }, { 0x020600, "No reference position found (media may be upside down)" }, { 0x023000, "Incompatible medium installed" }, { 0x023a00, "Medium not present" }, { 0x025300, "Media load or eject failed" }, { 0x025700, "Unable to recover table of contents" }, { 0x030300, "Peripheral device write fault" }, { 0x030301, "No write current" }, { 0x030302, "Excessive write errors" }, { 0x030c00, "Write error" }, { 0x030c01, "Write error - Recovered with auto reallocation" }, { 0x030c02, "Write error - auto reallocation failed" }, { 0x030c03, "Write error - recommend reassignment" }, { 0x030c04, "Compression check miscompare error" }, { 0x030c05, "Data expansion occurred during compress" }, { 0x030c06, "Block not compressible" }, { 0x030c07, "Write error - recovery needed" }, { 0x030c08, "Write error - recovery failed" }, { 0x030c09, "Write error - loss of streaming" }, { 0x031100, "Unrecovered read error" }, { 0x031106, "CIRC unrecovered error" }, { 0x033101, "Format command failed" }, { 0x033200, "No defect spare location available" }, { 0x033201, "Defect list update failure" }, { 0x035100, "Erase failure" }, { 0x037200, "Session fixation error" }, { 0x037201, "Session fixation error writin lead-in" }, { 0x037202, "Session fixation error writin lead-out" }, { 0x037300, "CD control error" }, { 0x037302, "Power calibration area is full" }, { 0x037303, "Power calibration area error" }, { 0x037304, "Program memory area / RMA update 
failure" }, { 0x037305, "Program memory area / RMA is full" }, { 0x037306, "Program memory area / RMA is (almost) full" }, { 0x040200, "No seek complete" }, { 0x040300, "Write fault" }, { 0x040900, "Track following error" }, { 0x040901, "Tracking servo failure" }, { 0x040902, "Focus servo failure" }, { 0x040903, "Spindle servo failure" }, { 0x041500, "Random positioning error" }, { 0x041501, "Mechanical positioning or changer error" }, { 0x041502, "Positioning error detected by read of medium" }, { 0x043c00, "Mechanical positioning or changer error" }, { 0x044000, "Diagnostic failure on component (ASCQ)" }, { 0x044400, "Internal CD/DVD logical unit failure" }, { 0x04b600, "Media load mechanism failed" }, { 0x051a00, "Parameter list length error" }, { 0x052000, "Invalid command operation code" }, { 0x052100, "Logical block address out of range" }, { 0x052102, "Invalid address for write" }, { 0x052400, "Invalid field in command packet" }, { 0x052600, "Invalid field in parameter list" }, { 0x052601, "Parameter not supported" }, { 0x052602, "Parameter value invalid" }, { 0x052700, "Write protected media" }, { 0x052c00, "Command sequence error" }, { 0x052c03, "Current program area is not empty" }, { 0x052c04, "Current program area is empty" }, { 0x053001, "Cannot read medium - unknown format" }, { 0x053002, "Cannot read medium - incompatible format" }, { 0x053900, "Saving parameters not supported" }, { 0x054e00, "Overlapped commands attempted" }, { 0x055302, "Medium removal prevented" }, { 0x055500, "System resource failure" }, { 0x056300, "End of user area encountered on this track" }, { 0x056400, "Illegal mode for this track or incompatible medium" }, { 0x056f00, "Copy protection key exchange failure" " - Authentication failure" }, { 0x056f01, "Copy protection key exchange failure - Key not present" }, { 0x056f02, "Copy protection key exchange failure" " - Key not established" }, { 0x056f03, "Read of scrambled sector without authentication" }, { 0x056f04, "Media 
region code is mismatched to logical unit" }, { 0x056f05, "Drive region must be permanent" " / region reset count error" }, { 0x057203, "Session fixation error - incomplete track in session" }, { 0x057204, "Empty or partially written reserved track" }, { 0x057205, "No more RZONE reservations are allowed" }, { 0x05bf00, "Loss of streaming" }, { 0x062800, "Not ready to ready transition, medium may have changed" }, { 0x062900, "Power on, reset or hardware reset occurred" }, { 0x062a00, "Parameters changed" }, { 0x062a01, "Mode parameters changed" }, { 0x062e00, "Insufficient time for operation" }, { 0x063f00, "Logical unit operating conditions have changed" }, { 0x063f01, "Microcode has been changed" }, { 0x065a00, "Operator request or state change input (unspecified)" }, { 0x065a01, "Operator medium removal request" }, { 0x0bb900, "Play operation aborted" }, /* Here we use 0xff for the key (not a valid key) to signify * that these can have _any_ key value associated with them... */ { 0xff0401, "Logical unit is in process of becoming ready" }, { 0xff0400, "Logical unit not ready, cause not reportable" }, { 0xff0402, "Logical unit not ready, initializing command required" }, { 0xff0403, "Logical unit not ready, manual intervention required" }, { 0xff0500, "Logical unit does not respond to selection" }, { 0xff0800, "Logical unit communication failure" }, { 0xff0802, "Logical unit communication parity error" }, { 0xff0801, "Logical unit communication time-out" }, { 0xff2500, "Logical unit not supported" }, { 0xff4c00, "Logical unit failed self-configuration" }, { 0xff3e00, "Logical unit has not self-configured yet" }, }; void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { int i; const char *s = "bad sense key!"; char buf[80]; printk(KERN_ERR "ATAPI device %s:\n", name); if (sense->error_code == 0x70) printk(KERN_CONT " Error: "); else if (sense->error_code == 0x71) printk(" Deferred Error: "); else if (sense->error_code 
== 0x7f) printk(KERN_CONT " Vendor-specific Error: "); else printk(KERN_CONT " Unknown Error Type: "); if (sense->sense_key < ARRAY_SIZE(sense_key_texts)) s = sense_key_texts[sense->sense_key]; printk(KERN_CONT "%s -- (Sense key=0x%02x)\n", s, sense->sense_key); if (sense->asc == 0x40) { sprintf(buf, "Diagnostic failure on component 0x%02x", sense->ascq); s = buf; } else { int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts); unsigned long key = (sense->sense_key << 16); key |= (sense->asc << 8); if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd)) key |= sense->ascq; s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (sense_data_texts[mid].asc_ascq == key || sense_data_texts[mid].asc_ascq == (0xff0000|key)) { s = sense_data_texts[mid].text; break; } else if (sense_data_texts[mid].asc_ascq > key) hi = mid; else lo = mid + 1; } } if (s == NULL) { if (sense->asc > 0x80) s = "(vendor-specific error)"; else s = "(reserved error code)"; } printk(KERN_ERR " %s -- (asc=0x%02x, ascq=0x%02x)\n", s, sense->asc, sense->ascq); if (failed_command != NULL) { int lo = 0, mid, hi = ARRAY_SIZE(packet_command_texts); s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (packet_command_texts[mid].packet_command == failed_command->cmd[0]) { s = packet_command_texts[mid].text; break; } if (packet_command_texts[mid].packet_command > failed_command->cmd[0]) hi = mid; else lo = mid + 1; } printk(KERN_ERR " The failed \"%s\" packet command " "was: \n \"", s); for (i = 0; i < BLK_MAX_CDB; i++) printk(KERN_CONT "%02x ", failed_command->cmd[i]); printk(KERN_CONT "\"\n"); } /* The SKSV bit specifies validity of the sense_key_specific * in the next two commands. It is bit 7 of the first byte. * In the case of NOT_READY, if SKSV is set the drive can * give us nice ETA readings. 
*/ if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) { int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100; printk(KERN_ERR " Command is %02d%% complete\n", progress / 0xffff); } if (sense->sense_key == ILLEGAL_REQUEST && (sense->sks[0] & 0x80) != 0) { printk(KERN_ERR " Error in %s byte %d", (sense->sks[0] & 0x40) != 0 ? "command packet" : "command data", (sense->sks[1] << 8) + sense->sks[2]); if ((sense->sks[0] & 0x40) != 0) printk(KERN_CONT " bit %d", sense->sks[0] & 0x07); printk(KERN_CONT "\n"); } } #endif
gpl-2.0
MSM8226-Samsung/kernel_samsung_msm8226
drivers/ide/ide-cd_verbose.c
12545
13840
/* * Verbose error logging for ATAPI CD/DVD devices. * * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov> * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org> * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de> */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/cdrom.h> #include <scsi/scsi.h> #ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { /* Suppress printing unit attention and `in progress of becoming ready' errors when we're not being verbose. */ if (sense->sense_key == UNIT_ATTENTION || (sense->sense_key == NOT_READY && (sense->asc == 4 || sense->asc == 0x3a))) return; printk(KERN_ERR "%s: error code: 0x%02x sense_key: 0x%02x " "asc: 0x%02x ascq: 0x%02x\n", name, sense->error_code, sense->sense_key, sense->asc, sense->ascq); } #else /* The generic packet command opcodes for CD/DVD Logical Units, * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. 
*/ static const struct { unsigned short packet_command; const char * const text; } packet_command_texts[] = { { GPCMD_TEST_UNIT_READY, "Test Unit Ready" }, { GPCMD_REQUEST_SENSE, "Request Sense" }, { GPCMD_FORMAT_UNIT, "Format Unit" }, { GPCMD_INQUIRY, "Inquiry" }, { GPCMD_START_STOP_UNIT, "Start/Stop Unit" }, { GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, "Prevent/Allow Medium Removal" }, { GPCMD_READ_FORMAT_CAPACITIES, "Read Format Capacities" }, { GPCMD_READ_CDVD_CAPACITY, "Read Cd/Dvd Capacity" }, { GPCMD_READ_10, "Read 10" }, { GPCMD_WRITE_10, "Write 10" }, { GPCMD_SEEK, "Seek" }, { GPCMD_WRITE_AND_VERIFY_10, "Write and Verify 10" }, { GPCMD_VERIFY_10, "Verify 10" }, { GPCMD_FLUSH_CACHE, "Flush Cache" }, { GPCMD_READ_SUBCHANNEL, "Read Subchannel" }, { GPCMD_READ_TOC_PMA_ATIP, "Read Table of Contents" }, { GPCMD_READ_HEADER, "Read Header" }, { GPCMD_PLAY_AUDIO_10, "Play Audio 10" }, { GPCMD_GET_CONFIGURATION, "Get Configuration" }, { GPCMD_PLAY_AUDIO_MSF, "Play Audio MSF" }, { GPCMD_PLAYAUDIO_TI, "Play Audio TrackIndex" }, { GPCMD_GET_EVENT_STATUS_NOTIFICATION, "Get Event Status Notification" }, { GPCMD_PAUSE_RESUME, "Pause/Resume" }, { GPCMD_STOP_PLAY_SCAN, "Stop Play/Scan" }, { GPCMD_READ_DISC_INFO, "Read Disc Info" }, { GPCMD_READ_TRACK_RZONE_INFO, "Read Track Rzone Info" }, { GPCMD_RESERVE_RZONE_TRACK, "Reserve Rzone Track" }, { GPCMD_SEND_OPC, "Send OPC" }, { GPCMD_MODE_SELECT_10, "Mode Select 10" }, { GPCMD_REPAIR_RZONE_TRACK, "Repair Rzone Track" }, { GPCMD_MODE_SENSE_10, "Mode Sense 10" }, { GPCMD_CLOSE_TRACK, "Close Track" }, { GPCMD_BLANK, "Blank" }, { GPCMD_SEND_EVENT, "Send Event" }, { GPCMD_SEND_KEY, "Send Key" }, { GPCMD_REPORT_KEY, "Report Key" }, { GPCMD_LOAD_UNLOAD, "Load/Unload" }, { GPCMD_SET_READ_AHEAD, "Set Read-ahead" }, { GPCMD_READ_12, "Read 12" }, { GPCMD_GET_PERFORMANCE, "Get Performance" }, { GPCMD_SEND_DVD_STRUCTURE, "Send DVD Structure" }, { GPCMD_READ_DVD_STRUCTURE, "Read DVD Structure" }, { GPCMD_SET_STREAMING, "Set Streaming" }, { 
GPCMD_READ_CD_MSF, "Read CD MSF" }, { GPCMD_SCAN, "Scan" }, { GPCMD_SET_SPEED, "Set Speed" }, { GPCMD_PLAY_CD, "Play CD" }, { GPCMD_MECHANISM_STATUS, "Mechanism Status" }, { GPCMD_READ_CD, "Read CD" }, }; /* From Table 303 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const char * const sense_key_texts[16] = { "No sense data", "Recovered error", "Not ready", "Medium error", "Hardware error", "Illegal request", "Unit attention", "Data protect", "Blank check", "(reserved)", "(reserved)", "Aborted command", "(reserved)", "(reserved)", "Miscompare", "(reserved)", }; /* From Table 304 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const struct { unsigned long asc_ascq; const char * const text; } sense_data_texts[] = { { 0x000000, "No additional sense information" }, { 0x000011, "Play operation in progress" }, { 0x000012, "Play operation paused" }, { 0x000013, "Play operation successfully completed" }, { 0x000014, "Play operation stopped due to error" }, { 0x000015, "No current audio status to return" }, { 0x010c0a, "Write error - padding blocks added" }, { 0x011700, "Recovered data with no error correction applied" }, { 0x011701, "Recovered data with retries" }, { 0x011702, "Recovered data with positive head offset" }, { 0x011703, "Recovered data with negative head offset" }, { 0x011704, "Recovered data with retries and/or CIRC applied" }, { 0x011705, "Recovered data using previous sector ID" }, { 0x011800, "Recovered data with error correction applied" }, { 0x011801, "Recovered data with error correction and retries applied"}, { 0x011802, "Recovered data - the data was auto-reallocated" }, { 0x011803, "Recovered data with CIRC" }, { 0x011804, "Recovered data with L-EC" }, { 0x015d00, "Failure prediction threshold exceeded" " - Predicted logical unit failure" }, { 0x015d01, "Failure prediction threshold exceeded" " - Predicted media failure" }, { 0x015dff, "Failure prediction threshold exceeded - False" }, { 0x017301, "Power calibration area 
almost full" }, { 0x020400, "Logical unit not ready - cause not reportable" }, /* Following is misspelled in ATAPI 2.6, _and_ in Mt. Fuji */ { 0x020401, "Logical unit not ready" " - in progress [sic] of becoming ready" }, { 0x020402, "Logical unit not ready - initializing command required" }, { 0x020403, "Logical unit not ready - manual intervention required" }, { 0x020404, "Logical unit not ready - format in progress" }, { 0x020407, "Logical unit not ready - operation in progress" }, { 0x020408, "Logical unit not ready - long write in progress" }, { 0x020600, "No reference position found (media may be upside down)" }, { 0x023000, "Incompatible medium installed" }, { 0x023a00, "Medium not present" }, { 0x025300, "Media load or eject failed" }, { 0x025700, "Unable to recover table of contents" }, { 0x030300, "Peripheral device write fault" }, { 0x030301, "No write current" }, { 0x030302, "Excessive write errors" }, { 0x030c00, "Write error" }, { 0x030c01, "Write error - Recovered with auto reallocation" }, { 0x030c02, "Write error - auto reallocation failed" }, { 0x030c03, "Write error - recommend reassignment" }, { 0x030c04, "Compression check miscompare error" }, { 0x030c05, "Data expansion occurred during compress" }, { 0x030c06, "Block not compressible" }, { 0x030c07, "Write error - recovery needed" }, { 0x030c08, "Write error - recovery failed" }, { 0x030c09, "Write error - loss of streaming" }, { 0x031100, "Unrecovered read error" }, { 0x031106, "CIRC unrecovered error" }, { 0x033101, "Format command failed" }, { 0x033200, "No defect spare location available" }, { 0x033201, "Defect list update failure" }, { 0x035100, "Erase failure" }, { 0x037200, "Session fixation error" }, { 0x037201, "Session fixation error writin lead-in" }, { 0x037202, "Session fixation error writin lead-out" }, { 0x037300, "CD control error" }, { 0x037302, "Power calibration area is full" }, { 0x037303, "Power calibration area error" }, { 0x037304, "Program memory area / RMA update 
failure" }, { 0x037305, "Program memory area / RMA is full" }, { 0x037306, "Program memory area / RMA is (almost) full" }, { 0x040200, "No seek complete" }, { 0x040300, "Write fault" }, { 0x040900, "Track following error" }, { 0x040901, "Tracking servo failure" }, { 0x040902, "Focus servo failure" }, { 0x040903, "Spindle servo failure" }, { 0x041500, "Random positioning error" }, { 0x041501, "Mechanical positioning or changer error" }, { 0x041502, "Positioning error detected by read of medium" }, { 0x043c00, "Mechanical positioning or changer error" }, { 0x044000, "Diagnostic failure on component (ASCQ)" }, { 0x044400, "Internal CD/DVD logical unit failure" }, { 0x04b600, "Media load mechanism failed" }, { 0x051a00, "Parameter list length error" }, { 0x052000, "Invalid command operation code" }, { 0x052100, "Logical block address out of range" }, { 0x052102, "Invalid address for write" }, { 0x052400, "Invalid field in command packet" }, { 0x052600, "Invalid field in parameter list" }, { 0x052601, "Parameter not supported" }, { 0x052602, "Parameter value invalid" }, { 0x052700, "Write protected media" }, { 0x052c00, "Command sequence error" }, { 0x052c03, "Current program area is not empty" }, { 0x052c04, "Current program area is empty" }, { 0x053001, "Cannot read medium - unknown format" }, { 0x053002, "Cannot read medium - incompatible format" }, { 0x053900, "Saving parameters not supported" }, { 0x054e00, "Overlapped commands attempted" }, { 0x055302, "Medium removal prevented" }, { 0x055500, "System resource failure" }, { 0x056300, "End of user area encountered on this track" }, { 0x056400, "Illegal mode for this track or incompatible medium" }, { 0x056f00, "Copy protection key exchange failure" " - Authentication failure" }, { 0x056f01, "Copy protection key exchange failure - Key not present" }, { 0x056f02, "Copy protection key exchange failure" " - Key not established" }, { 0x056f03, "Read of scrambled sector without authentication" }, { 0x056f04, "Media 
region code is mismatched to logical unit" }, { 0x056f05, "Drive region must be permanent" " / region reset count error" }, { 0x057203, "Session fixation error - incomplete track in session" }, { 0x057204, "Empty or partially written reserved track" }, { 0x057205, "No more RZONE reservations are allowed" }, { 0x05bf00, "Loss of streaming" }, { 0x062800, "Not ready to ready transition, medium may have changed" }, { 0x062900, "Power on, reset or hardware reset occurred" }, { 0x062a00, "Parameters changed" }, { 0x062a01, "Mode parameters changed" }, { 0x062e00, "Insufficient time for operation" }, { 0x063f00, "Logical unit operating conditions have changed" }, { 0x063f01, "Microcode has been changed" }, { 0x065a00, "Operator request or state change input (unspecified)" }, { 0x065a01, "Operator medium removal request" }, { 0x0bb900, "Play operation aborted" }, /* Here we use 0xff for the key (not a valid key) to signify * that these can have _any_ key value associated with them... */ { 0xff0401, "Logical unit is in process of becoming ready" }, { 0xff0400, "Logical unit not ready, cause not reportable" }, { 0xff0402, "Logical unit not ready, initializing command required" }, { 0xff0403, "Logical unit not ready, manual intervention required" }, { 0xff0500, "Logical unit does not respond to selection" }, { 0xff0800, "Logical unit communication failure" }, { 0xff0802, "Logical unit communication parity error" }, { 0xff0801, "Logical unit communication time-out" }, { 0xff2500, "Logical unit not supported" }, { 0xff4c00, "Logical unit failed self-configuration" }, { 0xff3e00, "Logical unit has not self-configured yet" }, }; void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { int i; const char *s = "bad sense key!"; char buf[80]; printk(KERN_ERR "ATAPI device %s:\n", name); if (sense->error_code == 0x70) printk(KERN_CONT " Error: "); else if (sense->error_code == 0x71) printk(" Deferred Error: "); else if (sense->error_code 
== 0x7f) printk(KERN_CONT " Vendor-specific Error: "); else printk(KERN_CONT " Unknown Error Type: "); if (sense->sense_key < ARRAY_SIZE(sense_key_texts)) s = sense_key_texts[sense->sense_key]; printk(KERN_CONT "%s -- (Sense key=0x%02x)\n", s, sense->sense_key); if (sense->asc == 0x40) { sprintf(buf, "Diagnostic failure on component 0x%02x", sense->ascq); s = buf; } else { int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts); unsigned long key = (sense->sense_key << 16); key |= (sense->asc << 8); if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd)) key |= sense->ascq; s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (sense_data_texts[mid].asc_ascq == key || sense_data_texts[mid].asc_ascq == (0xff0000|key)) { s = sense_data_texts[mid].text; break; } else if (sense_data_texts[mid].asc_ascq > key) hi = mid; else lo = mid + 1; } } if (s == NULL) { if (sense->asc > 0x80) s = "(vendor-specific error)"; else s = "(reserved error code)"; } printk(KERN_ERR " %s -- (asc=0x%02x, ascq=0x%02x)\n", s, sense->asc, sense->ascq); if (failed_command != NULL) { int lo = 0, mid, hi = ARRAY_SIZE(packet_command_texts); s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (packet_command_texts[mid].packet_command == failed_command->cmd[0]) { s = packet_command_texts[mid].text; break; } if (packet_command_texts[mid].packet_command > failed_command->cmd[0]) hi = mid; else lo = mid + 1; } printk(KERN_ERR " The failed \"%s\" packet command " "was: \n \"", s); for (i = 0; i < BLK_MAX_CDB; i++) printk(KERN_CONT "%02x ", failed_command->cmd[i]); printk(KERN_CONT "\"\n"); } /* The SKSV bit specifies validity of the sense_key_specific * in the next two commands. It is bit 7 of the first byte. * In the case of NOT_READY, if SKSV is set the drive can * give us nice ETA readings. 
*/ if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) { int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100; printk(KERN_ERR " Command is %02d%% complete\n", progress / 0xffff); } if (sense->sense_key == ILLEGAL_REQUEST && (sense->sks[0] & 0x80) != 0) { printk(KERN_ERR " Error in %s byte %d", (sense->sks[0] & 0x40) != 0 ? "command packet" : "command data", (sense->sks[1] << 8) + sense->sks[2]); if ((sense->sks[0] & 0x40) != 0) printk(KERN_CONT " bit %d", sense->sks[0] & 0x07); printk(KERN_CONT "\n"); } } #endif
gpl-2.0
omor1/linux-430
arch/powerpc/platforms/pasemi/idle.c
13569
2410
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Maintained by: Olof Johansson <olof@lixom.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #undef DEBUG #include <linux/kernel.h> #include <linux/string.h> #include <linux/irq.h> #include <asm/machdep.h> #include <asm/reg.h> #include <asm/smp.h> #include "pasemi.h" struct sleep_mode { char *name; void (*entry)(void); }; static struct sleep_mode modes[] = { { .name = "spin", .entry = &idle_spin }, { .name = "doze", .entry = &idle_doze }, }; static int current_mode = 0; static int pasemi_system_reset_exception(struct pt_regs *regs) { /* If we were woken up from power savings, we need to return * to the calling function, since nip is not saved across * all modes. 
*/ if (regs->msr & SRR1_WAKEMASK) regs->nip = regs->link; switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEEE: do_IRQ(regs); break; case SRR1_WAKEDEC: timer_interrupt(regs); break; default: /* do system reset */ return 0; } /* Set higher astate since we come out of power savings at 0 */ restore_astate(hard_smp_processor_id()); /* everything handled */ regs->msr |= MSR_RI; return 1; } static int __init pasemi_idle_init(void) { #ifndef CONFIG_PPC_PASEMI_CPUFREQ printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n"); current_mode = 0; #endif ppc_md.system_reset_exception = pasemi_system_reset_exception; ppc_md.power_save = modes[current_mode].entry; printk(KERN_INFO "Using PA6T idle loop (%s)\n", modes[current_mode].name); return 0; } machine_late_initcall(pasemi, pasemi_idle_init); static int __init idle_param(char *p) { int i; for (i = 0; i < ARRAY_SIZE(modes); i++) { if (!strcmp(modes[i].name, p)) { current_mode = i; break; } } return 0; } early_param("idle", idle_param);
gpl-2.0
mbernasocchi/QGIS
src/app/qgsmaptooladdcircularstring.cpp
2
7915
/*************************************************************************** qgsmaptooladdcircularstring.h - map tool for adding circular strings --------------------- begin : December 2014 copyright : (C) 2014 by Marco Hugentobler email : marco dot hugentobler at sourcepole dot ch *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsmaptooladdcircularstring.h" #include "qgscircularstring.h" #include "qgscompoundcurve.h" #include "qgscurvepolygon.h" #include "qgsgeometryrubberband.h" #include "qgsgeometryutils.h" #include "qgslinestring.h" #include "qgsmapcanvas.h" #include "qgspoint.h" #include "qgisapp.h" #include "qgssnapindicator.h" QgsMapToolAddCircularString::QgsMapToolAddCircularString( QgsMapToolCapture *parentTool, QgsMapCanvas *canvas, CaptureMode mode ) : QgsMapToolCapture( canvas, QgisApp::instance()->cadDockWidget(), mode ) , mParentTool( parentTool ) , mShowCenterPointRubberBand( false ) , mSnapIndicator( std::make_unique< QgsSnapIndicator>( canvas ) ) { mToolName = tr( "Add circular string" ); connect( QgisApp::instance(), &QgisApp::newProject, this, &QgsMapToolAddCircularString::stopCapturing ); connect( QgisApp::instance(), &QgisApp::projectRead, this, &QgsMapToolAddCircularString::stopCapturing ); } QgsMapToolAddCircularString::~QgsMapToolAddCircularString() { delete mRubberBand; delete mTempRubberBand; removeCenterPointRubberBand(); } void QgsMapToolAddCircularString::keyPressEvent( QKeyEvent *e ) { if ( e && e->isAutoRepeat() ) { return; } if ( e && e->key() == Qt::Key_R ) { mShowCenterPointRubberBand = true; createCenterPointRubberBand(); } if ( ( e && e->key() == 
Qt::Key_Escape ) || ( ( e && e->key() == Qt::Key_Backspace ) && ( mPoints.size() == 1 ) ) ) { clean(); if ( mParentTool ) mParentTool->keyPressEvent( e ); } if ( ( e && e->key() == Qt::Key_Backspace ) && ( mPoints.size() > 1 ) ) { mPoints.removeLast(); std::unique_ptr<QgsCircularString> geomRubberBand( new QgsCircularString() ); std::unique_ptr<QgsLineString> geomTempRubberBand( new QgsLineString() ); const int lastPositionCompleteCircularString = mPoints.size() - 1 - ( mPoints.size() + 1 ) % 2 ; geomTempRubberBand->setPoints( mPoints.mid( lastPositionCompleteCircularString ) ); mTempRubberBand->setGeometry( geomTempRubberBand.release() ); if ( mRubberBand ) { geomRubberBand->setPoints( mPoints.mid( 0, lastPositionCompleteCircularString + 1 ) ); mRubberBand->setGeometry( geomRubberBand.release() ); } QgsVertexId idx( 0, 0, ( mPoints.size() + 1 ) % 2 ); if ( mTempRubberBand ) { mTempRubberBand->moveVertex( idx, mPoints.last() ); updateCenterPointRubberBand( mPoints.last() ); } if ( mParentTool ) mParentTool->keyPressEvent( e ); } } void QgsMapToolAddCircularString::keyReleaseEvent( QKeyEvent *e ) { if ( e && e->isAutoRepeat() ) { return; } if ( e && e->key() == Qt::Key_R ) { removeCenterPointRubberBand(); mShowCenterPointRubberBand = false; } } void QgsMapToolAddCircularString::deactivate() { if ( !mParentTool || mPoints.size() < 3 ) { return; } if ( mPoints.size() % 2 == 0 ) //a valid circularstring needs to have an odd number of vertices { mPoints.removeLast(); } QgsCircularString *c = new QgsCircularString(); c->setPoints( mPoints ); mParentTool->addCurve( c ); clean(); QgsMapToolCapture::deactivate(); } void QgsMapToolAddCircularString::activate() { QgsVectorLayer *vLayer = static_cast<QgsVectorLayer *>( QgisApp::instance()->activeLayer() ); if ( vLayer ) mLayerType = vLayer->geometryType(); if ( mParentTool ) { mParentTool->deleteTempRubberBand(); if ( mPoints.isEmpty() ) { // if the parent tool has a curve, use its last point as the first point in this curve 
const QgsCompoundCurve *compoundCurve = mParentTool->captureCurve(); if ( compoundCurve && compoundCurve->nCurves() > 0 ) { const QgsCurve *curve = compoundCurve->curveAt( compoundCurve->nCurves() - 1 ); if ( curve ) { //mParentTool->captureCurve() is in layer coordinates, but we need map coordinates QgsPoint endPointLayerCoord = curve->endPoint(); QgsPointXY mapPoint = toMapCoordinates( mCanvas->currentLayer(), QgsPointXY( endPointLayerCoord.x(), endPointLayerCoord.y() ) ); mPoints.append( QgsPoint( mapPoint ) ); if ( !mTempRubberBand ) { mTempRubberBand = createGeometryRubberBand( mLayerType, true ); mTempRubberBand->show(); } QgsCircularString *c = new QgsCircularString(); QgsPointSequence rubberBandPoints = mPoints; rubberBandPoints.append( QgsPoint( mapPoint ) ); c->setPoints( rubberBandPoints ); mTempRubberBand->setGeometry( c ); } } } } QgsMapToolCapture::activate(); } void QgsMapToolAddCircularString::createCenterPointRubberBand() { if ( !mShowCenterPointRubberBand || mPoints.size() < 2 || mPoints.size() % 2 != 0 ) { return; } mCenterPointRubberBand = createGeometryRubberBand( QgsWkbTypes::PolygonGeometry ); mCenterPointRubberBand->show(); if ( mTempRubberBand ) { const QgsAbstractGeometry *rubberBandGeom = mTempRubberBand->geometry(); if ( rubberBandGeom ) { QgsVertexId idx( 0, 0, 2 ); QgsPoint pt = rubberBandGeom->vertexAt( idx ); updateCenterPointRubberBand( pt ); } } } void QgsMapToolAddCircularString::updateCenterPointRubberBand( const QgsPoint &pt ) { if ( !mShowCenterPointRubberBand || !mCenterPointRubberBand || mPoints.size() < 2 ) { return; } if ( ( mPoints.size() ) % 2 != 0 ) { return; } //create circular string QgsCircularString *cs = new QgsCircularString(); QgsPointSequence csPoints; csPoints.append( mPoints.at( mPoints.size() - 2 ) ); csPoints.append( mPoints.at( mPoints.size() - 1 ) ); csPoints.append( pt ); cs->setPoints( csPoints ); QgsPoint center; double radius; QgsGeometryUtils::circleCenterRadius( csPoints.at( 0 ), csPoints.at( 1 ), 
csPoints.at( 2 ), radius, center.rx(), center.ry() ); QgsLineString *segment1 = new QgsLineString(); segment1->addVertex( center ); segment1->addVertex( csPoints.at( 0 ) ); QgsLineString *segment2 = new QgsLineString(); segment2->addVertex( csPoints.at( 2 ) ); segment2->addVertex( center ); QgsCompoundCurve *cc = new QgsCompoundCurve(); cc->addCurve( segment1 ); cc->addCurve( cs ); cc->addCurve( segment2 ); QgsCurvePolygon *cp = new QgsCurvePolygon(); cp->setExteriorRing( cc ); mCenterPointRubberBand->setGeometry( cp ); mCenterPointRubberBand->show(); } void QgsMapToolAddCircularString::removeCenterPointRubberBand() { delete mCenterPointRubberBand; mCenterPointRubberBand = nullptr; } void QgsMapToolAddCircularString::release( QgsMapMouseEvent *e ) { deactivate(); if ( mParentTool ) { mParentTool->canvasReleaseEvent( e ); } activate(); } void QgsMapToolAddCircularString::clean() { mPoints.clear(); delete mRubberBand; mRubberBand = nullptr; delete mTempRubberBand; mTempRubberBand = nullptr; removeCenterPointRubberBand(); }
gpl-2.0
xIchigox/ArkCORE-NG
src/server/scripts/EasternKingdoms/Deadmines/boss_captain_cookie.cpp
2
2044
/* * Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2011-2016 ArkCORE <http://www.arkania.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "deadmines.h" class boss_captain_cookie : public CreatureScript { public: boss_captain_cookie() : CreatureScript("boss_captain_cookie") { } struct boss_captain_cookieAI : public ScriptedAI { boss_captain_cookieAI(Creature* creature) : ScriptedAI(creature) { m_instance = creature->GetInstanceScript(); } InstanceScript* m_instance; EventMap m_events; uint32 m_phase; void Reset() { m_instance->SetData(BOSS_CAPTAIN_COOKIE, NOT_STARTED); m_events.Reset(); m_phase = 0; } void EnterCombat(Unit* /*who*/) { m_instance->SetData(BOSS_CAPTAIN_COOKIE, IN_PROGRESS); } void JustDied(Unit* /*Killer*/) { m_instance->SetData(BOSS_CAPTAIN_COOKIE, DONE); } void UpdateAI(uint32 uiDiff) { if (!UpdateVictim()) return; DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return GetDeadminesAI<boss_captain_cookieAI>(creature); } }; void AddSC_boss_captain_cookie() { new boss_captain_cookie(); }
gpl-2.0
hkimura/foedus_code
foedus-core/src/foedus/storage/sequential/sequential_partitioner_impl.cpp
2
2325
/* * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP. * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. You should have received a copy of the GNU General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * HP designates this particular file as subject to the "Classpath" exception * as provided by HP in the LICENSE.txt file that accompanied this code. */ #include "foedus/storage/sequential/sequential_partitioner_impl.hpp" #include <cstring> #include <ostream> namespace foedus { namespace storage { namespace sequential { SequentialPartitioner::SequentialPartitioner(Partitioner* parent) : engine_(parent->get_engine()), id_(parent->get_storage_id()), metadata_(PartitionerMetadata::get_metadata(engine_, id_)) { } ErrorStack SequentialPartitioner::design_partition( const Partitioner::DesignPartitionArguments& /*args*/) { // no data required for SequentialPartitioner metadata_->data_offset_ = 0; metadata_->data_size_ = 0; metadata_->valid_ = true; return kRetOk; } void SequentialPartitioner::partition_batch( const Partitioner::PartitionBatchArguments& args) const { // all local for (uint32_t i = 0; i < args.logs_count_; ++i) { args.results_[i] = args.local_partition_; } } void SequentialPartitioner::sort_batch(const Partitioner::SortBatchArguments& args) const { // no sorting needed. 
std::memcpy( args.output_buffer_, args.log_positions_, sizeof(snapshot::BufferPosition) * args.logs_count_); *args.written_count_ = args.logs_count_; } std::ostream& operator<<(std::ostream& o, const SequentialPartitioner& /*v*/) { o << "<SequentialPartitioner>" << "</SequentialPartitioner>"; return o; } } // namespace sequential } // namespace storage } // namespace foedus
gpl-2.0
apollos/Quantum-ESPRESSO
lapack-3.2/BLAS/SRC/zdrot.f
2
2692
SUBROUTINE ZDROT( N, CX, INCX, CY, INCY, C, S ) * * .. Scalar Arguments .. INTEGER INCX, INCY, N DOUBLE PRECISION C, S * .. * .. Array Arguments .. COMPLEX*16 CX( * ), CY( * ) * .. * * Purpose * ======= * * Applies a plane rotation, where the cos and sin (c and s) are real * and the vectors cx and cy are complex. * jack dongarra, linpack, 3/11/78. * * Arguments * ========== * * N (input) INTEGER * On entry, N specifies the order of the vectors cx and cy. * N must be at least zero. * Unchanged on exit. * * CX (input) COMPLEX*16 array, dimension at least * ( 1 + ( N - 1 )*abs( INCX ) ). * Before entry, the incremented array CX must contain the n * element vector cx. On exit, CX is overwritten by the updated * vector cx. * * INCX (input) INTEGER * On entry, INCX specifies the increment for the elements of * CX. INCX must not be zero. * Unchanged on exit. * * CY (input) COMPLEX*16 array, dimension at least * ( 1 + ( N - 1 )*abs( INCY ) ). * Before entry, the incremented array CY must contain the n * element vector cy. On exit, CY is overwritten by the updated * vector cy. * * INCY (input) INTEGER * On entry, INCY specifies the increment for the elements of * CY. INCY must not be zero. * Unchanged on exit. * * C (input) DOUBLE PRECISION * On entry, C specifies the cosine, cos. * Unchanged on exit. * * S (input) DOUBLE PRECISION * On entry, S specifies the sine, sin. * Unchanged on exit. * * ===================================================================== * * .. Local Scalars .. INTEGER I, IX, IY COMPLEX*16 CTEMP * .. * .. Executable Statements .. * IF( N.LE.0 ) $ RETURN IF( INCX.EQ.1 .AND. 
INCY.EQ.1 ) $ GO TO 20 * * code for unequal increments or equal increments not equal * to 1 * IX = 1 IY = 1 IF( INCX.LT.0 ) $ IX = ( -N+1 )*INCX + 1 IF( INCY.LT.0 ) $ IY = ( -N+1 )*INCY + 1 DO 10 I = 1, N CTEMP = C*CX( IX ) + S*CY( IY ) CY( IY ) = C*CY( IY ) - S*CX( IX ) CX( IX ) = CTEMP IX = IX + INCX IY = IY + INCY 10 CONTINUE RETURN * * code for both increments equal to 1 * 20 CONTINUE DO 30 I = 1, N CTEMP = C*CX( I ) + S*CY( I ) CY( I ) = C*CY( I ) - S*CX( I ) CX( I ) = CTEMP 30 CONTINUE RETURN END
gpl-2.0
0x20c24/linux-psec
drivers/clk/clk-mux.c
2
4735
/* * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org> * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Simple multiplexer clock implementation */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> /* * DOC: basic adjustable multiplexer clock that cannot gate * * Traits of this clock: * prepare - clk_prepare only ensures that parents are prepared * enable - clk_enable only ensures that parents are enabled * rate - rate is only affected by parent switching. No clk_set_rate support * parent - parent is adjustable through clk_set_parent */ #define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw) static u8 clk_mux_get_parent(struct clk_hw *hw) { struct clk_mux *mux = to_clk_mux(hw); int num_parents = __clk_get_num_parents(hw->clk); u32 val; /* * FIXME need a mux-specific flag to determine if val is bitwise or numeric * e.g. 
sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1 * to 0x7 (index starts at one) * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so * val = 0x4 really means "bit 2, index starts at bit 0" */ val = clk_readl(mux->reg) >> mux->shift; val &= mux->mask; if (mux->table) { int i; for (i = 0; i < num_parents; i++) if (mux->table[i] == val) return i; return -EINVAL; } if (val && (mux->flags & CLK_MUX_INDEX_BIT)) val = ffs(val) - 1; if (val && (mux->flags & CLK_MUX_INDEX_ONE)) val--; if (val >= num_parents) return -EINVAL; return val; } static int clk_mux_set_parent(struct clk_hw *hw, u8 index) { struct clk_mux *mux = to_clk_mux(hw); u32 val; unsigned long flags = 0; if (mux->table) index = mux->table[index]; else { if (mux->flags & CLK_MUX_INDEX_BIT) index = 1 << index; if (mux->flags & CLK_MUX_INDEX_ONE) index++; } if (mux->lock) spin_lock_irqsave(mux->lock, flags); if (mux->flags & CLK_MUX_HIWORD_MASK) { val = mux->mask << (mux->shift + 16); } else { val = clk_readl(mux->reg); val &= ~(mux->mask << mux->shift); } val |= index << mux->shift; clk_writel(val, mux->reg); if (mux->lock) spin_unlock_irqrestore(mux->lock, flags); return 0; } const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, .determine_rate = __clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); const struct clk_ops clk_mux_ro_ops = { .get_parent = clk_mux_get_parent, }; EXPORT_SYMBOL_GPL(clk_mux_ro_ops); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock) { struct clk_mux *mux; struct clk *clk; struct clk_init_data init; u8 width = 0; if (clk_mux_flags & CLK_MUX_HIWORD_MASK) { width = fls(mask) - ffs(mask) + 1; if (width + shift > 16) { pr_err("mux value exceeds LOWORD field\n"); return ERR_PTR(-EINVAL); } } /* allocate the mux */ mux = 
kzalloc(sizeof(struct clk_mux), GFP_KERNEL); if (!mux) { pr_err("%s: could not allocate mux clk\n", __func__); return ERR_PTR(-ENOMEM); } init.name = name; if (clk_mux_flags & CLK_MUX_READ_ONLY) init.ops = &clk_mux_ro_ops; else init.ops = &clk_mux_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = parent_names; init.num_parents = num_parents; /* struct clk_mux assignments */ mux->reg = reg; mux->shift = shift; mux->mask = mask; mux->flags = clk_mux_flags; mux->lock = lock; mux->table = table; mux->hw.init = &init; clk = clk_register(dev, &mux->hw); if (IS_ERR(clk)) kfree(mux); return clk; } EXPORT_SYMBOL_GPL(clk_register_mux_table); struct clk *clk_register_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock) { u32 mask = BIT(width) - 1; return clk_register_mux_table(dev, name, parent_names, num_parents, flags, reg, shift, mask, clk_mux_flags, NULL, lock); } EXPORT_SYMBOL_GPL(clk_register_mux); void clk_unregister_mux(struct clk *clk) { struct clk_mux *mux; struct clk_hw *hw; hw = __clk_get_hw(clk); if (!hw) return; mux = to_clk_mux(hw); clk_unregister(clk); kfree(mux); } EXPORT_SYMBOL_GPL(clk_unregister_mux);
gpl-2.0
fzqing/linux-2.6
drivers/video/fbcmap.c
2
7767
/* * linux/drivers/video/fbcmap.c -- Colormap handling for frame buffer devices * * Created 15 Jun 1997 by Geert Uytterhoeven * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/string.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/fb.h> #include <linux/slab.h> #include <asm/uaccess.h> static u16 red2[] = { 0x0000, 0xaaaa }; static u16 green2[] = { 0x0000, 0xaaaa }; static u16 blue2[] = { 0x0000, 0xaaaa }; static u16 red4[] = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 green4[] = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 blue4[] = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 red8[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa }; static u16 green8[] = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa }; static u16 blue8[] = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa }; static u16 red16[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa, 0x5555, 0x5555, 0x5555, 0x5555, 0xffff, 0xffff, 0xffff, 0xffff }; static u16 green16[] = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa, 0x5555, 0x5555, 0xffff, 0xffff, 0x5555, 0x5555, 0xffff, 0xffff }; static u16 blue16[] = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff }; static struct fb_cmap default_2_colors = { 0, 2, red2, green2, blue2, NULL }; static struct fb_cmap default_8_colors = { 0, 8, red8, green8, blue8, NULL }; static struct fb_cmap default_4_colors = { 0, 4, red4, green4, blue4, NULL }; static struct fb_cmap default_16_colors = { 0, 16, red16, green16, blue16, NULL }; /** * fb_alloc_cmap - allocate a colormap * @cmap: frame buffer colormap structure * @len: length of @cmap * @transp: boolean, 1 if there is 
transparency, 0 otherwise * * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. * * Returns -1 errno on error, or zero on success. * */ int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) { int size = len*sizeof(u16); if (cmap->len != len) { fb_dealloc_cmap(cmap); if (!len) return 0; if (!(cmap->red = kmalloc(size, GFP_ATOMIC))) goto fail; if (!(cmap->green = kmalloc(size, GFP_ATOMIC))) goto fail; if (!(cmap->blue = kmalloc(size, GFP_ATOMIC))) goto fail; if (transp) { if (!(cmap->transp = kmalloc(size, GFP_ATOMIC))) goto fail; } else cmap->transp = NULL; } cmap->start = 0; cmap->len = len; fb_copy_cmap(fb_default_cmap(len), cmap); return 0; fail: fb_dealloc_cmap(cmap); return -1; } /** * fb_dealloc_cmap - deallocate a colormap * @cmap: frame buffer colormap structure * * Deallocates a colormap that was previously allocated with * fb_alloc_cmap(). * */ void fb_dealloc_cmap(struct fb_cmap *cmap) { kfree(cmap->red); kfree(cmap->green); kfree(cmap->blue); kfree(cmap->transp); cmap->red = cmap->green = cmap->blue = cmap->transp = NULL; cmap->len = 0; } /** * fb_copy_cmap - copy a colormap * @from: frame buffer colormap structure * @to: frame buffer colormap structure * * Copy contents of colormap from @from to @to. 
*/ int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to) { int tooff = 0, fromoff = 0; int size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; size = to->len - tooff; if (size > (int) (from->len - fromoff)) size = from->len - fromoff; if (size <= 0) return -EINVAL; size *= sizeof(u16); memcpy(to->red+tooff, from->red+fromoff, size); memcpy(to->green+tooff, from->green+fromoff, size); memcpy(to->blue+tooff, from->blue+fromoff, size); if (from->transp && to->transp) memcpy(to->transp+tooff, from->transp+fromoff, size); return 0; } int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to) { int tooff = 0, fromoff = 0; int size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; size = to->len - tooff; if (size > (int) (from->len - fromoff)) size = from->len - fromoff; if (size <= 0) return -EINVAL; size *= sizeof(u16); if (copy_to_user(to->red+tooff, from->red+fromoff, size)) return -EFAULT; if (copy_to_user(to->green+tooff, from->green+fromoff, size)) return -EFAULT; if (copy_to_user(to->blue+tooff, from->blue+fromoff, size)) return -EFAULT; if (from->transp && to->transp) if (copy_to_user(to->transp+tooff, from->transp+fromoff, size)) return -EFAULT; return 0; } /** * fb_set_cmap - set the colormap * @cmap: frame buffer colormap structure * @info: frame buffer info structure * * Sets the colormap @cmap for a screen of device @info. * * Returns negative errno on error, or zero on success. 
* */ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info) { int i, start, rc = 0; u16 *red, *green, *blue, *transp; u_int hred, hgreen, hblue, htransp = 0xffff; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; if (start < 0 || !info->fbops->fb_setcolreg) return -EINVAL; for (i = 0; i < cmap->len; i++) { hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info)) break; } if (rc == 0) fb_copy_cmap(cmap, &info->cmap); return rc; } int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) { int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; if (cmap->start < 0 || !info->fbops->fb_setcolreg) return -EINVAL; memset(&umap, 0, sizeof(struct fb_cmap)); rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); if (rc) return rc; if (copy_from_user(umap.red, cmap->red, size) || copy_from_user(umap.green, cmap->green, size) || copy_from_user(umap.blue, cmap->blue, size) || (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) { fb_dealloc_cmap(&umap); return -EFAULT; } umap.start = cmap->start; rc = fb_set_cmap(&umap, info); fb_dealloc_cmap(&umap); return rc; } /** * fb_default_cmap - get default colormap * @len: size of palette for a depth * * Gets the default colormap for a specific screen depth. @len * is the size of the palette for a particular screen depth. * * Returns pointer to a frame buffer colormap structure. * */ struct fb_cmap *fb_default_cmap(int len) { if (len <= 2) return &default_2_colors; if (len <= 4) return &default_4_colors; if (len <= 8) return &default_8_colors; return &default_16_colors; } /** * fb_invert_cmaps - invert all defaults colormaps * * Invert all default colormaps. 
* */ void fb_invert_cmaps(void) { u_int i; for (i = 0; i < 2; i++) { red2[i] = ~red2[i]; green2[i] = ~green2[i]; blue2[i] = ~blue2[i]; } for (i = 0; i < 4; i++) { red4[i] = ~red4[i]; green4[i] = ~green4[i]; blue4[i] = ~blue4[i]; } for (i = 0; i < 8; i++) { red8[i] = ~red8[i]; green8[i] = ~green8[i]; blue8[i] = ~blue8[i]; } for (i = 0; i < 16; i++) { red16[i] = ~red16[i]; green16[i] = ~green16[i]; blue16[i] = ~blue16[i]; } } /* * Visible symbols for modules */ EXPORT_SYMBOL(fb_alloc_cmap); EXPORT_SYMBOL(fb_dealloc_cmap); EXPORT_SYMBOL(fb_copy_cmap); EXPORT_SYMBOL(fb_set_cmap); EXPORT_SYMBOL(fb_default_cmap); EXPORT_SYMBOL(fb_invert_cmaps);
gpl-2.0
yellowtd/SRB2-PLUS
src/p_spec.c
2
223797
// SONIC ROBO BLAST 2 //----------------------------------------------------------------------------- // Copyright (C) 1993-1996 by id Software, Inc. // Copyright (C) 1998-2000 by DooM Legacy Team. // Copyright (C) 1999-2016 by Sonic Team Junior. // // This program is free software distributed under the // terms of the GNU General Public License, version 2. // See the 'LICENSE' file for more details. //----------------------------------------------------------------------------- /// \file p_spec.c /// \brief Implements special effects: /// Texture animation, height or lighting changes /// according to adjacent sectors, respective /// utility functions, etc. /// Line Tag handling. Line and Sector triggers. #include "doomdef.h" #include "g_game.h" #include "p_local.h" #include "p_setup.h" // levelflats for flat animation #include "r_data.h" #include "m_random.h" #include "p_mobj.h" #include "i_system.h" #include "s_sound.h" #include "w_wad.h" #include "z_zone.h" #include "r_main.h" //Two extra includes. #include "r_sky.h" #include "p_polyobj.h" #include "p_slopes.h" #include "hu_stuff.h" #include "m_misc.h" #include "m_cond.h" //unlock triggers #include "lua_hook.h" // LUAh_LinedefExecute #ifdef HW3SOUND #include "hardware/hw3sound.h" #endif // Not sure if this is necessary, but it was in w_wad.c, so I'm putting it here too -Shadow Hog #ifdef _WIN32_WCE #define AVOID_ERRNO #else #include <errno.h> #endif mobj_t *skyboxmo[2]; // Amount (dx, dy) vector linedef is shifted right to get scroll amount #define SCROLL_SHIFT 5 /** Animated texture descriptor * This keeps track of an animated texture or an animated flat. 
* \sa P_UpdateSpecials, P_InitPicAnims, animdef_t */ typedef struct { SINT8 istexture; ///< ::true for a texture, ::false for a flat INT32 picnum; ///< The end flat number INT32 basepic; ///< The start flat number INT32 numpics; ///< Number of frames in the animation tic_t speed; ///< Number of tics for which each frame is shown } anim_t; #if defined(_MSC_VER) #pragma pack(1) #endif /** Animated texture definition. * Used for ::harddefs and for loading an ANIMATED lump from a wad. * * Animations are defined by the first and last frame (i.e., flat or texture). * The animation sequence uses all flats between the start and end entry, in * the order found in the wad. * * \sa anim_t */ typedef struct { SINT8 istexture; ///< True for a texture, false for a flat. char endname[9]; ///< Name of the last frame, null-terminated. char startname[9]; ///< Name of the first frame, null-terminated. INT32 speed ; ///< Number of tics for which each frame is shown. } ATTRPACK animdef_t; #if defined(_MSC_VER) #pragma pack() #endif typedef struct { UINT32 count; thinker_t **thinkers; } thinkerlist_t; static void P_SearchForDisableLinedefs(void); static void P_SpawnScrollers(void); static void P_SpawnFriction(void); static void P_SpawnPushers(void); static void Add_Pusher(pushertype_e type, fixed_t x_mag, fixed_t y_mag, mobj_t *source, INT32 affectee, INT32 referrer, INT32 exclusive, INT32 slider); //SoM: 3/9/2000 static void Add_MasterDisappearer(tic_t appeartime, tic_t disappeartime, tic_t offset, INT32 line, INT32 sourceline); static void P_AddBlockThinker(sector_t *sec, line_t *sourceline); static void P_AddFloatThinker(sector_t *sec, INT32 tag, line_t *sourceline); //static void P_AddBridgeThinker(line_t *sourceline, sector_t *sec); static void P_AddFakeFloorsByLine(size_t line, ffloortype_e ffloorflags, thinkerlist_t *secthinkers); static void P_ProcessLineSpecial(line_t *line, mobj_t *mo, sector_t *callsec); static void Add_Friction(INT32 friction, INT32 movefactor, INT32 
affectee, INT32 referrer); static void P_AddSpikeThinker(sector_t *sec, INT32 referrer); //SoM: 3/7/2000: New sturcture without limits. static anim_t *lastanim; static anim_t *anims = NULL; /// \todo free leak static size_t maxanims; // // P_InitPicAnims // /** Hardcoded animation sequences. * Used if no ANIMATED lump is found in a loaded wad. */ static animdef_t harddefs[] = { // flat animations. {false, "LITEY3", "LITEY1", 4}, {false, "FWATER16", "FWATER1", 4}, {false, "BWATER16", "BWATER01", 4}, {false, "LWATER16", "LWATER1", 4}, {false, "WATER7", "WATER0", 4}, {false, "LAVA4", "LAVA1", 8}, {false, "DLAVA4", "DLAVA1", 8}, {false, "RLAVA8", "RLAVA1", 8}, {false, "LITER3", "LITER1", 8}, {false, "SURF08", "SURF01", 4}, {false, "CHEMG16", "CHEMG01", 4}, // THZ Chemical gunk {false, "GOOP16", "GOOP01", 4}, // Green chemical gunk {false, "OIL16", "OIL01", 4}, // Oil {false, "THZBOXF4", "THZBOXF1", 2}, // Moved up with the flats {false, "ALTBOXF4", "ALTBOXF1", 2}, {false, "LITEB3", "LITEB1", 4}, {false, "LITEN3", "LITEN1", 4}, {false, "ACZRFL1H", "ACZRFL1A", 4}, {false, "ACZRFL2H", "ACZRFL2A", 4}, {false, "EGRIDF3", "EGRIDF1", 4}, {false, "ERZFAN4", "ERZFAN1", 1}, {false, "ERZFANR4", "ERZFANR1", 1}, {false, "DISCO4", "DISCO1", 15}, // animated textures {true, "GFALL4", "GFALL1", 2}, // Short waterfall {true, "CFALL4", "CFALL1", 2}, // Long waterfall {true, "TFALL4", "TFALL1", 2}, // THZ Chemical fall {true, "AFALL4", "AFALL1", 2}, // Green Chemical fall {true, "QFALL4", "QFALL1", 2}, // Quicksand fall {true, "Q2FALL4", "Q2FALL1", 2}, {true, "Q3FALL4", "Q3FALL1", 2}, {true, "Q4FALL4", "Q4FALL1", 2}, {true, "Q5FALL4", "Q5FALL1", 2}, {true, "Q6FALL4", "Q6FALL1", 2}, {true, "Q7FALL4", "Q7FALL1", 2}, {true, "LFALL4", "LFALL1", 2}, {true, "MFALL4", "MFALL1", 2}, {true, "OFALL4", "OFALL1", 2}, {true, "DLAVA4", "DLAVA1", 8}, {true, "ERZLASA2", "ERZLASA1", 1}, {true, "ERZLASB4", "ERZLASB1", 1}, {true, "ERZLASC4", "ERZLASC1", 1}, {true, "THZBOX04", "THZBOX01", 2}, {true, 
"ALTBOX04", "ALTBOX01", 2}, {true, "SFALL4", "SFALL1", 4}, // Lava fall {true, "RVZFALL8", "RVZFALL1", 4}, {true, "BFALL4", "BFALL1", 2}, // HPZ waterfall {true, "GREYW3", "GREYW1", 4}, {true, "BLUEW3", "BLUEW1", 4}, {true, "COMP6", "COMP4", 4}, {true, "RED3", "RED1", 4}, {true, "YEL3", "YEL1", 4}, {true, "ACWRFL1D", "ACWRFL1A", 1}, {true, "ACWRFL2D", "ACWRFL2A", 1}, {true, "ACWRFL3D", "ACWRFL3A", 1}, {true, "ACWRFL4D", "ACWRFL4A", 1}, {true, "ACWRP1D", "ACWRP1A", 1}, {true, "ACWRP2D", "ACWRP2A", 1}, {true, "ACZRP1D", "ACZRP1A", 1}, {true, "ACZRP2D", "ACZRP2A", 1}, {true, "OILFALL4", "OILFALL1", 2}, {true, "SOLFALL4", "SOLFALL1", 2}, {true, "DOWN1C", "DOWN1A", 4}, {true, "DOWN2C", "DOWN2A", 4}, {true, "DOWN3D", "DOWN3A", 4}, {true, "DOWN4C", "DOWN4A", 4}, {true, "DOWN5C", "DOWN5A", 4}, {true, "UP1C", "UP1A", 4}, {true, "UP2C", "UP2A", 4}, {true, "UP3D", "UP3A", 4}, {true, "UP4C", "UP4A", 4}, {true, "UP5C", "UP5A", 4}, {true, "EGRID3", "EGRID1", 4}, {true, "ERFANW4", "ERFANW1", 1}, {true, "ERFANX4", "ERFANX1", 1}, {true, "DISCOD4", "DISCOD1", 15}, {true, "DANCE4", "DANCE1", 8}, {true, "SKY135", "SKY132", 2}, {true, "APPLMS4", "APPLMS1", 2}, {true, "APBOXW3", "APBOXW1", 2}, {true, "ERZLAZC4", "ERZLAZC1", 4}, // End of line { -1, "", "", 0}, }; // Animating line specials // Init animated textures // - now called at level loading P_SetupLevel() static animdef_t *animdefs = NULL; // A prototype; here instead of p_spec.h, so they're "private" void P_ParseANIMDEFSLump(INT32 wadNum, UINT16 lumpnum, INT32 *i); void P_ParseAnimationDefintion(SINT8 istexture, INT32 *i); /** Sets up texture and flat animations. * * Converts an ::animdef_t array loaded from ::harddefs or a lump into * ::anim_t format. * * Issues an error if any animation cycles are invalid. 
* * \sa P_FindAnimatedFlat, P_SetupLevelFlatAnims * \author Steven McGranahan (original), Shadow Hog (had to rewrite it to handle multiple WADs) */ void P_InitPicAnims(void) { // Init animation INT32 i; // Position in the animdefs array INT32 w; // WAD UINT8 *wadAnimdefs; // not to be confused with animdefs, the combined total of every ANIMATED lump in every WAD, or ANIMDEFS, the ZDoom lump I intend to implement later UINT8 *currentPos; if (W_CheckNumForName("ANIMATED") != LUMPERROR || W_CheckNumForName("ANIMDEFS") != LUMPERROR) { if (animdefs) { Z_Free(animdefs); animdefs = NULL; } for (w = 0, i = 0, maxanims = 0; w < numwadfiles; w++) { UINT16 animatedLumpNum; UINT16 animdefsLumpNum; // Find ANIMATED lump in the WAD animatedLumpNum = W_CheckNumForNamePwad("ANIMATED", w, 0); if (animatedLumpNum != INT16_MAX) { wadAnimdefs = (UINT8 *)W_CacheLumpNumPwad(w, animatedLumpNum, PU_STATIC); // Get the number of animations in the file for (currentPos = wadAnimdefs; *currentPos != UINT8_MAX; maxanims++, currentPos+=23); // Resize animdefs (or if it hasn't been created, create it) animdefs = (animdef_t *)Z_Realloc(animdefs, sizeof(animdef_t)*(maxanims + 1), PU_STATIC, NULL); // Sanity check it if (!animdefs) { I_Error("Not enough free memory for ANIMATED data"); } // Populate the new array for (currentPos = wadAnimdefs; *currentPos != UINT8_MAX; i++, currentPos+=23) { M_Memcpy(&(animdefs[i].istexture), currentPos, 1); // istexture, 1 byte M_Memcpy(animdefs[i].endname, (currentPos + 1), 9); // endname, 9 bytes M_Memcpy(animdefs[i].startname, (currentPos + 10), 9); // startname, 9 bytes M_Memcpy(&(animdefs[i].speed), (currentPos + 19), 4); // speed, 4 bytes } Z_Free(wadAnimdefs); } // Now find ANIMDEFS animdefsLumpNum = W_CheckNumForNamePwad("ANIMDEFS", w, 0); if (animdefsLumpNum != INT16_MAX) { P_ParseANIMDEFSLump(w, animdefsLumpNum, &i); } } // Define the last one animdefs[maxanims].istexture = -1; strncpy(animdefs[maxanims].endname, "", 9); 
strncpy(animdefs[maxanims].startname, "", 9); animdefs[maxanims].speed = 0; } else { animdefs = harddefs; for (maxanims = 0; animdefs[maxanims].istexture != -1; maxanims++); } if (anims) free(anims); anims = (anim_t *)malloc(sizeof (*anims)*(maxanims + 1)); if (!anims) I_Error("Not enough free memory for ANIMATED data"); lastanim = anims; for (i = 0; animdefs[i].istexture != -1; i++) { if (animdefs[i].istexture) { if (R_CheckTextureNumForName(animdefs[i].startname) == -1) continue; lastanim->picnum = R_TextureNumForName(animdefs[i].endname); lastanim->basepic = R_TextureNumForName(animdefs[i].startname); } else { if ((W_CheckNumForName(animdefs[i].startname)) == LUMPERROR) continue; lastanim->picnum = R_FlatNumForName(animdefs[i].endname); lastanim->basepic = R_FlatNumForName(animdefs[i].startname); } lastanim->istexture = animdefs[i].istexture; lastanim->numpics = lastanim->picnum - lastanim->basepic + 1; if (lastanim->numpics < 2) { free(anims); I_Error("P_InitPicAnims: bad cycle from %s to %s", animdefs[i].startname, animdefs[i].endname); } if (animdefs == harddefs) lastanim->speed = animdefs[i].speed; else lastanim->speed = LONG(animdefs[i].speed); lastanim++; } lastanim->istexture = -1; R_ClearTextureNumCache(false); if (animdefs != harddefs) Z_ChangeTag(animdefs, PU_CACHE); } void P_ParseANIMDEFSLump(INT32 wadNum, UINT16 lumpnum, INT32 *i) { char *animdefsLump; size_t animdefsLumpLength; char *animdefsText; char *animdefsToken; // Since lumps AREN'T \0-terminated like I'd assumed they should be, I'll // need to make a space of memory where I can ensure that it will terminate // correctly. Start by loading the relevant data from the WAD. animdefsLump = (char *)W_CacheLumpNumPwad(wadNum,lumpnum,PU_STATIC); // If that didn't exist, we have nothing to do here. if (animdefsLump == NULL) return; // If we're still here, then it DOES exist; figure out how long it is, and allot memory accordingly. 
animdefsLumpLength = W_LumpLengthPwad(wadNum,lumpnum); animdefsText = (char *)Z_Malloc((animdefsLumpLength+1)*sizeof(char),PU_STATIC,NULL); // Now move the contents of the lump into this new location. memmove(animdefsText,animdefsLump,animdefsLumpLength); // Make damn well sure the last character in our new memory location is \0. animdefsText[animdefsLumpLength] = '\0'; // Finally, free up the memory from the first data load, because we really // don't need it. Z_Free(animdefsLump); // Now, let's start parsing this thing animdefsToken = M_GetToken(animdefsText); while (animdefsToken != NULL) { if (stricmp(animdefsToken, "TEXTURE") == 0) { Z_Free(animdefsToken); P_ParseAnimationDefintion(1, i); } else if (stricmp(animdefsToken, "FLAT") == 0) { Z_Free(animdefsToken); P_ParseAnimationDefintion(0, i); } else if (stricmp(animdefsToken, "OSCILLATE") == 0) { // This probably came off the tail of an earlier definition. It's technically legal syntax, but we don't support it. I_Error("Error parsing ANIMDEFS lump: Animation definitions utilizing \"OSCILLATE\" (the animation plays in reverse when it reaches the end) are not supported by SRB2"); } else { I_Error("Error parsing ANIMDEFS lump: Expected \"TEXTURE\" or \"FLAT\", got \"%s\"",animdefsToken); } animdefsToken = M_GetToken(NULL); } Z_Free(animdefsToken); Z_Free((void *)animdefsText); } void P_ParseAnimationDefintion(SINT8 istexture, INT32 *i) { char *animdefsToken; size_t animdefsTokenLength; char *endPos; INT32 animSpeed; // Increase the size to make room for the new animation definition maxanims++; animdefs = (animdef_t *)Z_Realloc(animdefs, sizeof(animdef_t)*(maxanims + 1), PU_STATIC, NULL); animdefs[*i].istexture = istexture; // Startname animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing ANIMDEFS lump: Unexpected end of file where start texture/flat name should be"); } if (stricmp(animdefsToken, "OPTIONAL") == 0) { // This is meaningful to ZDoom - it tells the program NOT to bomb 
out // if the textures can't be found - but it's useless in SRB2, so we'll // just smile, nod, and carry on Z_Free(animdefsToken); animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing ANIMDEFS lump: Unexpected end of file where start texture/flat name should be"); } else if (stricmp(animdefsToken, "RANGE") == 0) { // Oh. Um. Apparently "OPTIONAL" is a texture name. Naughty. // I should probably handle this more gracefully, but right now // I can't be bothered; especially since ZDoom doesn't handle this // condition at all. I_Error("Error parsing ANIMDEFS lump: \"OPTIONAL\" is a keyword; you cannot use it as the startname of an animation"); } } animdefsTokenLength = strlen(animdefsToken); if (animdefsTokenLength>8) { I_Error("Error parsing ANIMDEFS lump: lump name \"%s\" exceeds 8 characters", animdefsToken); } strncpy(animdefs[*i].startname, animdefsToken, 9); Z_Free(animdefsToken); // "RANGE" animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing ANIMDEFS lump: Unexpected end of file where \"RANGE\" after \"%s\"'s startname should be", animdefs[*i].startname); } if (stricmp(animdefsToken, "ALLOWDECALS") == 0) { // Another ZDoom keyword, ho-hum. Skip it, move on to the next token. Z_Free(animdefsToken); animdefsToken = M_GetToken(NULL); } if (stricmp(animdefsToken, "PIC") == 0) { // This is technically legitimate ANIMDEFS syntax, but SRB2 doesn't support it. 
I_Error("Error parsing ANIMDEFS lump: Animation definitions utilizing \"PIC\" (specific frames instead of a consecutive range) are not supported by SRB2"); } if (stricmp(animdefsToken, "RANGE") != 0) { I_Error("Error parsing ANIMDEFS lump: Expected \"RANGE\" after \"%s\"'s startname, got \"%s\"", animdefs[*i].startname, animdefsToken); } Z_Free(animdefsToken); // Endname animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing ANIMDEFS lump: Unexpected end of file where \"%s\"'s end texture/flat name should be", animdefs[*i].startname); } animdefsTokenLength = strlen(animdefsToken); if (animdefsTokenLength>8) { I_Error("Error parsing ANIMDEFS lump: lump name \"%s\" exceeds 8 characters", animdefsToken); } strncpy(animdefs[*i].endname, animdefsToken, 9); Z_Free(animdefsToken); // "TICS" animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing ANIMDEFS lump: Unexpected end of file where \"%s\"'s \"TICS\" should be", animdefs[*i].startname); } if (stricmp(animdefsToken, "RAND") == 0) { // This is technically legitimate ANIMDEFS syntax, but SRB2 doesn't support it. 
I_Error("Error parsing ANIMDEFS lump: Animation definitions utilizing \"RAND\" (random duration per frame) are not supported by SRB2"); } if (stricmp(animdefsToken, "TICS") != 0) { I_Error("Error parsing ANIMDEFS lump: Expected \"TICS\" in animation definition for \"%s\", got \"%s\"", animdefs[*i].startname, animdefsToken); } Z_Free(animdefsToken); // Speed animdefsToken = M_GetToken(NULL); if (animdefsToken == NULL) { I_Error("Error parsing TEXTURES lump: Unexpected end of file where \"%s\"'s animation speed should be", animdefs[*i].startname); } endPos = NULL; #ifndef AVOID_ERRNO errno = 0; #endif animSpeed = strtol(animdefsToken,&endPos,10); if (endPos == animdefsToken // Empty string || *endPos != '\0' // Not end of string #ifndef AVOID_ERRNO || errno == ERANGE // Number out-of-range #endif || animSpeed < 0) // Number is not positive { I_Error("Error parsing TEXTURES lump: Expected a positive integer for \"%s\"'s animation speed, got \"%s\"", animdefs[*i].startname, animdefsToken); } animdefs[*i].speed = animSpeed; Z_Free(animdefsToken); // Increment i before we go, so this doesn't cause issues later (*i)++; } /** Checks for flats in levelflats that are part of a flat animation sequence * and sets them up for animation. * * \param animnum Index into ::anims to find flats for. * \sa P_SetupLevelFlatAnims */ static inline void P_FindAnimatedFlat(INT32 animnum) { size_t i; lumpnum_t startflatnum, endflatnum; levelflat_t *foundflats; foundflats = levelflats; startflatnum = anims[animnum].basepic; endflatnum = anims[animnum].picnum; // note: high word of lumpnum is the wad number if ((startflatnum>>16) != (endflatnum>>16)) I_Error("AnimatedFlat start %s not in same wad as end %s\n", animdefs[animnum].startname, animdefs[animnum].endname); // // now search through the levelflats if this anim flat sequence is used // for (i = 0; i < numlevelflats; i++, foundflats++) { // is that levelflat from the flat anim sequence ? 
if (foundflats->lumpnum >= startflatnum && foundflats->lumpnum <= endflatnum) { foundflats->baselumpnum = startflatnum; foundflats->animseq = foundflats->lumpnum - startflatnum; foundflats->numpics = endflatnum - startflatnum + 1; foundflats->speed = anims[animnum].speed; CONS_Debug(DBG_SETUP, "animflat: #%03d name:%.8s animseq:%d numpics:%d speed:%d\n", atoi(sizeu1(i)), foundflats->name, foundflats->animseq, foundflats->numpics,foundflats->speed); } } } /** Sets up all flats used in a level. * * \sa P_InitPicAnims, P_FindAnimatedFlat */ void P_SetupLevelFlatAnims(void) { INT32 i; // the original game flat anim sequences for (i = 0; anims[i].istexture != -1; i++) { if (!anims[i].istexture) P_FindAnimatedFlat(i); } } // // UTILITIES // #if 0 /** Gets a side from a sector line. * * \param currentSector Sector the line is in. * \param line Index of the line within the sector. * \param side 0 for front, 1 for back. * \return Pointer to the side_t of the side you want. * \sa getSector, twoSided, getNextSector */ static inline side_t *getSide(INT32 currentSector, INT32 line, INT32 side) { return &sides[(sectors[currentSector].lines[line])->sidenum[side]]; } /** Gets a sector from a sector line. * * \param currentSector Sector the line is in. * \param line Index of the line within the sector. * \param side 0 for front, 1 for back. * \return Pointer to the ::sector_t of the sector on that side. * \sa getSide, twoSided, getNextSector */ static inline sector_t *getSector(INT32 currentSector, INT32 line, INT32 side) { return sides[(sectors[currentSector].lines[line])->sidenum[side]].sector; } /** Determines whether a sector line is two-sided. * Uses the Boom method, checking if the line's back side is set to -1, rather * than looking for ::ML_TWOSIDED. * * \param sector The sector. * \param line Line index within the sector. * \return 1 if the sector is two-sided, 0 otherwise. 
* \sa getSide, getSector, getNextSector */ static inline boolean twoSided(INT32 sector, INT32 line) { return (sectors[sector].lines[line])->sidenum[1] != 0xffff; } #endif /** Finds sector next to current. * * \param line Pointer to the line to cross. * \param sec Pointer to the current sector. * \return Pointer to a ::sector_t of the adjacent sector, or NULL if the line * is one-sided. * \sa getSide, getSector, twoSided * \author Steven McGranahan */ static sector_t *getNextSector(line_t *line, sector_t *sec) { if (line->frontsector == sec) { if (line->backsector != sec) return line->backsector; else return NULL; } return line->frontsector; } /** Finds lowest floor in adjacent sectors. * * \param sec Sector to start in. * \return Lowest floor height in an adjacent sector. * \sa P_FindHighestFloorSurrounding, P_FindNextLowestFloor, * P_FindLowestCeilingSurrounding */ fixed_t P_FindLowestFloorSurrounding(sector_t *sec) { size_t i; line_t *check; sector_t *other; fixed_t floorh; floorh = sec->floorheight; for (i = 0; i < sec->linecount; i++) { check = sec->lines[i]; other = getNextSector(check,sec); if (!other) continue; if (other->floorheight < floorh) floorh = other->floorheight; } return floorh; } /** Finds highest floor in adjacent sectors. * * \param sec Sector to start in. * \return Highest floor height in an adjacent sector. * \sa P_FindLowestFloorSurrounding, P_FindNextHighestFloor, * P_FindHighestCeilingSurrounding */ fixed_t P_FindHighestFloorSurrounding(sector_t *sec) { size_t i; line_t *check; sector_t *other; fixed_t floorh = -500*FRACUNIT; INT32 foundsector = 0; for (i = 0; i < sec->linecount; i++) { check = sec->lines[i]; other = getNextSector(check, sec); if (!other) continue; if (other->floorheight > floorh || !foundsector) floorh = other->floorheight; if (!foundsector) foundsector = 1; } return floorh; } /** Finds next highest floor in adjacent sectors. * * \param sec Sector to start in. * \param currentheight Height to start at. 
* \return Next highest floor height in an adjacent sector, or currentheight * if there are none higher. * \sa P_FindHighestFloorSurrounding, P_FindNextLowestFloor, * P_FindNextHighestCeiling * \author Lee Killough */ fixed_t P_FindNextHighestFloor(sector_t *sec, fixed_t currentheight) { sector_t *other; size_t i; fixed_t height; for (i = 0; i < sec->linecount; i++) { other = getNextSector(sec->lines[i],sec); if (other && other->floorheight > currentheight) { height = other->floorheight; while (++i < sec->linecount) { other = getNextSector(sec->lines[i], sec); if (other && other->floorheight < height && other->floorheight > currentheight) height = other->floorheight; } return height; } } return currentheight; } //////////////////////////////////////////////////// // SoM: Start new Boom functions //////////////////////////////////////////////////// /** Finds next lowest floor in adjacent sectors. * * \param sec Sector to start in. * \param currentheight Height to start at. * \return Next lowest floor height in an adjacent sector, or currentheight * if there are none lower. * \sa P_FindLowestFloorSurrounding, P_FindNextHighestFloor, * P_FindNextLowestCeiling * \author Lee Killough */ fixed_t P_FindNextLowestFloor(sector_t *sec, fixed_t currentheight) { sector_t *other; size_t i; fixed_t height; for (i = 0; i < sec->linecount; i++) { other = getNextSector(sec->lines[i], sec); if (other && other->floorheight < currentheight) { height = other->floorheight; while (++i < sec->linecount) { other = getNextSector(sec->lines[i], sec); if (other && other->floorheight > height && other->floorheight < currentheight) height = other->floorheight; } return height; } } return currentheight; } #if 0 /** Finds next lowest ceiling in adjacent sectors. * * \param sec Sector to start in. * \param currentheight Height to start at. * \return Next lowest ceiling height in an adjacent sector, or currentheight * if there are none lower. 
* \sa P_FindLowestCeilingSurrounding, P_FindNextHighestCeiling, * P_FindNextLowestFloor * \author Lee Killough */ static fixed_t P_FindNextLowestCeiling(sector_t *sec, fixed_t currentheight) { sector_t *other; size_t i; fixed_t height; for (i = 0; i < sec->linecount; i++) { other = getNextSector(sec->lines[i],sec); if (other && other->ceilingheight < currentheight) { height = other->ceilingheight; while (++i < sec->linecount) { other = getNextSector(sec->lines[i],sec); if (other && other->ceilingheight > height && other->ceilingheight < currentheight) height = other->ceilingheight; } return height; } } return currentheight; } /** Finds next highest ceiling in adjacent sectors. * * \param sec Sector to start in. * \param currentheight Height to start at. * \return Next highest ceiling height in an adjacent sector, or currentheight * if there are none higher. * \sa P_FindHighestCeilingSurrounding, P_FindNextLowestCeiling, * P_FindNextHighestFloor * \author Lee Killough */ static fixed_t P_FindNextHighestCeiling(sector_t *sec, fixed_t currentheight) { sector_t *other; size_t i; fixed_t height; for (i = 0; i < sec->linecount; i++) { other = getNextSector(sec->lines[i], sec); if (other && other->ceilingheight > currentheight) { height = other->ceilingheight; while (++i < sec->linecount) { other = getNextSector(sec->lines[i],sec); if (other && other->ceilingheight < height && other->ceilingheight > currentheight) height = other->ceilingheight; } return height; } } return currentheight; } #endif //////////////////////////// // End New Boom functions //////////////////////////// /** Finds lowest ceiling in adjacent sectors. * * \param sec Sector to start in. * \return Lowest ceiling height in an adjacent sector. 
* \sa P_FindHighestCeilingSurrounding, P_FindNextLowestCeiling, * P_FindLowestFloorSurrounding */ fixed_t P_FindLowestCeilingSurrounding(sector_t *sec) { size_t i; line_t *check; sector_t *other; fixed_t height = 32000*FRACUNIT; //SoM: 3/7/2000: Remove ovf INT32 foundsector = 0; for (i = 0; i < sec->linecount; i++) { check = sec->lines[i]; other = getNextSector(check, sec); if (!other) continue; if (other->ceilingheight < height || !foundsector) height = other->ceilingheight; if (!foundsector) foundsector = 1; } return height; } /** Finds Highest ceiling in adjacent sectors. * * \param sec Sector to start in. * \return Highest ceiling height in an adjacent sector. * \sa P_FindLowestCeilingSurrounding, P_FindNextHighestCeiling, * P_FindHighestFloorSurrounding */ fixed_t P_FindHighestCeilingSurrounding(sector_t *sec) { size_t i; line_t *check; sector_t *other; fixed_t height = 0; INT32 foundsector = 0; for (i = 0; i < sec->linecount; i++) { check = sec->lines[i]; other = getNextSector(check, sec); if (!other) continue; if (other->ceilingheight > height || !foundsector) height = other->ceilingheight; if (!foundsector) foundsector = 1; } return height; } #if 0 //SoM: 3/7/2000: UTILS..... // // P_FindShortestTextureAround() // // Passed a sector number, returns the shortest lower texture on a // linedef bounding the sector. // // static fixed_t P_FindShortestTextureAround(INT32 secnum) { fixed_t minsize = 32000<<FRACBITS; side_t *side; size_t i; sector_t *sec= &sectors[secnum]; for (i = 0; i < sec->linecount; i++) { if (twoSided(secnum, i)) { side = getSide(secnum,i,0); if (side->bottomtexture > 0) if (textureheight[side->bottomtexture] < minsize) minsize = textureheight[side->bottomtexture]; side = getSide(secnum,i,1); if (side->bottomtexture > 0) if (textureheight[side->bottomtexture] < minsize) minsize = textureheight[side->bottomtexture]; } } return minsize; } //SoM: 3/7/2000: Stuff.... (can you tell I'm getting tired? It's 12 : 30!) 
// // P_FindShortestUpperAround() // // Passed a sector number, returns the shortest upper texture on a // linedef bounding the sector. // // static fixed_t P_FindShortestUpperAround(INT32 secnum) { fixed_t minsize = 32000<<FRACBITS; side_t *side; size_t i; sector_t *sec = &sectors[secnum]; for (i = 0; i < sec->linecount; i++) { if (twoSided(secnum, i)) { side = getSide(secnum,i,0); if (side->toptexture > 0) if (textureheight[side->toptexture] < minsize) minsize = textureheight[side->toptexture]; side = getSide(secnum,i,1); if (side->toptexture > 0) if (textureheight[side->toptexture] < minsize) minsize = textureheight[side->toptexture]; } } return minsize; } //SoM: 3/7/2000 // // P_FindModelFloorSector() // // Passed a floor height and a sector number, return a pointer to a // a sector with that floor height across the lowest numbered two sided // line surrounding the sector. // // Note: If no sector at that height bounds the sector passed, return NULL // // static sector_t *P_FindModelFloorSector(fixed_t floordestheight, INT32 secnum) { size_t i; sector_t *sec = &sectors[secnum]; for (i = 0; i < sec->linecount; i++) { if (twoSided(secnum, i)) { if (getSide(secnum,i,0)->sector-sectors == secnum) sec = getSector(secnum,i,1); else sec = getSector(secnum,i,0); if (sec->floorheight == floordestheight) return sec; } } return NULL; } // // P_FindModelCeilingSector() // // Passed a ceiling height and a sector number, return a pointer to a // a sector with that ceiling height across the lowest numbered two sided // line surrounding the sector. 
// // Note: If no sector at that height bounds the sector passed, return NULL // static sector_t *P_FindModelCeilingSector(fixed_t ceildestheight, INT32 secnum) { size_t i; sector_t *sec = &sectors[secnum]; for (i = 0; i < sec->linecount; i++) { if (twoSided(secnum, i)) { if (getSide(secnum, i, 0)->sector - sectors == secnum) sec = getSector(secnum, i, 1); else sec = getSector(secnum, i, 0); if (sec->ceilingheight == ceildestheight) return sec; } } return NULL; } #endif /** Searches the tag lists for the next sector tagged to a line. * * \param line Tagged line used as a reference. * \param start -1 to start at the beginning, or the result of a previous call * to keep searching. * \return Number of the next tagged sector found. * \sa P_FindSectorFromTag, P_FindLineFromLineTag */ INT32 P_FindSectorFromLineTag(line_t *line, INT32 start) { if (line->tag == -1) { start++; if (start >= (INT32)numsectors) return -1; return start; } else { start = start >= 0 ? sectors[start].nexttag : sectors[(unsigned)line->tag % numsectors].firsttag; while (start >= 0 && sectors[start].tag != line->tag) start = sectors[start].nexttag; return start; } } /** Searches the tag lists for the next sector with a given tag. * * \param tag Tag number to look for. * \param start -1 to start anew, or the result of a previous call to keep * searching. * \return Number of the next tagged sector found. * \sa P_FindSectorFromLineTag */ INT32 P_FindSectorFromTag(INT16 tag, INT32 start) { if (tag == -1) { start++; if (start >= (INT32)numsectors) return -1; return start; } else { start = start >= 0 ? sectors[start].nexttag : sectors[(unsigned)tag % numsectors].firsttag; while (start >= 0 && sectors[start].tag != tag) start = sectors[start].nexttag; return start; } } /** Searches the tag lists for the next line tagged to a line. * * \param line Tagged line used as a reference. * \param start -1 to start anew, or the result of a previous call to keep * searching. 
* \return Number of the next tagged line found. * \sa P_FindSectorFromLineTag */ static INT32 P_FindLineFromLineTag(const line_t *line, INT32 start) { if (line->tag == -1) { start++; if (start >= (INT32)numlines) return -1; return start; } else { start = start >= 0 ? lines[start].nexttag : lines[(unsigned)line->tag % numlines].firsttag; while (start >= 0 && lines[start].tag != line->tag) start = lines[start].nexttag; return start; } } #if 0 /** Searches the tag lists for the next line with a given tag and special. * * \param tag Tag number. * \param start -1 to start anew, or the result of a previous call to keep * searching. * \return Number of next suitable line found. * \sa P_FindLineFromLineTag * \author Graue <graue@oceanbase.org> */ static INT32 P_FindLineFromTag(INT32 tag, INT32 start) { if (tag == -1) { start++; if (start >= numlines) return -1; return start; } else { start = start >= 0 ? lines[start].nexttag : lines[(unsigned)tag % numlines].firsttag; while (start >= 0 && lines[start].tag != tag) start = lines[start].nexttag; return start; } } #endif // // P_FindSpecialLineFromTag // INT32 P_FindSpecialLineFromTag(INT16 special, INT16 tag, INT32 start) { if (tag == -1) { start++; while (lines[start].special != special) start++; if (start >= (INT32)numlines) return -1; return start; } else { start = start >= 0 ? 
lines[start].nexttag : lines[(unsigned)tag % numlines].firsttag; while (start >= 0 && (lines[start].tag != tag || lines[start].special != special)) start = lines[start].nexttag; return start; } } // haleyjd: temporary define #ifdef POLYOBJECTS // // PolyDoor // // Parses arguments for parameterized polyobject door types // static boolean PolyDoor(line_t *line) { polydoordata_t pdd; pdd.polyObjNum = line->tag; // polyobject id switch(line->special) { case 480: // Polyobj_DoorSlide pdd.doorType = POLY_DOOR_SLIDE; pdd.speed = sides[line->sidenum[0]].textureoffset / 8; pdd.angle = R_PointToAngle2(line->v1->x, line->v1->y, line->v2->x, line->v2->y); // angle of motion pdd.distance = sides[line->sidenum[0]].rowoffset; if (line->sidenum[1] != 0xffff) pdd.delay = sides[line->sidenum[1]].textureoffset >> FRACBITS; // delay in tics else pdd.delay = 0; break; case 481: // Polyobj_DoorSwing pdd.doorType = POLY_DOOR_SWING; pdd.speed = sides[line->sidenum[0]].textureoffset >> FRACBITS; // angular speed pdd.distance = sides[line->sidenum[0]].rowoffset >> FRACBITS; // angular distance if (line->sidenum[1] != 0xffff) pdd.delay = sides[line->sidenum[1]].textureoffset >> FRACBITS; // delay in tics else pdd.delay = 0; break; default: return 0; // ??? } return EV_DoPolyDoor(&pdd); } // // PolyMove // // Parses arguments for parameterized polyobject move specials // static boolean PolyMove(line_t *line) { polymovedata_t pmd; pmd.polyObjNum = line->tag; pmd.speed = sides[line->sidenum[0]].textureoffset / 8; pmd.angle = R_PointToAngle2(line->v1->x, line->v1->y, line->v2->x, line->v2->y); pmd.distance = sides[line->sidenum[0]].rowoffset; pmd.overRide = (line->special == 483); // Polyobj_OR_Move return EV_DoPolyObjMove(&pmd); } // // PolyInvisible // // Makes a polyobject invisible and intangible // If NOCLIMB is ticked, the polyobject will still be tangible, just not visible. 
// static void PolyInvisible(line_t *line) { INT32 polyObjNum = line->tag; polyobj_t *po; if (!(po = Polyobj_GetForNum(polyObjNum))) { CONS_Debug(DBG_POLYOBJ, "PolyInvisible: bad polyobj %d\n", polyObjNum); return; } // don't allow line actions to affect bad polyobjects if (po->isBad) return; if (!(line->flags & ML_NOCLIMB)) po->flags &= ~POF_SOLID; po->flags |= POF_NOSPECIALS; po->flags &= ~POF_RENDERALL; } // // PolyVisible // // Makes a polyobject visible and tangible // If NOCLIMB is ticked, the polyobject will not be tangible, just visible. // static void PolyVisible(line_t *line) { INT32 polyObjNum = line->tag; polyobj_t *po; if (!(po = Polyobj_GetForNum(polyObjNum))) { CONS_Debug(DBG_POLYOBJ, "PolyVisible: bad polyobj %d\n", polyObjNum); return; } // don't allow line actions to affect bad polyobjects if (po->isBad) return; if (!(line->flags & ML_NOCLIMB)) po->flags |= POF_SOLID; po->flags &= ~POF_NOSPECIALS; po->flags |= POF_RENDERALL; } // // PolyTranslucency // // Sets the translucency of a polyobject // Frontsector floor / 100 = translevel // static void PolyTranslucency(line_t *line) { INT32 polyObjNum = line->tag; polyobj_t *po; if (!(po = Polyobj_GetForNum(polyObjNum))) { CONS_Debug(DBG_POLYOBJ, "EV_DoPolyObjWaypoint: bad polyobj %d\n", polyObjNum); return; } // don't allow line actions to affect bad polyobjects if (po->isBad) return; po->translucency = (line->frontsector->floorheight >> FRACBITS) / 100; } // // PolyWaypoint // // Parses arguments for parameterized polyobject waypoint movement // static boolean PolyWaypoint(line_t *line) { polywaypointdata_t pwd; pwd.polyObjNum = line->tag; pwd.speed = sides[line->sidenum[0]].textureoffset / 8; pwd.sequence = sides[line->sidenum[0]].rowoffset >> FRACBITS; // Sequence # pwd.reverse = (line->flags & ML_EFFECT1) == ML_EFFECT1; // Reverse? pwd.comeback = (line->flags & ML_EFFECT2) == ML_EFFECT2; // Return when reaching end? 
pwd.wrap = (line->flags & ML_EFFECT3) == ML_EFFECT3; // Wrap around waypoints pwd.continuous = (line->flags & ML_EFFECT4) == ML_EFFECT4; // Continuously move - used with COMEBACK or WRAP return EV_DoPolyObjWaypoint(&pwd); } // // PolyRotate // // Parses arguments for parameterized polyobject rotate specials // static boolean PolyRotate(line_t *line) { polyrotdata_t prd; prd.polyObjNum = line->tag; prd.speed = sides[line->sidenum[0]].textureoffset >> FRACBITS; // angular speed prd.distance = sides[line->sidenum[0]].rowoffset >> FRACBITS; // angular distance // Polyobj_(OR_)RotateRight have dir == -1 prd.direction = (line->special == 484 || line->special == 485) ? -1 : 1; // Polyobj_OR types have override set to true prd.overRide = (line->special == 485 || line->special == 487); if (line->flags & ML_NOCLIMB) prd.turnobjs = 0; else if (line->flags & ML_EFFECT4) prd.turnobjs = 2; else prd.turnobjs = 1; return EV_DoPolyObjRotate(&prd); } // // PolyDisplace // // Parses arguments for parameterized polyobject move-by-sector-heights specials // static boolean PolyDisplace(line_t *line) { polydisplacedata_t pdd; pdd.polyObjNum = line->tag; pdd.controlSector = line->frontsector; pdd.dx = line->dx>>8; pdd.dy = line->dy>>8; return EV_DoPolyObjDisplace(&pdd); } #endif // ifdef POLYOBJECTS /** Changes a sector's tag. * Used by the linedef executor tag changer and by crumblers. * * \param sector Sector whose tag will be changed. * \param newtag New tag number for this sector. 
 * \sa P_InitTagLists, P_FindSectorFromTag
 * \author Graue <graue@oceanbase.org>
 */
void P_ChangeSectorTag(UINT32 sector, INT16 newtag)
{
	INT16 oldtag;
	INT32 i;

	I_Assert(sector < numsectors);

	// Nothing to do if the tag isn't actually changing.
	if ((oldtag = sectors[sector].tag) == newtag)
		return;

	// first you have to remove it from the old tag's taglist
	// Taglists are hash chains: bucket = (unsigned)tag % numsectors, linked
	// through nexttag, kept in ascending sector-number order (see P_InitTagLists).
	i = sectors[(unsigned)oldtag % numsectors].firsttag;

	if (i == -1) // shouldn't happen
		I_Error("Corrupt tag list for sector %u\n", sector);
	else if ((UINT32)i == sector)
		// This sector is the chain head; unlink by advancing firsttag.
		sectors[(unsigned)oldtag % numsectors].firsttag = sectors[sector].nexttag;
	else
	{
		// Walk the chain to this sector's predecessor, then splice it out.
		while (sectors[i].nexttag != -1 && (UINT32)sectors[i].nexttag < sector )
			i = sectors[i].nexttag;

		sectors[i].nexttag = sectors[sector].nexttag;
	}

	sectors[sector].tag = newtag;

	// now add it to the new tag's taglist
	// If the current head comes after us — or the bucket is empty: -1 cast to
	// UINT32 is 0xFFFFFFFF, larger than any sector index — become the new head.
	if ((UINT32)sectors[(unsigned)newtag % numsectors].firsttag > sector)
	{
		sectors[sector].nexttag = sectors[(unsigned)newtag % numsectors].firsttag;
		sectors[(unsigned)newtag % numsectors].firsttag = sector;
	}
	else
	{
		i = sectors[(unsigned)newtag % numsectors].firsttag;

		if (i == -1)
		{
			// NOTE(review): defensive — looks unreachable, since an empty
			// bucket (-1) is caught by the head check above; confirm.
			sectors[(unsigned)newtag % numsectors].firsttag = sector;
			sectors[sector].nexttag = -1;
		}
		else
		{
			// Insert after the proper predecessor to keep ascending order.
			while (sectors[i].nexttag != -1 && (UINT32)sectors[i].nexttag < sector )
				i = sectors[i].nexttag;

			sectors[sector].nexttag = sectors[i].nexttag;
			sectors[i].nexttag = sector;
		}
	}
}

/** Hashes the sector tags across the sectors and linedefs.
 *
 * Builds the firsttag/nexttag hash chains used by the tag search functions.
 * Iterating downward means each bucket's chain ends up sorted by ascending
 * sector/line index, which P_ChangeSectorTag relies on.
 *
 * \sa P_FindSectorFromTag, P_ChangeSectorTag
 * \author Lee Killough
 */
static inline void P_InitTagLists(void)
{
	register size_t i;

	// Sectors: bucket by tag modulo sector count, prepend to chain.
	for (i = numsectors - 1; i != (size_t)-1; i--)
	{
		size_t j = (unsigned)sectors[i].tag % numsectors;
		sectors[i].nexttag = sectors[j].firsttag;
		sectors[j].firsttag = (INT32)i;
	}

	// Lines: same scheme, bucketed over the line count.
	for (i = numlines - 1; i != (size_t)-1; i--)
	{
		size_t j = (unsigned)lines[i].tag % numlines;
		lines[i].nexttag = lines[j].firsttag;
		lines[j].firsttag = (INT32)i;
	}
}

/** Finds minimum light from an adjacent sector.
 *
 * \param sector Sector to start in.
 * \param max    Maximum value to return.
 * \return Minimum light value from an adjacent sector, or max if the minimum
 *         light value is greater than max.
 */
INT32 P_FindMinSurroundingLight(sector_t *sector, INT32 max)
{
	size_t i;
	INT32 min = max;
	line_t *line;
	sector_t *check;

	// Scan every line of the sector; getNextSector returns the sector on the
	// other side (or NULL for one-sided lines), and we keep the lowest light.
	for (i = 0; i < sector->linecount; i++)
	{
		line = sector->lines[i];
		check = getNextSector(line,sector);

		if (!check)
			continue;

		if (check->lightlevel < min)
			min = check->lightlevel;
	}
	return min;
}

/** Thinker for a delayed linedef executor.
 * Counts down each tic; when the timer expires, runs the stored line special
 * and removes itself.
 *
 * \param e The delay thinker to run.
 * \sa P_AddExecutorDelay
 */
void T_ExecutorDelay(executor_t *e)
{
	if (--e->timer <= 0)
	{
		if (e->caller && P_MobjWasRemoved(e->caller)) // If the mobj died while we were delaying
			P_SetTarget(&e->caller, NULL); // Call with no mobj!
		P_ProcessLineSpecial(e->line, e->caller, e->sector);
		P_SetTarget(&e->caller, NULL); // Let the mobj know it can be removed now.
		P_RemoveThinker(&e->thinker);
	}
}

/** Queues a linedef executor to run after a delay.
 * The delay in tics is encoded in the line's backsector as the sum of its
 * ceiling and floor heights (in map units).
 *
 * \param line   Line whose special will be run after the delay; must have a backsector.
 * \param mobj   Object that triggered the action; may be NULL.
 * \param sector Sector in which the action was started; may be NULL.
 * \sa T_ExecutorDelay
 */
static void P_AddExecutorDelay(line_t *line, mobj_t *mobj, sector_t *sector)
{
	executor_t *e;

	if (!line->backsector)
		I_Error("P_AddExecutorDelay: Line has no backsector!\n");

	e = Z_Calloc(sizeof (*e), PU_LEVSPEC, NULL);

	e->thinker.function.acp1 = (actionf_p1)T_ExecutorDelay;
	e->line = line;
	e->sector = sector;
	// Delay = backsector ceiling height + floor height, in whole units.
	e->timer = (line->backsector->ceilingheight>>FRACBITS)+(line->backsector->floorheight>>FRACBITS);
	P_SetTarget(&e->caller, mobj); // Use P_SetTarget to make sure the mobj doesn't get freed while we're delaying.
	P_AddThinker(&e->thinker);
}

/** Used by P_LinedefExecute to check a trigger linedef's conditions
 * The linedef executor specials in the trigger linedef's sector are run if all conditions are met.
 * Return false cancels P_LinedefExecute, this happens if a condition is not met.
 *
 * \param triggerline Trigger linedef to check conditions for; should NEVER be NULL.
 * \param actor Object initiating the action; should not be NULL.
 * \param caller Sector in which the action was started. May be NULL.
* \sa P_ProcessLineSpecial, P_LinedefExecute */ boolean P_RunTriggerLinedef(line_t *triggerline, mobj_t *actor, sector_t *caller) { sector_t *ctlsector; fixed_t dist = P_AproxDistance(triggerline->dx, triggerline->dy)>>FRACBITS; size_t i, linecnt, sectori; INT16 specialtype = triggerline->special; ///////////////////////////////////////////////// // Distance-checking/sector trigger conditions // ///////////////////////////////////////////////// // Linetypes 303 and 304 require a specific // number, or minimum or maximum, of rings. if (specialtype == 303 || specialtype == 304) { fixed_t rings = 0; // With the passuse flag, count all player's // rings. if (triggerline->flags & ML_EFFECT4) { for (i = 0; i < MAXPLAYERS; i++) { if (!playeringame[i] || players[i].spectator) continue; if (!players[i].mo || players[i].mo->health < 1) continue; rings += players[i].mo->health-1; } } else { if (!(actor && actor->player)) return false; // no player to count rings from here, sorry rings = actor->health-1; } if (triggerline->flags & ML_NOCLIMB) { if (rings > dist) return false; } else if (triggerline->flags & ML_BLOCKMONSTERS) { if (rings < dist) return false; } else { if (rings != dist) return false; } } else if (specialtype >= 314 && specialtype <= 315) { msecnode_t *node; mobj_t *mo; INT32 numpush = 0; INT32 numneeded = dist; if (!caller) return false; // we need a calling sector to find pushables in, silly! 
// Count the pushables in this sector node = caller->touching_thinglist; // things touching this sector while (node) { mo = node->m_thing; if (mo->flags & MF_PUSHABLE) numpush++; node = node->m_snext; } if (triggerline->flags & ML_NOCLIMB) // Need at least or more { if (numpush < numneeded) return false; } else if (triggerline->flags & ML_EFFECT4) // Need less than { if (numpush >= numneeded) return false; } else // Need exact { if (numpush != numneeded) return false; } } else if (caller) { if (GETSECSPECIAL(caller->special, 2) == 6) { if (!(ALL7EMERALDS(emeralds))) return false; } else if (GETSECSPECIAL(caller->special, 2) == 7) { UINT8 mare; if (!(maptol & TOL_NIGHTS)) return false; mare = P_FindLowestMare(); if (triggerline->flags & ML_NOCLIMB) { if (!(mare <= dist)) return false; } else if (triggerline->flags & ML_BLOCKMONSTERS) { if (!(mare >= dist)) return false; } else { if (!(mare == dist)) return false; } } // If we were not triggered by a sector type especially for the purpose, // a Linedef Executor linedef trigger is not handling sector triggers properly, return. else if ((!GETSECSPECIAL(caller->special, 2) || GETSECSPECIAL(caller->special, 2) > 7) && (specialtype > 322)) { CONS_Alert(CONS_WARNING, M_GetText("Linedef executor trigger isn't handling sector triggers properly!\nspecialtype = %d, if you are not a dev, report this warning instance\nalong with the wad that caused it!\n"), specialtype); return false; } } ////////////////////////////////////// // Miscellaneous trigger conditions // ////////////////////////////////////// switch (specialtype) { case 305: // continuous case 306: // each time case 307: // once if (!(actor && actor->player && actor->player->charability != dist/10)) return false; break; case 309: // continuous case 310: // each time // Only red team members can activate this. 
if (!(actor && actor->player && actor->player->ctfteam == 1)) return false; break; case 311: // continuous case 312: // each time // Only blue team members can activate this. if (!(actor && actor->player && actor->player->ctfteam == 2)) return false; break; case 317: // continuous case 318: // once { // Unlockable triggers required INT32 trigid = (INT32)(sides[triggerline->sidenum[0]].textureoffset>>FRACBITS); if ((modifiedgame && !savemoddata) || (netgame || multiplayer)) return false; else if (trigid < 0 || trigid > 31) // limited by 32 bit variable { CONS_Debug(DBG_GAMELOGIC, "Unlockable trigger (sidedef %hu): bad trigger ID %d\n", triggerline->sidenum[0], trigid); return false; } else if (!(unlocktriggers & (1 << trigid))) return false; } break; case 319: // continuous case 320: // once { // An unlockable itself must be unlocked! INT32 unlockid = (INT32)(sides[triggerline->sidenum[0]].textureoffset>>FRACBITS); if ((modifiedgame && !savemoddata) || (netgame || multiplayer)) return false; else if (unlockid < 0 || unlockid >= MAXUNLOCKABLES) // limited by unlockable count { CONS_Debug(DBG_GAMELOGIC, "Unlockable check (sidedef %hu): bad unlockable ID %d\n", triggerline->sidenum[0], unlockid); return false; } else if (!(unlockables[unlockid-1].unlocked)) return false; } break; case 321: // continuous case 322: // each time // decrement calls left before triggering if (triggerline->callcount > 0) { if (--triggerline->callcount > 0) return false; } break; default: break; } ///////////////////////////////// // Processing linedef specials // ///////////////////////////////// ctlsector = triggerline->frontsector; sectori = (size_t)(ctlsector - sectors); linecnt = ctlsector->linecount; if (triggerline->flags & ML_EFFECT5) // disregard order for efficiency { for (i = 0; i < linecnt; i++) if (ctlsector->lines[i]->special >= 400 && ctlsector->lines[i]->special < 500) { if (ctlsector->lines[i]->flags & ML_DONTPEGTOP) P_AddExecutorDelay(ctlsector->lines[i], actor, caller); 
else P_ProcessLineSpecial(ctlsector->lines[i], actor, caller); } } else // walk around the sector in a defined order { boolean backwards = false; size_t j, masterlineindex = (size_t)-1; for (i = 0; i < linecnt; i++) if (ctlsector->lines[i] == triggerline) { masterlineindex = i; break; } #ifdef PARANOIA if (masterlineindex == (size_t)-1) { const size_t li = (size_t)(ctlsector->lines[i] - lines); I_Error("Line %s isn't linked into its front sector", sizeu1(li)); } #endif // i == masterlineindex for (;;) { if (backwards) // v2 to v1 { for (j = 0; j < linecnt; j++) { if (i == j) continue; if (ctlsector->lines[i]->v1 == ctlsector->lines[j]->v2) { i = j; break; } if (ctlsector->lines[i]->v1 == ctlsector->lines[j]->v1) { i = j; backwards = false; break; } } if (j == linecnt) { const size_t vertexei = (size_t)(ctlsector->lines[i]->v1 - vertexes); CONS_Debug(DBG_GAMELOGIC, "Warning: Sector %s is not closed at vertex %s (%d, %d)\n", sizeu1(sectori), sizeu2(vertexei), ctlsector->lines[i]->v1->x, ctlsector->lines[i]->v1->y); return false; // abort } } else // v1 to v2 { for (j = 0; j < linecnt; j++) { if (i == j) continue; if (ctlsector->lines[i]->v2 == ctlsector->lines[j]->v1) { i = j; break; } if (ctlsector->lines[i]->v2 == ctlsector->lines[j]->v2) { i = j; backwards = true; break; } } if (j == linecnt) { const size_t vertexei = (size_t)(ctlsector->lines[i]->v1 - vertexes); CONS_Debug(DBG_GAMELOGIC, "Warning: Sector %s is not closed at vertex %s (%d, %d)\n", sizeu1(sectori), sizeu2(vertexei), ctlsector->lines[i]->v2->x, ctlsector->lines[i]->v2->y); return false; // abort } } if (i == masterlineindex) break; if (ctlsector->lines[i]->special >= 400 && ctlsector->lines[i]->special < 500) { if (ctlsector->lines[i]->flags & ML_DONTPEGTOP) P_AddExecutorDelay(ctlsector->lines[i], actor, caller); else P_ProcessLineSpecial(ctlsector->lines[i], actor, caller); } } } // "Trigger on X calls" linedefs reset if noclimb is set if ((specialtype == 321 || specialtype == 322) && 
triggerline->flags & ML_NOCLIMB) triggerline->callcount = sides[triggerline->sidenum[0]].textureoffset>>FRACBITS; else // These special types work only once if (specialtype == 302 // Once || specialtype == 304 // Ring count - Once || specialtype == 307 // Character ability - Once || specialtype == 308 // Race only - Once || specialtype == 315 // No of pushables - Once || specialtype == 318 // Unlockable trigger - Once || specialtype == 320 // Unlockable - Once || specialtype == 321 || specialtype == 322 // Trigger on X calls - Continuous + Each Time || specialtype == 399) // Level Load triggerline->special = 0; // Clear it out return true; } /** Runs a linedef executor. * Can be called by: * - a player moving into a special sector or FOF. * - a pushable object moving into a special sector or FOF. * - a ceiling or floor movement from a previous linedef executor finishing. * - any object in a state with the A_LinedefExecute() action. * * \param tag Tag of the linedef executor to run. * \param actor Object initiating the action; should not be NULL. * \param caller Sector in which the action was started. May be NULL. * \sa P_ProcessLineSpecial, P_RunTriggerLinedef * \author Graue <graue@oceanbase.org> */ void P_LinedefExecute(INT16 tag, mobj_t *actor, sector_t *caller) { size_t masterline; CONS_Debug(DBG_GAMELOGIC, "P_LinedefExecute: Executing trigger linedefs of tag %d\n", tag); I_Assert(!actor || !P_MobjWasRemoved(actor)); // If actor is there, it must be valid. for (masterline = 0; masterline < numlines; masterline++) { if (lines[masterline].tag != tag) continue; // "No More Enemies" and "Level Load" take care of themselves. 
if (lines[masterline].special == 313 || lines[masterline].special == 399 // Each-time executors handle themselves, too || lines[masterline].special == 301 // Each time || lines[masterline].special == 306 // Character ability - Each time || lines[masterline].special == 310 // CTF Red team - Each time || lines[masterline].special == 312 // CTF Blue team - Each time || lines[masterline].special == 322) // Trigger on X calls - Each Time continue; if (lines[masterline].special < 300 || lines[masterline].special > 399) continue; if (!P_RunTriggerLinedef(&lines[masterline], actor, caller)) return; // cancel P_LinedefExecute if function returns false } } // // P_SwitchWeather // // Switches the weather! // void P_SwitchWeather(INT32 weathernum) { boolean purge = false; INT32 swap = 0; switch (weathernum) { case PRECIP_NONE: // None if (curWeather == PRECIP_NONE) return; // Nothing to do. purge = true; break; case PRECIP_STORM: // Storm case PRECIP_STORM_NOSTRIKES: // Storm w/ no lightning case PRECIP_RAIN: // Rain if (curWeather == PRECIP_SNOW || curWeather == PRECIP_BLANK || curWeather == PRECIP_STORM_NORAIN) swap = PRECIP_RAIN; break; case PRECIP_SNOW: // Snow if (curWeather == PRECIP_SNOW) return; // Nothing to do. if (curWeather == PRECIP_RAIN || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES || curWeather == PRECIP_BLANK || curWeather == PRECIP_STORM_NORAIN) swap = PRECIP_SNOW; // Need to delete the other precips. 
break; case PRECIP_STORM_NORAIN: // Storm w/o rain if (curWeather == PRECIP_SNOW || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES || curWeather == PRECIP_RAIN || curWeather == PRECIP_BLANK) swap = PRECIP_STORM_NORAIN; else if (curWeather == PRECIP_STORM_NORAIN) return; break; case PRECIP_BLANK: if (curWeather == PRECIP_SNOW || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES || curWeather == PRECIP_RAIN) swap = PRECIP_BLANK; else if (curWeather == PRECIP_STORM_NORAIN) swap = PRECIP_BLANK; else if (curWeather == PRECIP_BLANK) return; break; default: CONS_Debug(DBG_GAMELOGIC, "P_SwitchWeather: Unknown weather type %d.\n", weathernum); break; } if (purge) { thinker_t *think; precipmobj_t *precipmobj; for (think = thinkercap.next; think != &thinkercap; think = think->next) { if ((think->function.acp1 != (actionf_p1)P_SnowThinker) && (think->function.acp1 != (actionf_p1)P_RainThinker)) continue; // not a precipmobj thinker precipmobj = (precipmobj_t *)think; P_RemovePrecipMobj(precipmobj); } } else if (swap && !((swap == PRECIP_BLANK && curWeather == PRECIP_STORM_NORAIN) || (swap == PRECIP_STORM_NORAIN && curWeather == PRECIP_BLANK))) // Rather than respawn all that crap, reuse it! 
{ thinker_t *think; precipmobj_t *precipmobj; state_t *st; for (think = thinkercap.next; think != &thinkercap; think = think->next) { if (swap == PRECIP_RAIN) // Snow To Rain { if (!(think->function.acp1 == (actionf_p1)P_SnowThinker || think->function.acp1 == (actionf_p1)P_NullPrecipThinker)) continue; // not a precipmobj thinker precipmobj = (precipmobj_t *)think; precipmobj->flags = mobjinfo[MT_RAIN].flags; st = &states[mobjinfo[MT_RAIN].spawnstate]; precipmobj->state = st; precipmobj->tics = st->tics; precipmobj->sprite = st->sprite; precipmobj->frame = st->frame; precipmobj->momz = mobjinfo[MT_RAIN].speed; precipmobj->precipflags &= ~PCF_INVISIBLE; think->function.acp1 = (actionf_p1)P_RainThinker; } else if (swap == PRECIP_SNOW) // Rain To Snow { INT32 z; if (!(think->function.acp1 == (actionf_p1)P_RainThinker || think->function.acp1 == (actionf_p1)P_NullPrecipThinker)) continue; // not a precipmobj thinker precipmobj = (precipmobj_t *)think; precipmobj->flags = mobjinfo[MT_SNOWFLAKE].flags; z = M_RandomByte(); if (z < 64) z = 2; else if (z < 144) z = 1; else z = 0; st = &states[mobjinfo[MT_SNOWFLAKE].spawnstate+z]; precipmobj->state = st; precipmobj->tics = st->tics; precipmobj->sprite = st->sprite; precipmobj->frame = st->frame; precipmobj->momz = mobjinfo[MT_SNOWFLAKE].speed; precipmobj->precipflags &= ~PCF_INVISIBLE; think->function.acp1 = (actionf_p1)P_SnowThinker; } else if (swap == PRECIP_BLANK || swap == PRECIP_STORM_NORAIN) // Remove precip, but keep it around for reuse. 
{ if (!(think->function.acp1 == (actionf_p1)P_RainThinker || think->function.acp1 == (actionf_p1)P_SnowThinker)) continue; precipmobj = (precipmobj_t *)think; think->function.acp1 = (actionf_p1)P_NullPrecipThinker; precipmobj->precipflags |= PCF_INVISIBLE; } } } switch (weathernum) { case PRECIP_SNOW: // snow curWeather = PRECIP_SNOW; if (!swap) P_SpawnPrecipitation(); break; case PRECIP_RAIN: // rain { boolean dontspawn = false; if (curWeather == PRECIP_RAIN || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES) dontspawn = true; curWeather = PRECIP_RAIN; if (!dontspawn && !swap) P_SpawnPrecipitation(); break; } case PRECIP_STORM: // storm { boolean dontspawn = false; if (curWeather == PRECIP_RAIN || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES) dontspawn = true; curWeather = PRECIP_STORM; if (!dontspawn && !swap) P_SpawnPrecipitation(); break; } case PRECIP_STORM_NOSTRIKES: // storm w/o lightning { boolean dontspawn = false; if (curWeather == PRECIP_RAIN || curWeather == PRECIP_STORM || curWeather == PRECIP_STORM_NOSTRIKES) dontspawn = true; curWeather = PRECIP_STORM_NOSTRIKES; if (!dontspawn && !swap) P_SpawnPrecipitation(); break; } case PRECIP_STORM_NORAIN: // storm w/o rain curWeather = PRECIP_STORM_NORAIN; if (!swap) P_SpawnPrecipitation(); break; case PRECIP_BLANK: curWeather = PRECIP_BLANK; if (!swap) P_SpawnPrecipitation(); break; default: curWeather = PRECIP_NONE; break; } } /** Gets an object. * * \param type Object type to look for. * \param s Sector number to look in. * \return Pointer to the first ::type found in the sector. * \sa P_GetPushThing */ static mobj_t *P_GetObjectTypeInSectorNum(mobjtype_t type, size_t s) { sector_t *sec = sectors + s; mobj_t *thing = sec->thinglist; while (thing) { if (thing->type == type) return thing; thing = thing->snext; } return NULL; } /** Processes the line special triggered by an object. * * \param line Line with the special command on it. 
* \param mo mobj that triggered the line. Must be valid and non-NULL. * \param callsec sector in which action was initiated; this can be NULL. * Because of the A_LinedefExecute() action, even if non-NULL, * this sector might not have the same tag as the linedef executor, * and it might not have the linedef executor sector type. * \todo Handle mo being NULL gracefully. T_MoveFloor() and T_MoveCeiling() * don't have an object to pass. * \todo Split up into multiple functions. * \sa P_LinedefExecute * \author Graue <graue@oceanbase.org> */ static void P_ProcessLineSpecial(line_t *line, mobj_t *mo, sector_t *callsec) { INT32 secnum = -1; mobj_t *bot = NULL; I_Assert(!mo || !P_MobjWasRemoved(mo)); // If mo is there, mo must be valid! if (mo && mo->player && botingame) bot = players[secondarydisplayplayer].mo; // note: only commands with linedef types >= 400 && < 500 can be used switch (line->special) { case 400: // Set tagged sector's floor height/pic EV_DoFloor(line, instantMoveFloorByFrontSector); break; case 401: // Set tagged sector's ceiling height/pic EV_DoCeiling(line, instantMoveCeilingByFrontSector); break; case 402: // Set tagged sector's light level { INT16 newlightlevel; INT32 newfloorlightsec, newceilinglightsec; newlightlevel = line->frontsector->lightlevel; newfloorlightsec = line->frontsector->floorlightsec; newceilinglightsec = line->frontsector->ceilinglightsec; // act on all sectors with the same tag as the triggering linedef while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (sectors[secnum].lightingdata) { // Stop the lighting madness going on in this sector! P_RemoveThinker(&((elevator_t *)sectors[secnum].lightingdata)->thinker); sectors[secnum].lightingdata = NULL; // No, it's not an elevator_t, but any struct with a thinker_t named // 'thinker' at the beginning will do here. (We don't know what it // actually is: could be lightlevel_t, fireflicker_t, glow_t, etc.) 
} sectors[secnum].lightlevel = newlightlevel; sectors[secnum].floorlightsec = newfloorlightsec; sectors[secnum].ceilinglightsec = newceilinglightsec; } } break; case 403: // Move floor, linelen = speed, frontsector floor = dest height EV_DoFloor(line, moveFloorByFrontSector); break; case 404: // Move ceiling, linelen = speed, frontsector ceiling = dest height EV_DoCeiling(line, moveCeilingByFrontSector); break; case 405: // Move floor by front side texture offsets, offset x = speed, offset y = amount to raise/lower EV_DoFloor(line, moveFloorByFrontTexture); break; case 407: // Move ceiling by front side texture offsets, offset x = speed, offset y = amount to raise/lower EV_DoCeiling(line, moveCeilingByFrontTexture); break; /* case 405: // Lower floor by line, dx = speed, dy = amount to lower EV_DoFloor(line, lowerFloorByLine); break; case 406: // Raise floor by line, dx = speed, dy = amount to raise EV_DoFloor(line, raiseFloorByLine); break; case 407: // Lower ceiling by line, dx = speed, dy = amount to lower EV_DoCeiling(line, lowerCeilingByLine); break; case 408: // Raise ceiling by line, dx = speed, dy = amount to raise EV_DoCeiling(line, raiseCeilingByLine); break;*/ case 409: // Change tagged sectors' tag // (formerly "Change calling sectors' tag", but behavior was changed) { while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) P_ChangeSectorTag(secnum,(INT16)(sides[line->sidenum[0]].textureoffset>>FRACBITS)); break; } case 410: // Change front sector's tag P_ChangeSectorTag((UINT32)(line->frontsector - sectors), (INT16)(sides[line->sidenum[0]].textureoffset>>FRACBITS)); break; case 411: // Stop floor/ceiling movement in tagged sector(s) while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (sectors[secnum].floordata) { if (sectors[secnum].floordata == sectors[secnum].ceilingdata) // elevator { P_RemoveThinker(&((elevator_t *)sectors[secnum].floordata)->thinker); sectors[secnum].floordata = sectors[secnum].ceilingdata = NULL; 
sectors[secnum].floorspeed = sectors[secnum].ceilspeed = 0; } else // floormove { P_RemoveThinker(&((floormove_t *)sectors[secnum].floordata)->thinker); sectors[secnum].floordata = NULL; sectors[secnum].floorspeed = 0; } } if (sectors[secnum].ceilingdata) // ceiling { P_RemoveThinker(&((ceiling_t *)sectors[secnum].ceilingdata)->thinker); sectors[secnum].ceilingdata = NULL; sectors[secnum].ceilspeed = 0; } } break; case 412: // Teleport the player or thing { mobj_t *dest; if (!mo) // nothing to teleport return; if (line->flags & ML_EFFECT3) // Relative silent teleport { fixed_t x, y, z; x = sides[line->sidenum[0]].textureoffset; y = sides[line->sidenum[0]].rowoffset; z = line->frontsector->ceilingheight; P_UnsetThingPosition(mo); mo->x += x; mo->y += y; mo->z += z; P_SetThingPosition(mo); if (mo->player) { if (bot) // This might put poor Tails in a wall if he's too far behind! D: But okay, whatever! >:3 P_TeleportMove(bot, bot->x + x, bot->y + y, bot->z + z); if (splitscreen && mo->player == &players[secondarydisplayplayer] && camera2.chase) { camera2.x += x; camera2.y += y; camera2.z += z; camera2.subsector = R_PointInSubsector(camera2.x, camera2.y); } else if (camera.chase && mo->player == &players[displayplayer]) { camera.x += x; camera.y += y; camera.z += z; camera.subsector = R_PointInSubsector(camera.x, camera.y); } } } else { if ((secnum = P_FindSectorFromLineTag(line, -1)) < 0) return; dest = P_GetObjectTypeInSectorNum(MT_TELEPORTMAN, secnum); if (!dest) return; if (bot) P_Teleport(bot, dest->x, dest->y, dest->z, (line->flags & ML_NOCLIMB) ? mo->angle : dest->angle, (line->flags & ML_BLOCKMONSTERS) == 0, (line->flags & ML_EFFECT4) == ML_EFFECT4); if (line->flags & ML_BLOCKMONSTERS) P_Teleport(mo, dest->x, dest->y, dest->z, (line->flags & ML_NOCLIMB) ? mo->angle : dest->angle, false, (line->flags & ML_EFFECT4) == ML_EFFECT4); else { P_Teleport(mo, dest->x, dest->y, dest->z, (line->flags & ML_NOCLIMB) ? 
mo->angle : dest->angle, true, (line->flags & ML_EFFECT4) == ML_EFFECT4); // Play the 'bowrwoosh!' sound S_StartSound(dest, sfx_mixup); } } } break; case 413: // Change music // console player only unless NOCLIMB is set if ((line->flags & ML_NOCLIMB) || (mo && mo->player && P_IsLocalPlayer(mo->player))) { UINT16 tracknum = (UINT16)sides[line->sidenum[0]].bottomtexture; strncpy(mapmusname, sides[line->sidenum[0]].text, 7); mapmusname[6] = 0; mapmusflags = tracknum & MUSIC_TRACKMASK; if (!(line->flags & ML_BLOCKMONSTERS)) mapmusflags |= MUSIC_RELOADRESET; S_ChangeMusic(mapmusname, mapmusflags, !(line->flags & ML_EFFECT4)); // Except, you can use the ML_BLOCKMONSTERS flag to change this behavior. // if (mapmusflags & MUSIC_RELOADRESET) then it will reset the music in G_PlayerReborn. } break; case 414: // Play SFX { fixed_t sfxnum; sfxnum = sides[line->sidenum[0]].toptexture; //P_AproxDistance(line->dx, line->dy)>>FRACBITS; if (line->tag != 0 && line->flags & ML_EFFECT5) { sector_t *sec; while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { sec = &sectors[secnum]; S_StartSound(&sec->soundorg, sfxnum); } } else if (line->tag != 0 && mo) { // Only trigger if mobj is touching the tag ffloor_t *rover; boolean foundit = false; for(rover = mo->subsector->sector->ffloors; rover; rover = rover->next) { if (rover->master->frontsector->tag != line->tag) continue; if (mo->z > *rover->topheight) continue; if (mo->z + mo->height < *rover->bottomheight) continue; foundit = true; } if (mo->subsector->sector->tag == line->tag) foundit = true; if (!foundit) return; } if (sfxnum < NUMSFX && sfxnum > sfx_None) { if (line->flags & ML_NOCLIMB) { // play the sound from nowhere, but only if display player triggered it if (mo && mo->player && (mo->player == &players[displayplayer] || mo->player == &players[secondarydisplayplayer])) S_StartSound(NULL, sfxnum); } else if (line->flags & ML_EFFECT4) { // play the sound from nowhere S_StartSound(NULL, sfxnum); } else if (line->flags & 
ML_BLOCKMONSTERS) { // play the sound from calling sector's soundorg if (callsec) S_StartSound(&callsec->soundorg, sfxnum); else if (mo) S_StartSound(&mo->subsector->sector->soundorg, sfxnum); } else if (mo) { // play the sound from mobj that triggered it S_StartSound(mo, sfxnum); } } } break; case 415: // Run a script if (cv_runscripts.value) { INT32 scrnum; lumpnum_t lumpnum; char newname[9]; strcpy(newname, G_BuildMapName(gamemap)); newname[0] = 'S'; newname[1] = 'C'; newname[2] = 'R'; scrnum = sides[line->sidenum[0]].textureoffset>>FRACBITS; if (scrnum < 0 || scrnum > 999) { scrnum = 0; newname[5] = newname[6] = newname[7] = '0'; } else { newname[5] = (char)('0' + (char)((scrnum/100))); newname[6] = (char)('0' + (char)((scrnum%100)/10)); newname[7] = (char)('0' + (char)(scrnum%10)); } newname[8] = '\0'; lumpnum = W_CheckNumForName(newname); if (lumpnum == LUMPERROR || W_LumpLength(lumpnum) == 0) { CONS_Debug(DBG_SETUP, "SOC Error: script lump %s not found/not valid.\n", newname); } else COM_BufInsertText(W_CacheLumpNum(lumpnum, PU_CACHE)); } break; case 416: // Spawn adjustable fire flicker while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (line->flags & ML_NOCLIMB && line->backsector) { // Use front sector for min light level, back sector for max. // This is tricky because P_SpawnAdjustableFireFlicker expects // the maxsector (second argument) to also be the target // sector, so we have to do some light level twiddling. fireflicker_t *flick; INT16 reallightlevel = sectors[secnum].lightlevel; sectors[secnum].lightlevel = line->backsector->lightlevel; flick = P_SpawnAdjustableFireFlicker(line->frontsector, &sectors[secnum], P_AproxDistance(line->dx, line->dy)>>FRACBITS); // Make sure the starting light level is in range. 
if (reallightlevel < flick->minlight) reallightlevel = (INT16)flick->minlight; else if (reallightlevel > flick->maxlight) reallightlevel = (INT16)flick->maxlight; sectors[secnum].lightlevel = reallightlevel; } else { // Use front sector for min, target sector for max, // the same way linetype 61 does it. P_SpawnAdjustableFireFlicker(line->frontsector, &sectors[secnum], P_AproxDistance(line->dx, line->dy)>>FRACBITS); } } break; case 417: // Spawn adjustable glowing light while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (line->flags & ML_NOCLIMB && line->backsector) { // Use front sector for min light level, back sector for max. // This is tricky because P_SpawnAdjustableGlowingLight expects // the maxsector (second argument) to also be the target // sector, so we have to do some light level twiddling. glow_t *glow; INT16 reallightlevel = sectors[secnum].lightlevel; sectors[secnum].lightlevel = line->backsector->lightlevel; glow = P_SpawnAdjustableGlowingLight(line->frontsector, &sectors[secnum], P_AproxDistance(line->dx, line->dy)>>FRACBITS); // Make sure the starting light level is in range. if (reallightlevel < glow->minlight) reallightlevel = (INT16)glow->minlight; else if (reallightlevel > glow->maxlight) reallightlevel = (INT16)glow->maxlight; sectors[secnum].lightlevel = reallightlevel; } else { // Use front sector for min, target sector for max, // the same way linetype 602 does it. P_SpawnAdjustableGlowingLight(line->frontsector, &sectors[secnum], P_AproxDistance(line->dx, line->dy)>>FRACBITS); } } break; case 418: // Spawn adjustable strobe flash (unsynchronized) while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (line->flags & ML_NOCLIMB && line->backsector) { // Use front sector for min light level, back sector for max. // This is tricky because P_SpawnAdjustableGlowingLight expects // the maxsector (second argument) to also be the target // sector, so we have to do some light level twiddling. 
strobe_t *flash; INT16 reallightlevel = sectors[secnum].lightlevel; sectors[secnum].lightlevel = line->backsector->lightlevel; flash = P_SpawnAdjustableStrobeFlash(line->frontsector, &sectors[secnum], abs(line->dx)>>FRACBITS, abs(line->dy)>>FRACBITS, false); // Make sure the starting light level is in range. if (reallightlevel < flash->minlight) reallightlevel = (INT16)flash->minlight; else if (reallightlevel > flash->maxlight) reallightlevel = (INT16)flash->maxlight; sectors[secnum].lightlevel = reallightlevel; } else { // Use front sector for min, target sector for max, // the same way linetype 602 does it. P_SpawnAdjustableStrobeFlash(line->frontsector, &sectors[secnum], abs(line->dx)>>FRACBITS, abs(line->dy)>>FRACBITS, false); } } break; case 419: // Spawn adjustable strobe flash (synchronized) while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { if (line->flags & ML_NOCLIMB && line->backsector) { // Use front sector for min light level, back sector for max. // This is tricky because P_SpawnAdjustableGlowingLight expects // the maxsector (second argument) to also be the target // sector, so we have to do some light level twiddling. strobe_t *flash; INT16 reallightlevel = sectors[secnum].lightlevel; sectors[secnum].lightlevel = line->backsector->lightlevel; flash = P_SpawnAdjustableStrobeFlash(line->frontsector, &sectors[secnum], abs(line->dx)>>FRACBITS, abs(line->dy)>>FRACBITS, true); // Make sure the starting light level is in range. if (reallightlevel < flash->minlight) reallightlevel = (INT16)flash->minlight; else if (reallightlevel > flash->maxlight) reallightlevel = (INT16)flash->maxlight; sectors[secnum].lightlevel = reallightlevel; } else { // Use front sector for min, target sector for max, // the same way linetype 602 does it. 
P_SpawnAdjustableStrobeFlash(line->frontsector, &sectors[secnum], abs(line->dx)>>FRACBITS, abs(line->dy)>>FRACBITS, true); } } break; case 420: // Fade light levels in tagged sectors to new value P_FadeLight(line->tag, line->frontsector->lightlevel, P_AproxDistance(line->dx, line->dy)>>FRACBITS); break; case 421: // Stop lighting effect in tagged sectors while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) if (sectors[secnum].lightingdata) { P_RemoveThinker(&((elevator_t *)sectors[secnum].lightingdata)->thinker); sectors[secnum].lightingdata = NULL; } break; case 422: // Cut away to another view { mobj_t *altview; if (!mo || !mo->player) // only players have views return; if ((secnum = P_FindSectorFromLineTag(line, -1)) < 0) return; altview = P_GetObjectTypeInSectorNum(MT_ALTVIEWMAN, secnum); if (!altview) return; P_SetTarget(&mo->player->awayviewmobj, altview); mo->player->awayviewtics = P_AproxDistance(line->dx, line->dy)>>FRACBITS; if (line->flags & ML_NOCLIMB) // lets you specify a vertical angle { INT32 aim; aim = sides[line->sidenum[0]].textureoffset>>FRACBITS; while (aim < 0) aim += 360; while (aim >= 360) aim -= 360; aim *= (ANGLE_90>>8); aim /= 90; aim <<= 8; mo->player->awayviewaiming = (angle_t)aim; } else mo->player->awayviewaiming = 0; // straight ahead } break; case 423: // Change Sky if ((mo && mo->player && P_IsLocalPlayer(mo->player)) || (line->flags & ML_NOCLIMB)) P_SetupLevelSky(sides[line->sidenum[0]].textureoffset>>FRACBITS, (line->flags & ML_NOCLIMB)); break; case 424: // Change Weather if (line->flags & ML_NOCLIMB) { globalweather = (UINT8)(sides[line->sidenum[0]].textureoffset>>FRACBITS); P_SwitchWeather(globalweather); } else if (mo && mo->player && P_IsLocalPlayer(mo->player)) P_SwitchWeather(sides[line->sidenum[0]].textureoffset>>FRACBITS); break; case 425: // Calls P_SetMobjState on calling mobj if (mo && !mo->player) P_SetMobjState(mo, sides[line->sidenum[0]].toptexture); //P_AproxDistance(line->dx, line->dy)>>FRACBITS); break; 
case 426: // Moves the mobj to its sector's soundorg and on the floor, and stops it if (!mo) return; if (line->flags & ML_NOCLIMB) { P_UnsetThingPosition(mo); mo->x = mo->subsector->sector->soundorg.x; mo->y = mo->subsector->sector->soundorg.y; mo->z = mo->floorz; P_SetThingPosition(mo); } mo->momx = mo->momy = mo->momz = 1; mo->pmomz = 0; if (mo->player) { mo->player->rmomx = mo->player->rmomy = 1; mo->player->cmomx = mo->player->cmomy = 0; P_ResetPlayer(mo->player); P_SetPlayerMobjState(mo, S_PLAY_STND); // Reset bot too. if (bot) { if (line->flags & ML_NOCLIMB) P_TeleportMove(bot, mo->x, mo->y, mo->z); bot->momx = bot->momy = bot->momz = 1; bot->pmomz = 0; bot->player->rmomx = bot->player->rmomy = 1; bot->player->cmomx = bot->player->cmomy = 0; P_ResetPlayer(bot->player); P_SetPlayerMobjState(bot, S_PLAY_STND); } } break; case 427: // Awards points if the mobj is a player if (mo && mo->player) P_AddPlayerScore(mo->player, sides[line->sidenum[0]].textureoffset>>FRACBITS); break; case 428: // Start floating platform movement EV_DoElevator(line, elevateContinuous, true); break; case 429: // Crush Ceiling Down Once EV_DoCrush(line, crushCeilOnce); break; case 430: // Crush Floor Up Once EV_DoFloor(line, crushFloorOnce); break; case 431: // Crush Floor & Ceiling to middle Once EV_DoCrush(line, crushBothOnce); break; case 432: // Enable 2D Mode (Disable if noclimb) if (mo->player) { if (line->flags & ML_NOCLIMB) mo->flags2 &= ~MF2_TWOD; else mo->flags2 |= MF2_TWOD; // Copy effect to bot if necessary // (Teleport them to you so they don't break it.) if (bot && (bot->flags2 & MF2_TWOD) != (mo->flags2 & MF2_TWOD)) { bot->flags2 = (bot->flags2 & ~MF2_TWOD) | (mo->flags2 & MF2_TWOD); P_TeleportMove(bot, mo->x, mo->y, mo->z); } } break; case 433: // Flip gravity (Flop gravity if noclimb) Works on pushables, too! 
if (line->flags & ML_NOCLIMB) mo->flags2 &= ~MF2_OBJECTFLIP; else mo->flags2 |= MF2_OBJECTFLIP; if (bot) bot->flags2 = (bot->flags2 & ~MF2_OBJECTFLIP) | (mo->flags2 & MF2_OBJECTFLIP); break; case 434: // Custom Power if (mo->player) { mobj_t *dummy = P_SpawnMobj(mo->x, mo->y, mo->z, MT_NULL); var1 = sides[line->sidenum[0]].toptexture; //(line->dx>>FRACBITS)-1; if (line->sidenum[1] != 0xffff && line->flags & ML_BLOCKMONSTERS) // read power from back sidedef var2 = sides[line->sidenum[1]].toptexture; else if (line->flags & ML_NOCLIMB) // 'Infinite' var2 = UINT16_MAX; else var2 = sides[line->sidenum[0]].textureoffset>>FRACBITS; P_SetTarget(&dummy->target, mo); A_CustomPower(dummy); if (bot) { P_SetTarget(&dummy->target, bot); A_CustomPower(dummy); } P_RemoveMobj(dummy); } break; case 435: // Change scroller direction { scroll_t *scroller; thinker_t *th; for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)T_Scroll) continue; scroller = (scroll_t *)th; if (sectors[scroller->affectee].tag != line->tag) continue; scroller->dx = FixedMul(line->dx>>SCROLL_SHIFT, CARRYFACTOR); scroller->dy = FixedMul(line->dy>>SCROLL_SHIFT, CARRYFACTOR); } } break; case 436: // Shatter block remotely { INT16 sectag = (INT16)(sides[line->sidenum[0]].textureoffset>>FRACBITS); INT16 foftag = (INT16)(sides[line->sidenum[0]].rowoffset>>FRACBITS); sector_t *sec; // Sector that the FOF is visible in ffloor_t *rover; // FOF that we are going to crumble for (secnum = -1; (secnum = P_FindSectorFromTag(sectag, secnum)) >= 0 ;) { sec = sectors + secnum; if (!sec->ffloors) { CONS_Debug(DBG_GAMELOGIC, "Line type 436 Executor: Target sector #%d has no FOFs.\n", secnum); return; } for (rover = sec->ffloors; rover; rover = rover->next) { if (rover->master->frontsector->tag == foftag) break; } if (!rover) { CONS_Debug(DBG_GAMELOGIC, "Line type 436 Executor: Can't find a FOF control sector with tag %d\n", foftag); return; } EV_CrumbleChain(sec, rover); } } break; 
case 437: // Disable Player Controls if (mo->player) { UINT16 fractime = (UINT16)(sides[line->sidenum[0]].textureoffset>>FRACBITS); if (fractime < 1) fractime = 1; //instantly wears off upon leaving if (line->flags & ML_NOCLIMB) fractime |= 1<<15; //more crazy &ing, as if music stuff wasn't enough mo->player->powers[pw_nocontrol] = fractime; if (bot) bot->player->powers[pw_nocontrol] = fractime; } break; case 438: // Set player scale if (mo) { mo->destscale = FixedDiv(P_AproxDistance(line->dx, line->dy), 100<<FRACBITS); if (mo->destscale < FRACUNIT/100) mo->destscale = FRACUNIT/100; if (mo->player && bot) bot->destscale = mo->destscale; } break; case 439: // Set texture { size_t linenum; side_t *set = &sides[line->sidenum[0]], *this; boolean always = !(line->flags & ML_NOCLIMB); // If noclimb: Only change mid texture if mid texture already exists on tagged lines, etc. for (linenum = 0; linenum < numlines; linenum++) { if (lines[linenum].special == 439) continue; // Don't override other set texture lines! if (lines[linenum].tag != line->tag) continue; // Find tagged lines // Front side this = &sides[lines[linenum].sidenum[0]]; if (always || this->toptexture) this->toptexture = set->toptexture; if (always || this->midtexture) this->midtexture = set->midtexture; if (always || this->bottomtexture) this->bottomtexture = set->bottomtexture; if (lines[linenum].sidenum[1] == 0xffff) continue; // One-sided stops here. 
// Back side this = &sides[lines[linenum].sidenum[1]]; if (always || this->toptexture) this->toptexture = set->toptexture; if (always || this->midtexture) this->midtexture = set->midtexture; if (always || this->bottomtexture) this->bottomtexture = set->bottomtexture; } } break; case 440: // Play race countdown and start Metal Sonic if (!metalrecording && !metalplayback) G_DoPlayMetal(); break; case 441: // Trigger unlockable if ((!modifiedgame || savemoddata) && !(netgame || multiplayer)) { INT32 trigid = (INT32)(sides[line->sidenum[0]].textureoffset>>FRACBITS); if (trigid < 0 || trigid > 31) // limited by 32 bit variable CONS_Debug(DBG_GAMELOGIC, "Unlockable trigger (sidedef %hu): bad trigger ID %d\n", line->sidenum[0], trigid); else { unlocktriggers |= 1 << trigid; // Unlocked something? if (M_UpdateUnlockablesAndExtraEmblems()) { S_StartSound(NULL, sfx_ncitem); G_SaveGameData(); // only save if unlocked something } } } // Execute one time only line->special = 0; break; case 442: // Calls P_SetMobjState on mobjs of a given type in the tagged sectors { const mobjtype_t type = (mobjtype_t)sides[line->sidenum[0]].toptexture; statenum_t state = NUMSTATES; sector_t *sec; mobj_t *thing; if (line->sidenum[1] != 0xffff) state = (statenum_t)sides[line->sidenum[1]].toptexture; while ((secnum = P_FindSectorFromLineTag(line, secnum)) >= 0) { boolean tryagain; sec = sectors + secnum; do { tryagain = false; for (thing = sec->thinglist; thing; thing = thing->snext) if (thing->type == type) { if (state != NUMSTATES) { if (!P_SetMobjState(thing, state)) // set state to specific state { // mobj was removed tryagain = true; // snext is corrupt, we'll have to start over. break; } } else if (!P_SetMobjState(thing, thing->state->nextstate)) // set state to nextstate { // mobj was removed tryagain = true; // snext is corrupt, we'll have to start over. 
break; } } } while (tryagain); } break; } case 443: // Calls a named Lua function #ifdef HAVE_BLUA LUAh_LinedefExecute(line, mo, callsec); #else CONS_Alert(CONS_ERROR, "The map is trying to run a Lua script, but this exe was not compiled with Lua support!\n"); #endif break; case 444: // Earthquake camera { quake.intensity = sides[line->sidenum[0]].textureoffset; quake.radius = sides[line->sidenum[0]].rowoffset; quake.time = P_AproxDistance(line->dx, line->dy)>>FRACBITS; quake.epicenter = NULL; /// \todo // reasonable defaults. if (!quake.intensity) quake.intensity = 8<<FRACBITS; if (!quake.radius) quake.radius = 512<<FRACBITS; break; } case 445: // Force block disappear remotely (reappear if noclimb) { INT16 sectag = (INT16)(sides[line->sidenum[0]].textureoffset>>FRACBITS); INT16 foftag = (INT16)(sides[line->sidenum[0]].rowoffset>>FRACBITS); sector_t *sec; // Sector that the FOF is visible (or not visible) in ffloor_t *rover; // FOF to vanish/un-vanish ffloortype_e oldflags; // store FOF's old flags for (secnum = -1; (secnum = P_FindSectorFromTag(sectag, secnum)) >= 0 ;) { sec = sectors + secnum; if (!sec->ffloors) { CONS_Debug(DBG_GAMELOGIC, "Line type 445 Executor: Target sector #%d has no FOFs.\n", secnum); return; } for (rover = sec->ffloors; rover; rover = rover->next) { if (rover->master->frontsector->tag == foftag) break; } if (!rover) { CONS_Debug(DBG_GAMELOGIC, "Line type 445 Executor: Can't find a FOF control sector with tag %d\n", foftag); return; } oldflags = rover->flags; // Abracadabra! 
				// Noclimb flag makes the FOF reappear instead of vanish.
				if (line->flags & ML_NOCLIMB)
					rover->flags |= FF_EXISTS;
				else
					rover->flags &= ~FF_EXISTS;

				// if flags changed, reset sector's light list
				if (rover->flags != oldflags)
					sec->moved = true;
			}
		}
		break;

		case 450: // Execute Linedef Executor - for recursion
			P_LinedefExecute(line->tag, mo, NULL);
			break;

		case 451: // Execute Random Linedef Executor
			{
				INT32 rvalue1 = sides[line->sidenum[0]].textureoffset>>FRACBITS;
				INT32 rvalue2 = sides[line->sidenum[0]].rowoffset>>FRACBITS;
				INT32 result;

				// Accept the bounds in either order.
				if (rvalue1 <= rvalue2)
					result = P_RandomRange(rvalue1, rvalue2);
				else
					result = P_RandomRange(rvalue2, rvalue1);

				P_LinedefExecute((INT16)result, mo, NULL);
				break;
			}

#ifdef POLYOBJECTS
		case 480: // Polyobj_DoorSlide
		case 481: // Polyobj_DoorSwing
			PolyDoor(line);
			break;
		case 482: // Polyobj_Move
		case 483: // Polyobj_OR_Move
			PolyMove(line);
			break;
		case 484: // Polyobj_RotateRight
		case 485: // Polyobj_OR_RotateRight
		case 486: // Polyobj_RotateLeft
		case 487: // Polyobj_OR_RotateLeft
			PolyRotate(line);
			break;
		case 488: // Polyobj_Waypoint
			PolyWaypoint(line);
			break;
		case 489: // make polyobject invisible
			PolyInvisible(line);
			break;
		case 490: // make polyobject visible
			PolyVisible(line);
			break;
		case 491: // set polyobject translucency
			PolyTranslucency(line);
			break;
#endif

		default:
			break;
	}
}

//
// P_SetupSignExit
//
// Finds the exit sign in the current sector and
// sets its target to the player who passed the map.
//
void P_SetupSignExit(player_t *player)
{
	mobj_t *thing;
	msecnode_t *node = player->mo->subsector->sector->touching_thinglist; // things touching this sector
	thinker_t *think;
	INT32 numfound = 0;

	// First pass: only signposts physically touching the exit sector.
	for (; node; node = node->m_snext)
	{
		thing = node->m_thing;
		if (thing->type != MT_SIGN)
			continue;

		// Skip signs that have already left their spawn state (already spinning).
		if (thing->state != &states[thing->info->spawnstate])
			continue;

		P_SetTarget(&thing->target, player->mo);
		P_SetMobjState(thing, S_SIGN1);
		if (thing->info->seesound)
			S_StartSound(thing, thing->info->seesound);

		++numfound;
	}

	if (numfound)
		return;

	// didn't find any signposts in the exit sector.
	// spin all signposts in the level then.
	// Second pass: no sign touched the exit sector, so walk the global
	// thinker list and spin every signpost in the level instead.
	for (think = thinkercap.next; think != &thinkercap; think = think->next)
	{
		if (think->function.acp1 != (actionf_p1)P_MobjThinker)
			continue; // not a mobj thinker

		thing = (mobj_t *)think;
		if (thing->type != MT_SIGN)
			continue;

		if (thing->state != &states[thing->info->spawnstate])
			continue;

		P_SetTarget(&thing->target, player->mo);
		P_SetMobjState(thing, S_SIGN1);
		if (thing->info->seesound)
			S_StartSound(thing, thing->info->seesound);

		++numfound;
	}
}

//
// P_IsFlagAtBase
//
// Checks to see if a flag is at its base.
//
boolean P_IsFlagAtBase(mobjtype_t flag)
{
	thinker_t *think;
	mobj_t *mo;
	INT32 specialnum = 0;

	for (think = thinkercap.next; think != &thinkercap; think = think->next)
	{
		if (think->function.acp1 != (actionf_p1)P_MobjThinker)
			continue; // not a mobj thinker

		mo = (mobj_t *)think;
		if (mo->type != flag)
			continue;

		// Section-4 sector special 3 marks the red base, 4 the blue base.
		if (mo->type == MT_REDFLAG)
			specialnum = 3;
		else if (mo->type == MT_BLUEFLAG)
			specialnum = 4;

		if (GETSECSPECIAL(mo->subsector->sector->special, 4) == specialnum)
			return true;
		else if (mo->subsector->sector->ffloors) // Check the 3D floors
		{
			ffloor_t *rover;

			for (rover = mo->subsector->sector->ffloors; rover; rover = rover->next)
			{
				if (!(rover->flags & FF_EXISTS))
					continue;

				if (GETSECSPECIAL(rover->master->frontsector->special, 4) != specialnum)
					continue;

				// The flag must be vertically within the FOF to count as at base.
				if (mo->z <= *rover->topheight && mo->z >= *rover->bottomheight)
					return true;
			}
		}
	}

	return false;
}

//
// P_PlayerTouchingSectorSpecial
//
// Replaces the old player->specialsector.
// This allows a player to touch more than
// one sector at a time, if necessary.
//
// Returns a pointer to the first sector of
// the particular type that it finds.
// Returns NULL if it doesn't find it.
//
sector_t *P_PlayerTouchingSectorSpecial(player_t *player, INT32 section, INT32 number)
{
	msecnode_t *node;
	ffloor_t *rover;

	if (!player->mo)
		return NULL;

	// Check default case first
	if (GETSECSPECIAL(player->mo->subsector->sector->special, section) == number)
		return player->mo->subsector->sector;

	// Hmm..
	// maybe there's a FOF that has it...
	for (rover = player->mo->subsector->sector->ffloors; rover; rover = rover->next)
	{
		if (GETSECSPECIAL(rover->master->frontsector->special, section) != number)
			continue;

		if (!(rover->flags & FF_EXISTS))
			continue;

		// Check the 3D floor's type...
		if (rover->flags & FF_BLOCKPLAYER)
		{
			// Thing must be on top of the floor to be affected...
			// The SF_FLIPSPECIAL_* flags pick which plane(s) of the FOF
			// trigger the special (top, bottom, or both).
			if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING))
			{
				if ((player->mo->eflags & MFE_VERTICALFLIP) || player->mo->z != *rover->topheight)
					continue;
			}
			else if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR))
			{
				if (!(player->mo->eflags & MFE_VERTICALFLIP)
					|| player->mo->z + player->mo->height != *rover->bottomheight)
					continue;
			}
			else if (rover->master->frontsector->flags & SF_FLIPSPECIAL_BOTH)
			{
				if (!((player->mo->eflags & MFE_VERTICALFLIP && player->mo->z + player->mo->height == *rover->bottomheight)
					|| (!(player->mo->eflags & MFE_VERTICALFLIP) && player->mo->z == *rover->topheight)))
					continue;
			}
		}
		else
		{
			// Water and DEATH FOG!!! heh
			// Intangible FOF: any vertical overlap with the player counts.
			if (player->mo->z > *rover->topheight
				|| (player->mo->z + player->mo->height) < *rover->bottomheight)
				continue;
		}

		// This FOF has the special we're looking for!
		return rover->master->frontsector;
	}

	// Finally, check every sector the player's bounding box touches.
	for (node = player->mo->touching_sectorlist; node; node = node->m_snext)
	{
		if (GETSECSPECIAL(node->m_sector->special, section) == number)
		{
			// This sector has the special we're looking for, but
			// are we allowed to touch it?
			if (node->m_sector == player->mo->subsector->sector
				|| (node->m_sector->flags & SF_TRIGGERSPECIAL_TOUCH))
				return node->m_sector;
		}

		// Hmm.. maybe there's a FOF that has it...
		for (rover = node->m_sector->ffloors; rover; rover = rover->next)
		{
			if (GETSECSPECIAL(rover->master->frontsector->special, section) != number)
				continue;

			if (!(rover->flags & FF_EXISTS))
				continue;

			// Check the 3D floor's type...
			// Same plane checks as the first FOF loop above, applied to
			// FOFs in every touched sector rather than just the center one.
			if (rover->flags & FF_BLOCKPLAYER)
			{
				// Thing must be on top of the floor to be affected...
				if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR)
					&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING))
				{
					if ((player->mo->eflags & MFE_VERTICALFLIP) || player->mo->z != *rover->topheight)
						continue;
				}
				else if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING)
					&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR))
				{
					if (!(player->mo->eflags & MFE_VERTICALFLIP)
						|| player->mo->z + player->mo->height != *rover->bottomheight)
						continue;
				}
				else if (rover->master->frontsector->flags & SF_FLIPSPECIAL_BOTH)
				{
					if (!((player->mo->eflags & MFE_VERTICALFLIP && player->mo->z + player->mo->height == *rover->bottomheight)
						|| (!(player->mo->eflags & MFE_VERTICALFLIP) && player->mo->z == *rover->topheight)))
						continue;
				}
			}
			else
			{
				// Water and DEATH FOG!!! heh
				if (player->mo->z > *rover->topheight
					|| (player->mo->z + player->mo->height) < *rover->bottomheight)
					continue;
			}

			// This FOF has the special we're looking for, but are we allowed to touch it?
			if (node->m_sector == player->mo->subsector->sector
				|| (rover->master->frontsector->flags & SF_TRIGGERSPECIAL_TOUCH))
				return rover->master->frontsector;
		}
	}

	// Nothing touched has the requested special.
	return NULL;
}

//
// P_ThingIsOnThe3DFloor
//
// This checks whether the mobj is on/in the FOF we want it to be at
// Needed for the "All players" trigger sector specials only
//
// mo        - player mobj to test (must have a player; see guard below)
// sector    - control sector of the FOF being checked
// targetsec - in-level sector the FOF appears in
//
static boolean P_ThingIsOnThe3DFloor(mobj_t *mo, sector_t *sector, sector_t *targetsec)
{
	ffloor_t *rover;
	fixed_t top, bottom;

	if (!mo->player) // should NEVER happen
		return false;

	if (!targetsec->ffloors) // also should NEVER happen
		return false;

	for (rover = targetsec->ffloors; rover; rover = rover->next)
	{
		// Find the FOF whose control sector is the one we were handed.
		if (rover->master->frontsector != sector)
			continue;

		// we're assuming the FOF existed when the first player touched it
		//if (!(rover->flags & FF_EXISTS))
		//	return false;

		top = P_GetSpecialTopZ(mo, sector, targetsec);
		bottom = P_GetSpecialBottomZ(mo, sector, targetsec);

		// Check the 3D floor's type...
		if (rover->flags & FF_BLOCKPLAYER)
		{
			// Thing must be on top of the floor to be affected...
			if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING))
			{
				if ((mo->eflags & MFE_VERTICALFLIP) || mo->z != top)
					return false;
			}
			else if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR))
			{
				if (!(mo->eflags & MFE_VERTICALFLIP)
					|| mo->z + mo->height != bottom)
					return false;
			}
			else if (rover->master->frontsector->flags & SF_FLIPSPECIAL_BOTH)
			{
				if (!((mo->eflags & MFE_VERTICALFLIP && mo->z + mo->height == bottom)
					|| (!(mo->eflags & MFE_VERTICALFLIP) && mo->z == top)))
					return false;
			}
		}
		else
		{
			// Water and intangible FOFs
			if (mo->z > top || (mo->z + mo->height) < bottom)
				return false;
		}

		// Matching FOF found and all plane checks passed.
		return true;
	}

	return false;
}

//
// P_MobjReadyToTrigger
//
// Is player standing on the sector's "ground"?
// static inline boolean P_MobjReadyToTrigger(mobj_t *mo, sector_t *sec) { if (mo->eflags & MFE_VERTICALFLIP) return (mo->z+mo->height == P_GetSpecialTopZ(mo, sec, sec) && sec->flags & SF_FLIPSPECIAL_CEILING); else return (mo->z == P_GetSpecialBottomZ(mo, sec, sec) && sec->flags & SF_FLIPSPECIAL_FLOOR); } /** Applies a sector special to a player. * * \param player Player in the sector. * \param sector Sector with the special. * \param roversector If !NULL, sector is actually an FOF; otherwise, sector * is being physically contacted by the player. * \todo Split up into multiple functions. * \sa P_PlayerInSpecialSector, P_PlayerOnSpecial3DFloor */ void P_ProcessSpecialSector(player_t *player, sector_t *sector, sector_t *roversector) { INT32 i = 0; INT32 section1, section2, section3, section4; INT32 special; section1 = GETSECSPECIAL(sector->special, 1); section2 = GETSECSPECIAL(sector->special, 2); section3 = GETSECSPECIAL(sector->special, 3); section4 = GETSECSPECIAL(sector->special, 4); // Ignore spectators if (player->spectator) return; // Ignore dead players. // If this strange phenomenon could be potentially used in levels, // TODO: modify this to accommodate for it. 
if (player->playerstate == PST_DEAD) return; // Conveyor stuff if (section3 == 2 || section3 == 4) player->onconveyor = section3; special = section1; // Process Section 1 switch (special) { case 1: // Damage (Generic) if (roversector || P_MobjReadyToTrigger(player->mo, sector)) P_DamageMobj(player->mo, NULL, NULL, 1); break; case 2: // Damage (Water) if ((roversector || P_MobjReadyToTrigger(player->mo, sector)) && (player->powers[pw_underwater] || player->pflags & PF_NIGHTSMODE) && (player->powers[pw_shield] & SH_NOSTACK) != SH_ELEMENTAL) P_DamageMobj(player->mo, NULL, NULL, 1); break; case 3: // Damage (Fire) if ((roversector || P_MobjReadyToTrigger(player->mo, sector)) && (player->powers[pw_shield] & SH_NOSTACK) != SH_ELEMENTAL) P_DamageMobj(player->mo, NULL, NULL, 1); break; case 4: // Damage (Electrical) if ((roversector || P_MobjReadyToTrigger(player->mo, sector)) && (player->powers[pw_shield] & SH_NOSTACK) != SH_ATTRACT) P_DamageMobj(player->mo, NULL, NULL, 1); break; case 5: // Spikes // Don't do anything. In Soviet Russia, spikes find you. break; case 6: // Death Pit (Camera Mod) case 7: // Death Pit (No Camera Mod) if (roversector || P_MobjReadyToTrigger(player->mo, sector)) P_DamageMobj(player->mo, NULL, NULL, 10000); break; case 8: // Instant Kill P_DamageMobj(player->mo, NULL, NULL, 10000); break; case 9: // Ring Drainer (Floor Touch) case 10: // Ring Drainer (No Floor Touch) if (leveltime % (TICRATE/2) == 0 && player->mo->health > 1) { player->mo->health--; player->health--; S_StartSound(player->mo, sfx_itemup); } break; case 11: // Special Stage Damage - Kind of like a mini-P_DamageMobj() if (player->powers[pw_invulnerability] || player->powers[pw_flashing] || player->powers[pw_super] || player->exiting || player->bot) break; if (!(player->powers[pw_shield] || player->mo->health > 1)) // Don't do anything if no shield or rings anyway break; if (player->powers[pw_shield]) { P_RemoveShield(player); S_StartSound(player->mo, sfx_shldls); // Ba-Dum! 
Shield loss. } else if (player->mo->health > 1) { P_PlayRinglossSound(player->mo); if (player->mo->health > 10) player->mo->health -= 10; else player->mo->health = 1; player->health = player->mo->health; } P_DoPlayerPain(player, NULL, NULL); // this does basically everything that was here before if (gametype == GT_CTF && player->gotflag & (GF_REDFLAG|GF_BLUEFLAG)) P_PlayerFlagBurst(player, false); break; case 12: // Space Countdown if ((player->powers[pw_shield] & SH_NOSTACK) != SH_ELEMENTAL && !player->powers[pw_spacetime]) player->powers[pw_spacetime] = spacetimetics + 1; break; case 13: // Ramp Sector (Increase step-up/down) case 14: // Non-Ramp Sector (Don't step-down) case 15: // Bouncy Sector (FOF Control Only) break; } special = section2; // Process Section 2 switch (special) { case 1: // Trigger Linedef Exec (Pushable Objects) break; case 2: // Linedef executor requires all players present+doesn't require touching floor case 3: // Linedef executor requires all players present /// \todo check continues for proper splitscreen support? 
for (i = 0; i < MAXPLAYERS; i++) if (playeringame[i] && !players[i].bot && players[i].mo && (gametype != GT_COOP || players[i].lives > 0)) { if (roversector) { if (players[i].mo->subsector->sector != roversector) goto DoneSection2; if (!P_ThingIsOnThe3DFloor(players[i].mo, sector, roversector)) goto DoneSection2; } else { if (players[i].mo->subsector->sector != sector) goto DoneSection2; if (special == 3 && !P_MobjReadyToTrigger(players[i].mo, sector)) goto DoneSection2; } } case 4: // Linedef executor that doesn't require touching floor case 5: // Linedef executor case 6: // Linedef executor (7 Emeralds) case 7: // Linedef executor (NiGHTS Mare) if (!player->bot) P_LinedefExecute(sector->tag, player->mo, sector); break; case 8: // Tells pushable things to check FOFs break; case 9: // Egg trap capsule { thinker_t *th; mobj_t *mo2; line_t junk; if (player->bot || sector->ceilingdata || sector->floordata) return; // Find the center of the Eggtrap and release all the pretty animals! // The chimps are my friends.. heeheeheheehehee..... - LouisJM for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type == MT_EGGTRAP) P_KillMobj(mo2, NULL, player->mo); } // clear the special so you can't push the button twice. sector->special = 0; // Move the button down junk.tag = 680; EV_DoElevator(&junk, elevateDown, false); // Open the top FOF junk.tag = 681; EV_DoFloor(&junk, raiseFloorToNearestFast); // Open the bottom FOF junk.tag = 682; EV_DoCeiling(&junk, lowerToLowestFast); // Mark all players with the time to exit thingy! 
for (i = 0; i < MAXPLAYERS; i++) P_DoPlayerExit(&players[i]); break; } case 10: // Special Stage Time/Rings case 11: // Custom Gravity break; case 12: // Lua sector special break; } DoneSection2: special = section3; // Process Section 3 switch (special) { case 1: // Ice/Sludge case 2: // Wind/Current case 3: // Ice/Sludge and Wind/Current case 4: // Conveyor Belt break; case 5: // Speed pad w/o spin case 6: // Speed pad w/ spin if (player->powers[pw_flashing] != 0 && player->powers[pw_flashing] < TICRATE/2) break; i = P_FindSpecialLineFromTag(4, sector->tag, -1); if (i != -1) { angle_t lineangle; fixed_t linespeed; lineangle = R_PointToAngle2(lines[i].v1->x, lines[i].v1->y, lines[i].v2->x, lines[i].v2->y); linespeed = P_AproxDistance(lines[i].v2->x-lines[i].v1->x, lines[i].v2->y-lines[i].v1->y); player->mo->angle = lineangle; if (!demoplayback || P_AnalogMove(player)) { if (player == &players[consoleplayer]) localangle = player->mo->angle; else if (player == &players[secondarydisplayplayer]) localangle2 = player->mo->angle; } if (!(lines[i].flags & ML_EFFECT4)) { P_UnsetThingPosition(player->mo); if (roversector) // make FOF speed pads work { player->mo->x = roversector->soundorg.x; player->mo->y = roversector->soundorg.y; } else { player->mo->x = sector->soundorg.x; player->mo->y = sector->soundorg.y; } P_SetThingPosition(player->mo); } P_InstaThrust(player->mo, player->mo->angle, linespeed); if (GETSECSPECIAL(sector->special, 3) == 6 && (player->charability2 == CA2_SPINDASH)) { if (!(player->pflags & PF_SPINNING)) player->pflags |= PF_SPINNING; P_SetPlayerMobjState(player->mo, S_PLAY_ATK1); } player->powers[pw_flashing] = TICRATE/3; S_StartSound(player->mo, sfx_spdpad); } break; case 7: // Bustable block sprite parameter case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: break; } special = section4; // Process Section 4 switch (special) { case 1: // Starpost Activator { mobj_t *post = P_GetObjectTypeInSectorNum(MT_STARPOST, sector - sectors); 
if (!post) break; P_TouchSpecialThing(post, player->mo, false); break; } case 2: // Special stage GOAL sector / Exit Sector / CTF Flag Return if (player->bot) break; if (!useNightsSS && G_IsSpecialStage(gamemap) && sstimer > 6) sstimer = 6; // Just let P_Ticker take care of the rest. // Exit (for FOF exits; others are handled in P_PlayerThink in p_user.c) { INT32 lineindex; P_DoPlayerExit(player); P_SetupSignExit(player); // important: use sector->tag on next line instead of player->mo->subsector->tag // this part is different from in P_PlayerThink, this is what was causing // FOF custom exits not to work. lineindex = P_FindSpecialLineFromTag(2, sector->tag, -1); if (gametype == GT_COOP && lineindex != -1) // Custom exit! { // Special goodies with the block monsters flag depending on emeralds collected if ((lines[lineindex].flags & ML_BLOCKMONSTERS) && ALL7EMERALDS(emeralds)) nextmapoverride = (INT16)(lines[lineindex].frontsector->ceilingheight>>FRACBITS); else nextmapoverride = (INT16)(lines[lineindex].frontsector->floorheight>>FRACBITS); if (lines[lineindex].flags & ML_NOCLIMB) skipstats = true; } } break; case 3: // Red Team's Base if (gametype == GT_CTF && P_IsObjectOnGround(player->mo)) { if (player->ctfteam == 1 && (player->gotflag & GF_BLUEFLAG)) { mobj_t *mo; // Make sure the red team still has their own // flag at their base so they can score. 
if (!P_IsFlagAtBase(MT_REDFLAG)) break; HU_SetCEchoFlags(0); HU_SetCEchoDuration(5); HU_DoCEcho(va(M_GetText("%s\\captured the blue flag.\\\\\\\\"), player_names[player-players])); if (splitscreen || players[consoleplayer].ctfteam == 1) S_StartSound(NULL, sfx_flgcap); else if (players[consoleplayer].ctfteam == 2) S_StartSound(NULL, sfx_lose); mo = P_SpawnMobj(player->mo->x,player->mo->y,player->mo->z,MT_BLUEFLAG); player->gotflag &= ~GF_BLUEFLAG; mo->flags &= ~MF_SPECIAL; mo->fuse = TICRATE; mo->spawnpoint = bflagpoint; mo->flags2 |= MF2_JUSTATTACKED; redscore += 1; P_AddPlayerScore(player, 250); } } break; case 4: // Blue Team's Base if (gametype == GT_CTF && P_IsObjectOnGround(player->mo)) { if (player->ctfteam == 2 && (player->gotflag & GF_REDFLAG)) { mobj_t *mo; // Make sure the blue team still has their own // flag at their base so they can score. if (!P_IsFlagAtBase(MT_BLUEFLAG)) break; HU_SetCEchoFlags(0); HU_SetCEchoDuration(5); HU_DoCEcho(va(M_GetText("%s\\captured the red flag.\\\\\\\\"), player_names[player-players])); if (splitscreen || players[consoleplayer].ctfteam == 2) S_StartSound(NULL, sfx_flgcap); else if (players[consoleplayer].ctfteam == 1) S_StartSound(NULL, sfx_lose); mo = P_SpawnMobj(player->mo->x,player->mo->y,player->mo->z,MT_REDFLAG); player->gotflag &= ~GF_REDFLAG; mo->flags &= ~MF_SPECIAL; mo->fuse = TICRATE; mo->spawnpoint = rflagpoint; mo->flags2 |= MF2_JUSTATTACKED; bluescore += 1; P_AddPlayerScore(player, 250); } } break; case 5: // Fan sector player->mo->momz += mobjinfo[MT_FAN].mass/4; if (player->mo->momz > mobjinfo[MT_FAN].mass) player->mo->momz = mobjinfo[MT_FAN].mass; P_ResetPlayer(player); if (player->panim != PA_FALL) P_SetPlayerMobjState(player->mo, S_PLAY_FALL1); break; case 6: // Super Sonic transformer if (player->mo->health > 0 && !player->bot && (player->charflags & SF_SUPER) && !player->powers[pw_super] && ALL7EMERALDS(emeralds)) P_DoSuperTransformation(player, true); break; case 7: // Make player spin if 
(!(player->pflags & PF_SPINNING) && P_IsObjectOnGround(player->mo) && (player->charability2 == CA2_SPINDASH)) { player->pflags |= PF_SPINNING; P_SetPlayerMobjState(player->mo, S_PLAY_ATK1); S_StartAttackSound(player->mo, sfx_spin); if (abs(player->rmomx) < FixedMul(5*FRACUNIT, player->mo->scale) && abs(player->rmomy) < FixedMul(5*FRACUNIT, player->mo->scale)) P_InstaThrust(player->mo, player->mo->angle, FixedMul(10*FRACUNIT, player->mo->scale)); } break; case 8: // Zoom Tube Start { INT32 sequence; fixed_t speed; INT32 lineindex; thinker_t *th; mobj_t *waypoint = NULL; mobj_t *mo2; angle_t an; if (player->mo->tracer && player->mo->tracer->type == MT_TUBEWAYPOINT) break; // Find line #3 tagged to this sector lineindex = P_FindSpecialLineFromTag(3, sector->tag, -1); if (lineindex == -1) { CONS_Debug(DBG_GAMELOGIC, "ERROR: Sector special %d missing line special #3.\n", sector->special); break; } // Grab speed and sequence values speed = abs(lines[lineindex].dx)/8; sequence = abs(lines[lineindex].dy)>>FRACBITS; // scan the thinkers // to find the first waypoint for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type == MT_TUBEWAYPOINT && mo2->threshold == sequence && mo2->health == 0) { waypoint = mo2; break; } } if (!waypoint) { CONS_Debug(DBG_GAMELOGIC, "ERROR: FIRST WAYPOINT IN SEQUENCE %d NOT FOUND.\n", sequence); break; } else { CONS_Debug(DBG_GAMELOGIC, "Waypoint %d found in sequence %d - speed = %d\n", waypoint->health, sequence, speed); } an = R_PointToAngle2(player->mo->x, player->mo->y, waypoint->x, waypoint->y) - player->mo->angle; if (an > ANGLE_90 && an < ANGLE_270 && !(lines[lineindex].flags & ML_EFFECT4)) break; // behind back P_SetTarget(&player->mo->tracer, waypoint); player->speed = speed; player->pflags |= PF_SPINNING; player->pflags &= ~PF_JUMPED; player->pflags &= ~PF_GLIDING; player->climbing = 0; if (!(player->mo->state >= &states[S_PLAY_ATK1] 
&& player->mo->state <= &states[S_PLAY_ATK4])) { P_SetPlayerMobjState(player->mo, S_PLAY_ATK1); S_StartSound(player->mo, sfx_spin); } } break; case 9: // Zoom Tube End { INT32 sequence; fixed_t speed; INT32 lineindex; thinker_t *th; mobj_t *waypoint = NULL; mobj_t *mo2; angle_t an; if (player->mo->tracer && player->mo->tracer->type == MT_TUBEWAYPOINT) break; // Find line #3 tagged to this sector lineindex = P_FindSpecialLineFromTag(3, sector->tag, -1); if (lineindex == -1) { CONS_Debug(DBG_GAMELOGIC, "ERROR: Sector special %d missing line special #3.\n", sector->special); break; } // Grab speed and sequence values speed = -(abs(lines[lineindex].dx)/8); // Negative means reverse sequence = abs(lines[lineindex].dy)>>FRACBITS; // scan the thinkers // to find the last waypoint for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type == MT_TUBEWAYPOINT && mo2->threshold == sequence) { if (!waypoint) waypoint = mo2; else if (mo2->health > waypoint->health) waypoint = mo2; } } if (!waypoint) { CONS_Debug(DBG_GAMELOGIC, "ERROR: LAST WAYPOINT IN SEQUENCE %d NOT FOUND.\n", sequence); break; } else { CONS_Debug(DBG_GAMELOGIC, "Waypoint %d found in sequence %d - speed = %d\n", waypoint->health, sequence, speed); } an = R_PointToAngle2(player->mo->x, player->mo->y, waypoint->x, waypoint->y) - player->mo->angle; if (an > ANGLE_90 && an < ANGLE_270 && !(lines[lineindex].flags & ML_EFFECT4)) break; // behind back P_SetTarget(&player->mo->tracer, waypoint); player->speed = speed; player->pflags |= PF_SPINNING; player->pflags &= ~PF_JUMPED; if (!(player->mo->state >= &states[S_PLAY_ATK1] && player->mo->state <= &states[S_PLAY_ATK4])) { P_SetPlayerMobjState(player->mo, S_PLAY_ATK1); S_StartSound(player->mo, sfx_spin); } } break; case 10: // Finish Line if (gametype == GT_RACE && !player->exiting) { if (player->starpostnum == numstarposts) // Must have touched all the starposts { 
player->laps++; if (player->pflags & PF_NIGHTSMODE) player->drillmeter += 48*20; if (player->laps >= (UINT8)cv_numlaps.value) CONS_Printf(M_GetText("%s has finished the race.\n"), player_names[player-players]); else CONS_Printf(M_GetText("%s started lap %u\n"), player_names[player-players], (UINT32)player->laps+1); // Reset starposts (checkpoints) info player->starpostangle = player->starposttime = player->starpostnum = 0; player->starpostx = player->starposty = player->starpostz = 0; P_ResetStarposts(); // Play the starpost sound for 'consistency' S_StartSound(player->mo, sfx_strpst); } else if (player->starpostnum) { // blatant reuse of a variable that's normally unused in circuit if (!player->tossdelay) S_StartSound(player->mo, sfx_lose); player->tossdelay = 3; } if (player->laps >= (unsigned)cv_numlaps.value) { if (P_IsLocalPlayer(player)) { HU_SetCEchoFlags(0); HU_SetCEchoDuration(5); HU_DoCEcho("FINISHED!"); } P_DoPlayerExit(player); } } break; case 11: // Rope hang { INT32 sequence; fixed_t speed; INT32 lineindex; thinker_t *th; mobj_t *waypointmid = NULL; mobj_t *waypointhigh = NULL; mobj_t *waypointlow = NULL; mobj_t *mo2; mobj_t *closest = NULL; line_t junk; vertex_t v1, v2, resulthigh, resultlow; mobj_t *highest = NULL; if (player->mo->tracer && player->mo->tracer->type == MT_TUBEWAYPOINT) break; if (player->mo->momz > 0) break; if (player->cmd.buttons & BT_USE) break; if (!(player->pflags & PF_SLIDING) && player->mo->state == &states[player->mo->info->painstate]) break; if (player->exiting) break; //initialize resulthigh and resultlow with 0 memset(&resultlow, 0x00, sizeof(resultlow)); memset(&resulthigh, 0x00, sizeof(resulthigh)); // Find line #11 tagged to this sector lineindex = P_FindSpecialLineFromTag(11, sector->tag, -1); if (lineindex == -1) { CONS_Debug(DBG_GAMELOGIC, "ERROR: Sector special %d missing line special #11.\n", sector->special); break; } // Grab speed and sequence values speed = abs(lines[lineindex].dx)/8; sequence = 
abs(lines[lineindex].dy)>>FRACBITS; // Find the closest waypoint // Find the preceding waypoint // Find the proceeding waypoint // Determine the closest spot on the line between the three waypoints // Put player at that location. // scan the thinkers // to find the first waypoint for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type != MT_TUBEWAYPOINT) continue; if (mo2->threshold != sequence) continue; if (!highest) highest = mo2; else if (mo2->health > highest->health) // Find the highest waypoint # in case we wrap highest = mo2; if (closest && P_AproxDistance(P_AproxDistance(player->mo->x-mo2->x, player->mo->y-mo2->y), player->mo->z-mo2->z) > P_AproxDistance(P_AproxDistance(player->mo->x-closest->x, player->mo->y-closest->y), player->mo->z-closest->z)) continue; // Found a target closest = mo2; } waypointmid = closest; closest = NULL; if (waypointmid == NULL) { CONS_Debug(DBG_GAMELOGIC, "ERROR: WAYPOINT(S) IN SEQUENCE %d NOT FOUND.\n", sequence); break; } // Find waypoint before this one (waypointlow) for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type != MT_TUBEWAYPOINT) continue; if (mo2->threshold != sequence) continue; if (waypointmid->health == 0) { if (mo2->health != highest->health) continue; } else if (mo2->health != waypointmid->health - 1) continue; // Found a target waypointlow = mo2; break; } // Find waypoint after this one (waypointhigh) for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 != (actionf_p1)P_MobjThinker) continue; mo2 = (mobj_t *)th; if (mo2->type != MT_TUBEWAYPOINT) continue; if (mo2->threshold != sequence) continue; if (waypointmid->health == highest->health) { if (mo2->health != 0) continue; } else if (mo2->health != waypointmid->health + 1) continue; // Found a target waypointhigh = mo2; 
break; } CONS_Debug(DBG_GAMELOGIC, "WaypointMid: %d; WaypointLow: %d; WaypointHigh: %d\n", waypointmid->health, waypointlow ? waypointlow->health : -1, waypointhigh ? waypointhigh->health : -1); // Now we have three waypoints... the closest one we're near, and the one that comes before, and after. // Next, we need to find the closest point on the line between each set, and determine which one we're // closest to. // Waypointmid and Waypointlow: if (waypointlow) { v1.x = waypointmid->x; v1.y = waypointmid->y; v1.z = waypointmid->z; v2.x = waypointlow->x; v2.y = waypointlow->y; v2.z = waypointlow->z; junk.v1 = &v1; junk.v2 = &v2; junk.dx = v2.x - v1.x; junk.dy = v2.y - v1.y; P_ClosestPointOnLine3D(player->mo->x, player->mo->y, player->mo->z, &junk, &resultlow); } // Waypointmid and Waypointhigh: if (waypointhigh) { v1.x = waypointmid->x; v1.y = waypointmid->y; v1.z = waypointmid->z; v2.x = waypointhigh->x; v2.y = waypointhigh->y; v2.z = waypointhigh->z; junk.v1 = &v1; junk.v2 = &v2; junk.dx = v2.x - v1.x; junk.dy = v2.y - v1.y; P_ClosestPointOnLine3D(player->mo->x, player->mo->y, player->mo->z, &junk, &resulthigh); } // 3D support now available. Disregard the previous notice here. 
-Red P_UnsetThingPosition(player->mo); P_ResetPlayer(player); player->mo->momx = player->mo->momy = player->mo->momz = 0; if (lines[lineindex].flags & ML_EFFECT1) // Don't wrap { highest->flags |= MF_SLIDEME; } // Changing the conditions on these ifs to fix issues with snapping to the wrong spot -Red if ((lines[lineindex].flags & ML_EFFECT1) && waypointmid->health == 0) { closest = waypointhigh; player->mo->x = resulthigh.x; player->mo->y = resulthigh.y; player->mo->z = resulthigh.z - P_GetPlayerHeight(player); } else if ((lines[lineindex].flags & ML_EFFECT1) && waypointmid->health == highest->health) { closest = waypointmid; player->mo->x = resultlow.x; player->mo->y = resultlow.y; player->mo->z = resultlow.z - P_GetPlayerHeight(player); } else { if (P_AproxDistance(P_AproxDistance(player->mo->x-resultlow.x, player->mo->y-resultlow.y), player->mo->z-resultlow.z) < P_AproxDistance(P_AproxDistance(player->mo->x-resulthigh.x, player->mo->y-resulthigh.y), player->mo->z-resulthigh.z)) { // Line between Mid and Low is closer closest = waypointmid; player->mo->x = resultlow.x; player->mo->y = resultlow.y; player->mo->z = resultlow.z - P_GetPlayerHeight(player); } else { // Line between Mid and High is closer closest = waypointhigh; player->mo->x = resulthigh.x; player->mo->y = resulthigh.y; player->mo->z = resulthigh.z - P_GetPlayerHeight(player); } } P_SetTarget(&player->mo->tracer, closest); // Option for static ropes. if (lines[lineindex].flags & ML_NOCLIMB) player->speed = 0; else player->speed = speed; player->pflags |= PF_ROPEHANG; S_StartSound(player->mo, sfx_s3k4a); player->pflags &= ~PF_JUMPED; player->pflags &= ~PF_GLIDING; player->pflags &= ~PF_SLIDING; player->climbing = 0; P_SetThingPosition(player->mo); P_SetPlayerMobjState(player->mo, S_PLAY_CARRY); } break; case 12: // Camera noclip case 13: // Unused case 14: // Unused case 15: // Unused break; } } /** Checks if an object is standing on or is inside a special 3D floor. * If so, the sector is returned. 
 *
 * \param mo Object to check.
 * \return Pointer to the sector with a special type, or NULL if no special 3D
 *         floors are being contacted.
 * \sa P_PlayerOnSpecial3DFloor
 */
sector_t *P_ThingOnSpecial3DFloor(mobj_t *mo)
{
	sector_t *sector;
	ffloor_t *rover;

	sector = mo->subsector->sector;
	if (!sector->ffloors)
		return NULL; // No FOFs in this sector at all.

	for (rover = sector->ffloors; rover; rover = rover->next)
	{
		// Skip FOFs whose control sector carries no special.
		if (!rover->master->frontsector->special)
			continue;

		if (!(rover->flags & FF_EXISTS))
			continue;

		// Check the 3D floor's type...
		// Solid-to-this-object FOFs require standing exactly on the surface;
		// which surface counts is selected by the control sector's FLIPSPECIAL flags.
		if (((rover->flags & FF_BLOCKPLAYER) && mo->player)
			|| ((rover->flags & FF_BLOCKOTHERS) && !mo->player))
		{
			// Thing must be on top of the floor to be affected...
			if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING))
			{
				// Floor-side only: feet must rest on the FOF top, and not be flipped.
				if ((mo->eflags & MFE_VERTICALFLIP) || mo->z != *rover->topheight)
					continue;
			}
			else if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR))
			{
				// Ceiling-side only: flipped object's head must touch the FOF bottom.
				if (!(mo->eflags & MFE_VERTICALFLIP)
					|| mo->z + mo->height != *rover->bottomheight)
					continue;
			}
			else if (rover->master->frontsector->flags & SF_FLIPSPECIAL_BOTH)
			{
				// Either orientation, as long as the matching surface is touched.
				if (!((mo->eflags & MFE_VERTICALFLIP && mo->z + mo->height == *rover->bottomheight)
					|| (!(mo->eflags & MFE_VERTICALFLIP) && mo->z == *rover->topheight)))
					continue;
			}
		}
		else
		{
			// Water and intangible FOFs
			// Non-blocking FOFs trigger if the object overlaps them vertically at all.
			if (mo->z > *rover->topheight || (mo->z + mo->height) < *rover->bottomheight)
				continue;
		}

		// First qualifying FOF wins; later FOFs in the list are not considered.
		return rover->master->frontsector;
	}

	return NULL;
}

/** Checks if a player is standing on or is inside a 3D floor (e.g. water) and
  * applies any specials.
  *
  * \param player Player to check.
 * \sa P_ThingOnSpecial3DFloor, P_PlayerInSpecialSector
 */
static void P_PlayerOnSpecial3DFloor(player_t *player, sector_t *sector)
{
	ffloor_t *rover;

	// First pass: ordinary FOFs attached to this sector.
	for (rover = sector->ffloors; rover; rover = rover->next)
	{
		if (!rover->master->frontsector->special)
			continue;
		if (!(rover->flags & FF_EXISTS))
			continue;

		// Check the 3D floor's type...
		if (rover->flags & FF_BLOCKPLAYER)
		{
			// Thing must be on top of the floor to be affected...
			// The FLIPSPECIAL flags on the control sector pick which surface
			// (top for upright players, bottom for gravity-flipped ones) must be touched.
			if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING))
			{
				if ((player->mo->eflags & MFE_VERTICALFLIP) || player->mo->z != P_GetSpecialTopZ(player->mo, sectors + rover->secnum, sector))
					continue;
			}
			else if ((rover->master->frontsector->flags & SF_FLIPSPECIAL_CEILING)
				&& !(rover->master->frontsector->flags & SF_FLIPSPECIAL_FLOOR))
			{
				if (!(player->mo->eflags & MFE_VERTICALFLIP)
					|| player->mo->z + player->mo->height != P_GetSpecialBottomZ(player->mo, sectors + rover->secnum, sector))
					continue;
			}
			else if (rover->master->frontsector->flags & SF_FLIPSPECIAL_BOTH)
			{
				if (!((player->mo->eflags & MFE_VERTICALFLIP && player->mo->z + player->mo->height == P_GetSpecialBottomZ(player->mo, sectors + rover->secnum, sector))
					|| (!(player->mo->eflags & MFE_VERTICALFLIP) && player->mo->z == P_GetSpecialTopZ(player->mo, sectors + rover->secnum, sector))))
					continue;
			}
		}
		else
		{
			// Water and DEATH FOG!!! heh
			// Non-solid FOFs only require vertical overlap.
			if (player->mo->z > P_GetSpecialTopZ(player->mo, sectors + rover->secnum, sector)
				|| (player->mo->z + player->mo->height) < P_GetSpecialBottomZ(player->mo, sectors + rover->secnum, sector))
				continue;
		}

		// This FOF has the special we're looking for, but are we allowed to touch it?
		if (sector == player->mo->subsector->sector
			|| (rover->master->frontsector->flags & SF_TRIGGERSPECIAL_TOUCH))
			P_ProcessSpecialSector(player, rover->master->frontsector, sector);
	}

	// Allow sector specials to be applied to polyobjects!
	// Second pass: walk the subsector's polyobject list; each iteration must
	// advance po before continue'ing, since this is a manual linked-list walk.
	if (player->mo->subsector->polyList)
	{
		polyobj_t *po = player->mo->subsector->polyList;
		sector_t *polysec;
		boolean touching = false;
		boolean inside = false;

		while(po)
		{
			if (po->flags & POF_NOSPECIALS)
			{
				po = (polyobj_t *)(po->link.next);
				continue;
			}

			// A polyobject's "control" sector is the backsector of its first line.
			polysec = po->lines[0]->backsector;

			if ((polysec->flags & SF_TRIGGERSPECIAL_TOUCH))
				touching = P_MobjTouchingPolyobj(po, player->mo);
			else
				touching = false;

			inside = P_MobjInsidePolyobj(po, player->mo);

			if (!(inside || touching))
			{
				po = (polyobj_t *)(po->link.next);
				continue;
			}

			// We're inside it! Yess...
			if (!polysec->special)
			{
				po = (polyobj_t *)(po->link.next);
				continue;
			}

			if (!(po->flags & POF_TESTHEIGHT)) // Don't do height checking
			{
			}
			else if (po->flags & POF_SOLID)
			{
				// Thing must be on top of the floor to be affected...
				// Note: for polyobjects, ceilingheight is the walkable top and
				// floorheight the underside -- the checks mirror the FOF ones above.
				if ((polysec->flags & SF_FLIPSPECIAL_FLOOR)
					&& !(polysec->flags & SF_FLIPSPECIAL_CEILING))
				{
					if ((player->mo->eflags & MFE_VERTICALFLIP) || player->mo->z != polysec->ceilingheight)
					{
						po = (polyobj_t *)(po->link.next);
						continue;
					}
				}
				else if ((polysec->flags & SF_FLIPSPECIAL_CEILING)
					&& !(polysec->flags & SF_FLIPSPECIAL_FLOOR))
				{
					if (!(player->mo->eflags & MFE_VERTICALFLIP)
						|| player->mo->z + player->mo->height != polysec->floorheight)
					{
						po = (polyobj_t *)(po->link.next);
						continue;
					}
				}
				else if (polysec->flags & SF_FLIPSPECIAL_BOTH)
				{
					if (!((player->mo->eflags & MFE_VERTICALFLIP && player->mo->z + player->mo->height == polysec->floorheight)
						|| (!(player->mo->eflags & MFE_VERTICALFLIP) && player->mo->z == polysec->ceilingheight)))
					{
						po = (polyobj_t *)(po->link.next);
						continue;
					}
				}
			}
			else
			{
				// Water and DEATH FOG!!! heh
				if (player->mo->z > polysec->ceilingheight || (player->mo->z + player->mo->height) < polysec->floorheight)
				{
					po = (polyobj_t *)(po->link.next);
					continue;
				}
			}

			P_ProcessSpecialSector(player, polysec, sector);

			po = (polyobj_t *)(po->link.next);
		}
	}
}

#define VDOORSPEED (FRACUNIT*2)

//
// P_RunSpecialSectorCheck
//
// Helper function to P_PlayerInSpecialSector
//
// Decides whether the sector's special activates without the player touching
// the floor; if so it fires immediately, otherwise it fires only when the
// player is standing on the appropriate surface.
//
static void P_RunSpecialSectorCheck(player_t *player, sector_t *sector)
{
	boolean nofloorneeded = false;
	fixed_t f_affectpoint, c_affectpoint;

	if (!sector->special) // nothing special, exit
		return;

	if (GETSECSPECIAL(sector->special, 2) == 9) // Egg trap capsule -- should only be for 3dFloors!
		return;

	// The list of specials that activate without floor touch
	// Check Section 1
	switch(GETSECSPECIAL(sector->special, 1))
	{
		case 2: // Damage (water)
		case 8: // Instant kill
		case 10: // Ring drainer that doesn't require floor touch
		case 12: // Space countdown
			nofloorneeded = true;
			break;
	}
	// Check Section 2
	switch(GETSECSPECIAL(sector->special, 2))
	{
		case 2: // Linedef executor (All players needed)
		case 4: // Linedef executor
		case 6: // Linedef executor (7 Emeralds)
		case 7: // Linedef executor (NiGHTS Mare)
			nofloorneeded = true;
			break;
	}
	// Check Section 3
/*	switch(GETSECSPECIAL(sector->special, 3))
	{

	}*/

	// Check Section 4
	switch(GETSECSPECIAL(sector->special, 4))
	{
		case 2: // Level Exit / GOAL Sector / Flag Return
			if (!useNightsSS && G_IsSpecialStage(gamemap))
			{
				// Special stage GOAL sector
				// requires touching floor.
				break;
			}
			/* FALLTHRU -- non-special-stage exits activate without floor touch */
		case 1: // Starpost activator
		case 5: // Fan sector
		case 6: // Super Sonic Transform
		case 8: // Zoom Tube Start
		case 9: // Zoom Tube End
		case 10: // Finish line
			nofloorneeded = true;
			break;
	}

	if (nofloorneeded)
	{
		P_ProcessSpecialSector(player, sector, NULL);
		return;
	}

	f_affectpoint = P_GetSpecialBottomZ(player->mo, sector, sector);
	c_affectpoint = P_GetSpecialTopZ(player->mo, sector, sector);

	// Only go further if on the ground
	if ((sector->flags & SF_FLIPSPECIAL_FLOOR) && !(sector->flags & SF_FLIPSPECIAL_CEILING) && player->mo->z != f_affectpoint)
		return;

	if ((sector->flags & SF_FLIPSPECIAL_CEILING) && !(sector->flags & SF_FLIPSPECIAL_FLOOR) && player->mo->z + player->mo->height != c_affectpoint)
		return;

	if ((sector->flags & SF_FLIPSPECIAL_BOTH)
		&& player->mo->z != f_affectpoint
		&& player->mo->z + player->mo->height != c_affectpoint)
		return;

	P_ProcessSpecialSector(player, sector, NULL);
}

/** Checks if the player is in a special sector or FOF and apply any specials.
  *
  * \param player Player to check.
  * \sa P_PlayerOnSpecial3DFloor, P_ProcessSpecialSector
  */
void P_PlayerInSpecialSector(player_t *player)
{
	sector_t *sector;
	msecnode_t *node;

	if (!player->mo)
		return;

	// Do your ->subsector->sector first
	sector = player->mo->subsector->sector;
	P_PlayerOnSpecial3DFloor(player, sector);
	// After P_PlayerOnSpecial3DFloor, recheck if the player is in that sector,
	// because the player can be teleported in between these times.
	if (sector == player->mo->subsector->sector)
		P_RunSpecialSectorCheck(player, sector);

	// Iterate through touching_sectorlist
	for (node = player->mo->touching_sectorlist; node; node = node->m_snext)
	{
		sector = node->m_sector;

		if (sector == player->mo->subsector->sector) // Don't duplicate
			continue;

		// Check 3D floors...
		P_PlayerOnSpecial3DFloor(player, sector);

		// NOTE(review): this is `return`, not `continue` -- a touched sector
		// without SF_TRIGGERSPECIAL_TOUCH aborts the whole list walk, skipping
		// any later touching sectors. Looks intentional here but worth confirming.
		if (!(sector->flags & SF_TRIGGERSPECIAL_TOUCH))
			return;

		// After P_PlayerOnSpecial3DFloor, recheck if the player is in that sector,
		// because the player can be teleported in between these times.
		if (sector == player->mo->subsector->sector)
			P_RunSpecialSectorCheck(player, sector);
	}
}

/** Animate planes, scroll walls, etc. and keeps track of level timelimit and exits if time is up.
 *
 * \sa P_CheckTimeLimit, P_CheckPointLimit
 */
void P_UpdateSpecials(void)
{
	anim_t *anim;
	INT32 i;
	INT32 pic;
	size_t j;

	levelflat_t *foundflats; // for flat animation

	// LEVEL TIMER
	P_CheckTimeLimit();

	// POINT LIMIT
	P_CheckPointLimit();

#ifdef ESLOPE
	// Dynamic slopeness
	P_RunDynamicSlopes();
#endif

	// ANIMATE TEXTURES
	// Each animated texture cycles by remapping its translation entry to the
	// frame selected by the level timer.
	for (anim = anims; anim < lastanim; anim++)
	{
		for (i = 0; i < anim->numpics; i++)
		{
			pic = anim->basepic + ((leveltime/anim->speed + i) % anim->numpics);
			if (anim->istexture)
				texturetranslation[anim->basepic+i] = pic;
		}
	}

	// ANIMATE FLATS
	/// \todo do not check the non-animate flat.. link the animated ones?
	/// \note its faster than the original anywaysince it animates only
	///       flats used in the level, and there's usually very few of them
	foundflats = levelflats;
	for (j = 0; j < numlevelflats; j++, foundflats++)
	{
		if (foundflats->speed) // it is an animated flat
		{
			// update the levelflat lump number
			foundflats->lumpnum = foundflats->baselumpnum +
				((leveltime/foundflats->speed + foundflats->animseq) % foundflats->numpics);
		}
	}
}

// Finds the ffloor_t in sec's list whose control sector is sec2, or NULL.
static inline ffloor_t *P_GetFFloorBySec(sector_t *sec, sector_t *sec2)
{
	ffloor_t *rover;

	if (!sec->ffloors)
		return NULL;
	for (rover = sec->ffloors; rover; rover = rover->next)
		if (rover->secnum == (size_t)(sec2 - sectors))
			return rover;
	return NULL;
}

/** Adds a newly formed 3Dfloor structure to a sector's ffloors list.
  *
  * \param sec    Target sector.
  * \param ffloor Newly formed 3Dfloor structure.
 * \sa P_AddFakeFloor
 */
static inline void P_AddFFloorToList(sector_t *sec, ffloor_t *ffloor)
{
	ffloor_t *rover;

	// Empty list: the new ffloor becomes the head.
	if (!sec->ffloors)
	{
		sec->ffloors = ffloor;
		ffloor->next = 0;
		ffloor->prev = 0;
		return;
	}

	// Otherwise append to the tail of the doubly-linked list.
	for (rover = sec->ffloors; rover->next; rover = rover->next);

	rover->next = ffloor;
	ffloor->prev = rover;
	ffloor->next = 0;
}

/** Adds a 3Dfloor.
  *
  * \param sec          Target sector.
  * \param sec2         Control sector.
  * \param master       Control linedef.
  * \param flags        Options affecting this 3Dfloor.
  * \param secthinkers  List of relevant thinkers sorted by sector. May be NULL.
  * \return Pointer to the new 3Dfloor, or NULL if none was created.
  * \sa P_AddFFloor, P_AddFakeFloorsByLine, P_SpawnSpecials
  */
static ffloor_t *P_AddFakeFloor(sector_t *sec, sector_t *sec2, line_t *master, ffloortype_e flags, thinkerlist_t *secthinkers)
{
	ffloor_t *ffloor;
	thinker_t *th;
	friction_t *f;
	pusher_t *p;
	levelspecthink_t *lst;
	size_t sec2num;
	size_t i;

	if (sec == sec2)
		return NULL; //Don't need a fake floor on a control sector.
	if ((ffloor = (P_GetFFloorBySec(sec, sec2))))
		return ffloor; // If this ffloor already exists, return it

	if (sec2->ceilingheight < sec2->floorheight)
	{
		fixed_t tempceiling = sec2->ceilingheight;
		//flip the sector around and print an error instead of crashing 12.1.08 -Inuyasha
		CONS_Alert(CONS_ERROR, M_GetText("A FOF tagged %d has a top height below its bottom.\n"), master->tag);
		sec2->ceilingheight = sec2->floorheight;
		sec2->floorheight = tempceiling;
	}

	sec2->tagline = master;

	// Record this target sector in the control sector's attachment arrays,
	// growing them (doubling) as needed; duplicates bail out early.
	if (sec2->numattached == 0)
	{
		sec2->attached = Z_Malloc(sizeof (*sec2->attached) * sec2->maxattached, PU_STATIC, NULL);
		sec2->attachedsolid = Z_Malloc(sizeof (*sec2->attachedsolid) * sec2->maxattached, PU_STATIC, NULL);
		sec2->attached[0] = sec - sectors;
		sec2->numattached = 1;
		sec2->attachedsolid[0] = (flags & FF_SOLID);
	}
	else
	{
		for (i = 0; i < sec2->numattached; i++)
			if (sec2->attached[i] == (size_t)(sec - sectors))
				return NULL;

		if (sec2->numattached >= sec2->maxattached)
		{
			sec2->maxattached *= 2;
			sec2->attached = Z_Realloc(sec2->attached, sizeof (*sec2->attached) * sec2->maxattached, PU_STATIC, NULL);
			sec2->attachedsolid = Z_Realloc(sec2->attachedsolid, sizeof (*sec2->attachedsolid) * sec2->maxattached, PU_STATIC, NULL);
		}
		sec2->attached[sec2->numattached] = sec - sectors;
		sec2->attachedsolid[sec2->numattached] = (flags & FF_SOLID);
		sec2->numattached++;
	}

	// Add the floor
	// The ffloor aliases the control sector's fields by pointer, so moving
	// the control sector moves the FOF.
	ffloor = Z_Calloc(sizeof (*ffloor), PU_LEVEL, NULL);
	ffloor->secnum = sec2 - sectors;
	ffloor->target = sec;
	ffloor->bottomheight = &sec2->floorheight;
	ffloor->bottompic = &sec2->floorpic;
	ffloor->bottomxoffs = &sec2->floor_xoffs;
	ffloor->bottomyoffs = &sec2->floor_yoffs;
	ffloor->bottomangle = &sec2->floorpic_angle;

	// Add the ceiling
	ffloor->topheight = &sec2->ceilingheight;
	ffloor->toppic = &sec2->ceilingpic;
	ffloor->toplightlevel = &sec2->lightlevel;
	ffloor->topxoffs = &sec2->ceiling_xoffs;
	ffloor->topyoffs = &sec2->ceiling_yoffs;
	ffloor->topangle = &sec2->ceilingpic_angle;

#ifdef ESLOPE
	// Add slopes
	ffloor->t_slope = &sec2->c_slope;
	ffloor->b_slope = &sec2->f_slope;
#endif

	if ((flags & FF_SOLID) && (master->flags & ML_EFFECT1)) // Block player only
		flags &= ~FF_BLOCKOTHERS;

	if ((flags & FF_SOLID) && (master->flags & ML_EFFECT2)) // Block all BUT player
		flags &= ~FF_BLOCKPLAYER;

	ffloor->spawnflags = ffloor->flags = flags;
	ffloor->master = master;
	ffloor->norender = INFTICS;

	// Scan the thinkers to check for special conditions applying to this FOF.
	// If we have thinkers sorted by sector, just check the relevant ones;
	// otherwise, check them all. Apologies for the ugly loop...

	sec2num = sec2 - sectors;

	// Just initialise both of these to placate the compiler.
	i = 0;
	th = thinkercap.next;

	for(;;)
	{
		// Pick the next thinker: either from the per-sector index (fast path)
		// or by walking the global thinker list.
		if(secthinkers)
		{
			if(i < secthinkers[sec2num].count)
				th = secthinkers[sec2num].thinkers[i];
			else break;
		}
		else if (th == &thinkercap)
			break;

		// Should this FOF have spikeness?
		if (th->function.acp1 == (actionf_p1)T_SpikeSector)
		{
			lst = (levelspecthink_t *)th;

			if (lst->sector == sec2)
				P_AddSpikeThinker(sec, (INT32)sec2num);
		}
		// Should this FOF have friction?
		else if(th->function.acp1 == (actionf_p1)T_Friction)
		{
			f = (friction_t *)th;

			if (f->affectee == (INT32)sec2num)
				Add_Friction(f->friction, f->movefactor, (INT32)(sec-sectors), f->affectee);
		}
		// Should this FOF have wind/current/pusher?
		else if(th->function.acp1 == (actionf_p1)T_Pusher)
		{
			p = (pusher_t *)th;

			if (p->affectee == (INT32)sec2num)
				Add_Pusher(p->type, p->x_mag<<FRACBITS, p->y_mag<<FRACBITS, p->source, (INT32)(sec-sectors), p->affectee, p->exclusive, p->slider);
		}

		if(secthinkers) i++;
		else th = th->next;
	}

	// Translucency level comes from the control line's upper texture number.
	if (flags & FF_TRANSLUCENT)
	{
		if (sides[master->sidenum[0]].toptexture > 0)
			ffloor->alpha = sides[master->sidenum[0]].toptexture; // for future reference, "#0" is 1, and "#255" is 256. Be warned
		else
			ffloor->alpha = 0x80;
	}
	else
		ffloor->alpha = 0xff;

	ffloor->spawnalpha = ffloor->alpha; // save for netgames

	// Set global "this level uses X" flags so per-tic checks can be skipped
	// on levels that don't need them.
	if (flags & FF_QUICKSAND)
		CheckForQuicksand = true;

	if ((flags & FF_BUSTUP) || (flags & FF_SHATTER) || (flags & FF_SPINBUST))
		CheckForBustableBlocks = true;

	if ((flags & FF_MARIO))
	{
		P_AddBlockThinker(sec2, master);
		CheckForMarioBlocks = true;
	}

	if ((flags & FF_CRUMBLE))
		sec2->crumblestate = 1;

	if ((flags & FF_FLOATBOB))
	{
		P_AddFloatThinker(sec2, sec->tag, master);
		CheckForFloatBob = true;
	}

	P_AddFFloorToList(sec, ffloor);

	return ffloor;
}

//
// SPECIAL SPAWNING
//

/** Adds a spike thinker.
  * Sector type Section1:5 will result in this effect.
  *
  * \param sec Sector in which to add the thinker.
 * \param referrer If != sec, then we're dealing with a FOF
 * \sa P_SpawnSpecials, T_SpikeSector
 * \author SSNTails <http://www.ssntails.org>
*/
static void P_AddSpikeThinker(sector_t *sec, INT32 referrer)
{
	levelspecthink_t *spikes;

	// create and initialize new thinker
	spikes = Z_Calloc(sizeof (*spikes), PU_LEVSPEC, NULL);
	P_AddThinker(&spikes->thinker);

	spikes->thinker.function.acp1 = (actionf_p1)T_SpikeSector;

	spikes->sector = sec;
	spikes->vars[0] = referrer; // referrer sector number, read back by T_SpikeSector
}

/** Adds a float thinker.
  * Float thinkers cause solid 3Dfloors to float on water.
  *
  * \param sec        Control sector.
  * \param tag        Sector tag stored in vars[0] (consumed by T_FloatSector).
  * \param sourceline Control linedef.
  * \sa P_SpawnSpecials, T_FloatSector
  * \author SSNTails <http://www.ssntails.org>
  */
static void P_AddFloatThinker(sector_t *sec, INT32 tag, line_t *sourceline)
{
	levelspecthink_t *floater;

	// create and initialize new thinker
	floater = Z_Calloc(sizeof (*floater), PU_LEVSPEC, NULL);
	P_AddThinker(&floater->thinker);

	floater->thinker.function.acp1 = (actionf_p1)T_FloatSector;

	floater->sector = sec;
	floater->vars[0] = tag;
	floater->sourceline = sourceline;
}

/** Adds a bridge thinker.
  * Bridge thinkers cause a group of FOFs to behave like
  * a bridge made up of pieces, that bows under weight.
  *
  * \param sec Control sector.
 * \sa P_SpawnSpecials, T_BridgeThinker
 * \author SSNTails <http://www.ssntails.org>
*/
/*
static inline void P_AddBridgeThinker(line_t *sourceline, sector_t *sec)
{
	levelspecthink_t *bridge;

	// create and initialize new thinker
	bridge = Z_Calloc(sizeof (*bridge), PU_LEVSPEC, NULL);
	P_AddThinker(&bridge->thinker);

	bridge->thinker.function.acp1 = (actionf_p1)T_BridgeThinker;

	bridge->sector = sec;
	bridge->vars[0] = sourceline->frontsector->floorheight;
	bridge->vars[1] = sourceline->frontsector->ceilingheight;
	bridge->vars[2] = P_AproxDistance(sourceline->dx, sourceline->dy); // Speed
	bridge->vars[2] = FixedDiv(bridge->vars[2], 16*FRACUNIT);
	bridge->vars[3] = bridge->vars[2];

	// Start tag and end tag are TARGET SECTORS, not CONTROL SECTORS
	// Control sector tags should be End_Tag + (End_Tag - Start_Tag)
	bridge->vars[4] = sourceline->tag; // Start tag
	bridge->vars[5] = (sides[sourceline->sidenum[0]].textureoffset>>FRACBITS); // End tag
}
*/

/** Adds a Mario block thinker, which changes the block's texture between blank
  * and ? depending on whether it has contents.
  * Needed in case objects respawn inside.
  *
  * \param sec        Control sector.
  * \param sourceline Control linedef.
  * \sa P_SpawnSpecials, T_MarioBlockChecker
  * \author SSNTails <http://www.ssntails.org>
  */
static void P_AddBlockThinker(sector_t *sec, line_t *sourceline)
{
	levelspecthink_t *block;

	// create and initialize new elevator thinker
	block = Z_Calloc(sizeof (*block), PU_LEVSPEC, NULL);
	P_AddThinker(&block->thinker);

	block->thinker.function.acp1 = (actionf_p1)T_MarioBlockChecker;
	block->sourceline = sourceline;

	block->sector = sec;
}

/** Adds a raise thinker.
  * A raise thinker checks to see if the
  * player is standing on its 3D Floor,
  * and if so, raises the platform towards
  * it's destination. Otherwise, it lowers
  * to the lowest nearby height if not
  * there already.
  *
  * Replaces the old "AirBob".
  *
  * \param sec Control sector.
 * \param sourceline Control linedef.
 * \sa P_SpawnSpecials, T_RaiseSector
 * \author SSNTails <http://www.ssntails.org>
*/
static void P_AddRaiseThinker(sector_t *sec, line_t *sourceline)
{
	levelspecthink_t *raise;

	raise = Z_Calloc(sizeof (*raise), PU_LEVSPEC, NULL);
	P_AddThinker(&raise->thinker);

	raise->thinker.function.acp1 = (actionf_p1)T_RaiseSector;

	// vars[0]: "basic" flag, taken from the control line's Block Monsters flag
	if (sourceline->flags & ML_BLOCKMONSTERS)
		raise->vars[0] = 1;
	else
		raise->vars[0] = 0;

	// set up the fields
	raise->sector = sec;

	// Require a spindash to activate
	if (sourceline->flags & ML_NOCLIMB)
		raise->vars[1] = 1;
	else
		raise->vars[1] = 0;

	// vars[2]/vars[3]: movement speed, derived from the control line's length
	raise->vars[2] = P_AproxDistance(sourceline->dx, sourceline->dy);
	raise->vars[2] = FixedDiv(raise->vars[2], 4*FRACUNIT);
	raise->vars[3] = raise->vars[2];

	// vars[4]/vars[5]: destination floor/ceiling at the highest surrounding ceiling,
	// preserving the control sector's thickness
	raise->vars[5] = P_FindHighestCeilingSurrounding(sec);
	raise->vars[4] = raise->vars[5]
		- (sec->ceilingheight - sec->floorheight);

	// vars[6]/vars[7]: same, for the lowest surrounding ceiling
	raise->vars[7] = P_FindLowestCeilingSurrounding(sec);
	raise->vars[6] = raise->vars[7]
		- (sec->ceilingheight - sec->floorheight);

	raise->sourceline = sourceline;
}

// Function to maintain backwards compatibility
// Sets up a T_RaiseSector thinker using the legacy "AirBob" parameterization.
static void P_AddOldAirbob(sector_t *sec, line_t *sourceline, boolean noadjust)
{
	levelspecthink_t *airbob;

	airbob = Z_Calloc(sizeof (*airbob), PU_LEVSPEC, NULL);
	P_AddThinker(&airbob->thinker);

	airbob->thinker.function.acp1 = (actionf_p1)T_RaiseSector;

	// set up the fields
	airbob->sector = sec;

	// Require a spindash to activate
	if (sourceline->flags & ML_NOCLIMB)
		airbob->vars[1] = 1;
	else
		airbob->vars[1] = 0;

	airbob->vars[2] = FRACUNIT;

	if (noadjust)
	{
		airbob->vars[7] = airbob->sector->ceilingheight-16*FRACUNIT;
		airbob->vars[6] = airbob->vars[7]
			- (sec->ceilingheight - sec->floorheight);
	}
	else
		// NOTE: vars[6] is left at 0 here (from Z_Calloc) -- presumably T_RaiseSector
		// ignores it for this configuration; confirm against T_RaiseSector.
		airbob->vars[7] = airbob->sector->ceilingheight - P_AproxDistance(sourceline->dx, sourceline->dy);

	airbob->vars[3] = airbob->vars[2];

	if (sourceline->flags & ML_BLOCKMONSTERS)
		airbob->vars[0] = 1;
	else
		airbob->vars[0] = 0;

	airbob->vars[5] = sec->ceilingheight;
	airbob->vars[4] = airbob->vars[5]
		- (sec->ceilingheight - sec->floorheight);

	airbob->sourceline = sourceline;
}

/** Adds a thwomp thinker.
  * Even thwomps need to think!
  *
  * \param sec Control sector.
  * \param actionsector Target sector.
  * \param sourceline Control linedef.
  * \sa P_SpawnSpecials, T_ThwompSector
  * \author SSNTails <http://www.ssntails.org>
  */
static inline void P_AddThwompThinker(sector_t *sec, sector_t *actionsector, line_t *sourceline)
{
// Local aliases for vars[] slots; #undef'd at the end of the function so they
// don't leak into the rest of the file.
#define speed vars[1]
#define direction vars[2]
#define distance vars[3]
#define floorwasheight vars[4]
#define ceilingwasheight vars[5]
	levelspecthink_t *thwomp;

	// You *probably* already have a thwomp in this sector. If you've combined it with something
	// else that uses the floordata/ceilingdata, you must be weird.
	if (sec->floordata || sec->ceilingdata)
		return;

	// create and initialize new elevator thinker
	thwomp = Z_Calloc(sizeof (*thwomp), PU_LEVSPEC, NULL);
	P_AddThinker(&thwomp->thinker);

	thwomp->thinker.function.acp1 = (actionf_p1)T_ThwompSector;

	// set up the fields according to the type of elevator action
	thwomp->sector = sec;
	thwomp->vars[0] = actionsector->tag;
	thwomp->floorwasheight = thwomp->sector->floorheight;
	thwomp->ceilingwasheight = thwomp->sector->ceilingheight;
	thwomp->direction = 0;
	thwomp->distance = 1;
	thwomp->sourceline = sourceline;
	// Claim both plane-movement slots so nothing else moves this sector.
	thwomp->sector->floordata = thwomp;
	thwomp->sector->ceilingdata = thwomp;
	return;
#undef speed
#undef direction
#undef distance
#undef floorwasheight
#undef ceilingwasheight
}

/** Adds a thinker which checks if any MF_ENEMY objects with health are in the defined area.
  * If not, a linedef executor is run once.
  *
  * \param sec Control sector.
  * \param sourceline Control linedef.
  * \sa P_SpawnSpecials, T_NoEnemiesSector
  * \author SSNTails <http://www.ssntails.org>
  */
static inline void P_AddNoEnemiesThinker(sector_t *sec, line_t *sourceline)
{
	levelspecthink_t *nobaddies;

	// create and initialize new thinker (Z_Calloc zero-fills all fields)
	nobaddies = Z_Calloc(sizeof (*nobaddies), PU_LEVSPEC, NULL);
	P_AddThinker(&nobaddies->thinker);

	nobaddies->thinker.function.acp1 = (actionf_p1)T_NoEnemiesSector;

	nobaddies->sector = sec;
	nobaddies->sourceline = sourceline;
}

/** Adds a thinker for Each-Time linedef executors. A linedef executor is run
  * only when a player enters the area and doesn't run again until they re-enter.
  *
  * \param sec        Control sector that contains the lines of executors we will want to run.
  * \param sourceline Control linedef.
  * \sa P_SpawnSpecials, T_EachTimeThinker
  * \author SSNTails <http://www.ssntails.org>
  */
static inline void P_AddEachTimeThinker(sector_t *sec, line_t *sourceline)
{
	levelspecthink_t *eachtime;

	// create and initialize new thinker
	eachtime = Z_Calloc(sizeof (*eachtime), PU_LEVSPEC, NULL);
	P_AddThinker(&eachtime->thinker);

	eachtime->thinker.function.acp1 = (actionf_p1)T_EachTimeThinker;

	eachtime->sector = sec;
	eachtime->sourceline = sourceline;
}

/** Adds a camera scanner.
  *
  * \param sourcesec    Control sector.
  * \param actionsector Target sector.
  * \param angle        Angle of the source line.
  * \sa P_SpawnSpecials, T_CameraScanner
  * \author SSNTails <http://www.ssntails.org>
  */
static inline void P_AddCameraScanner(sector_t *sourcesec, sector_t *actionsector, angle_t angle)
{
	elevator_t *elevator; // Why not? LOL

	// create and initialize new elevator thinker
	elevator = Z_Calloc(sizeof (*elevator), PU_LEVSPEC, NULL);
	P_AddThinker(&elevator->thinker);

	elevator->thinker.function.acp1 = (actionf_p1)T_CameraScanner;
	elevator->type = elevateBounce;

	// set up the fields according to the type of elevator action
	elevator->sector = sourcesec;
	elevator->actionsector = actionsector;
	// Repurpose 'distance' to carry the line angle in whole degrees
	elevator->distance = FixedInt(AngleFixed(angle));
}

// FOF flags shared by every laser block spawned via EV_AddLaserThinker.
static const ffloortype_e laserflags = FF_EXISTS|FF_RENDERALL|FF_NOSHADE|FF_EXTRA|FF_CUTEXTRA;

/** Flashes a laser block.
  *
  * \param flash Thinker structure for this laser.
  * \sa EV_AddLaserThinker
  * \author SSNTails <http://www.ssntails.org>
  */
void T_LaserFlash(laserthink_t *flash)
{
	msecnode_t *node;
	mobj_t *thing;
	sector_t *sourcesec;
	ffloor_t *ffloor = flash->ffloor;
	sector_t *sector = flash->sector;
	fixed_t top, bottom;

	// Bail if the FOF was removed or disabled since the thinker was spawned
	if (!ffloor || !(ffloor->flags & FF_EXISTS))
		return;

	// Strobe the laser: visible on odd tics, invisible on even tics
	if (leveltime & 1)
		ffloor->flags |= FF_RENDERALL;
	else
		ffloor->flags &= ~FF_RENDERALL;

	sourcesec = ffloor->master->frontsector; // Less to type!

	// Emit the laser hum from the vertical center of the FOF
	sector->soundorg.z = (*ffloor->topheight + *ffloor->bottomheight)/2;
	S_StartSound(&sector->soundorg, sfx_laser);

	// Seek out objects to DESTROY! MUAHAHHAHAHAA!!!*cough*
	for (node = sector->touching_thinglist; node && node->m_thing; node = node->m_snext)
	{
		thing = node->m_thing;

		if ((ffloor->master->flags & ML_EFFECT1)
			&& thing->flags & MF_BOSS)
			continue; // Don't hurt bosses

		top = P_GetSpecialTopZ(thing, sourcesec, sector);
		bottom = P_GetSpecialBottomZ(thing, sourcesec, sector);

		// Skip objects entirely above or below the laser's vertical span
		if (thing->z >= top
		|| thing->z + thing->height <= bottom)
			continue;

		if (thing->flags & MF_SHOOTABLE)
			P_DamageMobj(thing, NULL, NULL, 1);
		else if (thing->type == MT_EGGSHIELD)
			P_KillMobj(thing, NULL, NULL);
	}
}

/** Adds a laser thinker to a 3Dfloor.
  *
  * \param sec         Control sector (becomes the laser 3Dfloor).
  * \param sec2        Target sector.
  * \param line        Control linedef.
  * \param secthinkers Lists of thinkers sorted by sector. May be NULL.
  * \sa T_LaserFlash
  * \author SSNTails <http://www.ssntails.org>
  */
static inline void EV_AddLaserThinker(sector_t *sec, sector_t *sec2, line_t *line, thinkerlist_t *secthinkers)
{
	laserthink_t *flash;
	ffloor_t *ffloor = P_AddFakeFloor(sec, sec2, line, laserflags, secthinkers);

	// P_AddFakeFloor can fail; no FOF means nothing to flash
	if (!ffloor)
		return;

	flash = Z_Calloc(sizeof (*flash), PU_LEVSPEC, NULL);

	P_AddThinker(&flash->thinker);

	flash->thinker.function.acp1 = (actionf_p1)T_LaserFlash;
	flash->ffloor = ffloor;
	flash->sector = sec; // For finding mobjs
	flash->sec = sec2;
	flash->sourceline = line;
}

//
// P_RunLevelLoadExecutors
//
// After loading/spawning all other specials
// and items, execute these.
//
static void P_RunLevelLoadExecutors(void)
{
	size_t i;

	// Linedef type 399 is the "Level Load" trigger executor
	for (i = 0; i < numlines; i++)
	{
		if (lines[i].special == 399)
			P_RunTriggerLinedef(&lines[i], NULL, NULL);
	}
}

/** After the map has loaded, scans for specials that spawn 3Dfloors and
  * thinkers.
  *
  * \todo Split up into multiple functions.
  * \todo Get rid of all the magic numbers.
  * \todo Potentially use 'fromnetsave' to stop any new thinkers from being created
  *       as they'll just be erased by UnArchiveThinkers.
  * \sa P_SpawnPrecipitation, P_SpawnFriction, P_SpawnPushers, P_SpawnScrollers
  */
void P_SpawnSpecials(INT32 fromnetsave)
{
	sector_t *sector;
	size_t i;
	INT32 j;
	thinkerlist_t *secthinkers;
	thinker_t *th;

	// This used to be used, and *should* be used in the future,
	// but currently isn't.
	(void)fromnetsave;

	// Set the default gravity. Custom gravity overrides this setting.
	gravity = FRACUNIT/2;

	// Defaults in case levels don't have them set.
	sstimer = 90*TICRATE + 6;
	totalrings = 1;

	CheckForBustableBlocks = CheckForBouncySector = CheckForQuicksand = CheckForMarioBlocks = CheckForFloatBob = CheckForReverseGravity = false;

	// Init special SECTORs.
sector = sectors; for (i = 0; i < numsectors; i++, sector++) { if (!sector->special) continue; // Process Section 1 switch(GETSECSPECIAL(sector->special, 1)) { case 5: // Spikes P_AddSpikeThinker(sector, (INT32)(sector-sectors)); break; case 15: // Bouncy sector CheckForBouncySector = true; break; } // Process Section 2 switch(GETSECSPECIAL(sector->special, 2)) { case 10: // Time for special stage sstimer = (sector->floorheight>>FRACBITS) * TICRATE + 6; // Time to finish totalrings = sector->ceilingheight>>FRACBITS; // Ring count for special stage break; case 11: // Custom global gravity! gravity = sector->floorheight/1000; break; } // Process Section 3 /* switch(GETSECSPECIAL(player->specialsector, 3)) { }*/ // Process Section 4 switch(GETSECSPECIAL(sector->special, 4)) { case 10: // Circuit finish line if (gametype == GT_RACE) circuitmap = true; break; } } if (mapheaderinfo[gamemap-1]->weather == 2) // snow curWeather = PRECIP_SNOW; else if (mapheaderinfo[gamemap-1]->weather == 3) // rain curWeather = PRECIP_RAIN; else if (mapheaderinfo[gamemap-1]->weather == 1) // storm curWeather = PRECIP_STORM; else if (mapheaderinfo[gamemap-1]->weather == 5) // storm w/o rain curWeather = PRECIP_STORM_NORAIN; else if (mapheaderinfo[gamemap-1]->weather == 6) // storm w/o lightning curWeather = PRECIP_STORM_NOSTRIKES; else curWeather = PRECIP_NONE; P_InitTagLists(); // Create xref tables for tags P_SearchForDisableLinedefs(); // Disable linedefs are now allowed to disable *any* line P_SpawnScrollers(); // Add generalized scrollers P_SpawnFriction(); // Friction model using linedefs P_SpawnPushers(); // Pusher model using linedefs // Look for thinkers that affect FOFs, and sort them by sector secthinkers = Z_Calloc(numsectors * sizeof(thinkerlist_t), PU_STATIC, NULL); // Firstly, find out how many there are in each sector for (th = thinkercap.next; th != &thinkercap; th = th->next) { if (th->function.acp1 == (actionf_p1)T_SpikeSector) secthinkers[((levelspecthink_t *)th)->sector 
- sectors].count++; else if (th->function.acp1 == (actionf_p1)T_Friction) secthinkers[((friction_t *)th)->affectee].count++; else if (th->function.acp1 == (actionf_p1)T_Pusher) secthinkers[((pusher_t *)th)->affectee].count++; } // Allocate each list, and then zero the count so we can use it to track // the end of the list as we add the thinkers for (i = 0; i < numsectors; i++) if(secthinkers[i].count > 0) { secthinkers[i].thinkers = Z_Malloc(secthinkers[i].count * sizeof(thinker_t *), PU_STATIC, NULL); secthinkers[i].count = 0; } // Finally, populate the lists. for (th = thinkercap.next; th != &thinkercap; th = th->next) { size_t secnum = (size_t)-1; if (th->function.acp1 == (actionf_p1)T_SpikeSector) secnum = ((levelspecthink_t *)th)->sector - sectors; else if (th->function.acp1 == (actionf_p1)T_Friction) secnum = ((friction_t *)th)->affectee; else if (th->function.acp1 == (actionf_p1)T_Pusher) secnum = ((pusher_t *)th)->affectee; if (secnum != (size_t)-1) secthinkers[secnum].thinkers[secthinkers[secnum].count++] = th; } // Init line EFFECTs for (i = 0; i < numlines; i++) { // set line specials to 0 here too, same reason as above if (netgame || multiplayer) { // future: nonet flag? } else if ((lines[i].flags & ML_NETONLY) == ML_NETONLY) { lines[i].special = 0; continue; } else { if (players[consoleplayer].charability == CA_THOK && (lines[i].flags & ML_NOSONIC)) { lines[i].special = 0; continue; } if (players[consoleplayer].charability == CA_FLY && (lines[i].flags & ML_NOTAILS)) { lines[i].special = 0; continue; } if (players[consoleplayer].charability == CA_GLIDEANDCLIMB && (lines[i].flags & ML_NOKNUX)) { lines[i].special = 0; continue; } } switch (lines[i].special) { INT32 s; size_t sec; ffloortype_e ffloorflags; case 1: // Definable gravity per sector sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) { sectors[s].gravity = &sectors[sec].floorheight; // This allows it to change in realtime! 
if (lines[i].flags & ML_NOCLIMB) sectors[s].verticalflip = true; else sectors[s].verticalflip = false; CheckForReverseGravity = sectors[s].verticalflip; } break; case 2: // Custom exit break; case 3: // Zoom Tube Parameters break; case 4: // Speed pad (combines with sector special Section3:5 or Section3:6) break; case 5: // Change camera info sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_AddCameraScanner(&sectors[sec], &sectors[s], R_PointToAngle2(lines[i].v2->x, lines[i].v2->y, lines[i].v1->x, lines[i].v1->y)); break; #ifdef PARANOIA case 6: // Disable tags if level not cleared I_Error("Failed to catch a disable linedef"); break; #endif case 7: // Flat alignment if (lines[i].flags & ML_EFFECT4) // Align angle { if (!(lines[i].flags & ML_EFFECT5)) // Align floor unless ALLTRIGGER flag is set { for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].spawn_flrpic_angle = sectors[s].floorpic_angle = R_PointToAngle2(lines[i].v1->x, lines[i].v1->y, lines[i].v2->x, lines[i].v2->y); } if (!(lines[i].flags & ML_BOUNCY)) // Align ceiling unless BOUNCY flag is set { for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].spawn_ceilpic_angle = sectors[s].ceilingpic_angle = R_PointToAngle2(lines[i].v1->x, lines[i].v1->y, lines[i].v2->x, lines[i].v2->y); } } else // Do offsets { if (!(lines[i].flags & ML_BLOCKMONSTERS)) // Align floor unless BLOCKMONSTERS flag is set { for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) { sectors[s].floor_xoffs += lines[i].dx; sectors[s].floor_yoffs += lines[i].dy; // saved for netgames sectors[s].spawn_flr_xoffs = sectors[s].floor_xoffs; sectors[s].spawn_flr_yoffs = sectors[s].floor_yoffs; } } if (!(lines[i].flags & ML_NOCLIMB)) // Align ceiling unless NOCLIMB flag is set { for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) { sectors[s].ceiling_xoffs += lines[i].dx; sectors[s].ceiling_yoffs += lines[i].dy; // 
saved for netgames sectors[s].spawn_ceil_xoffs = sectors[s].ceiling_xoffs; sectors[s].spawn_ceil_yoffs = sectors[s].ceiling_yoffs; } } } break; case 8: // Sector Parameters for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) { if (lines[i].flags & ML_NOCLIMB) { sectors[s].flags &= ~SF_FLIPSPECIAL_FLOOR; sectors[s].flags |= SF_FLIPSPECIAL_CEILING; } else if (lines[i].flags & ML_EFFECT4) sectors[s].flags |= SF_FLIPSPECIAL_BOTH; if (lines[i].flags & ML_EFFECT3) sectors[s].flags |= SF_TRIGGERSPECIAL_TOUCH; if (lines[i].frontsector && GETSECSPECIAL(lines[i].frontsector->special, 4) == 12) sectors[s].camsec = sides[*lines[i].sidenum].sector-sectors; } break; case 9: // Chain Parameters break; case 10: // Vertical culling plane for sprites and FOFs sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].cullheight = &lines[i]; // This allows it to change in realtime! break; case 50: // Insta-Lower Sector EV_DoFloor(&lines[i], instantLower); break; case 51: // Instant raise for ceilings EV_DoCeiling(&lines[i], instantRaise); break; case 52: // Continuously Falling sector EV_DoContinuousFall(lines[i].frontsector, lines[i].backsector, P_AproxDistance(lines[i].dx, lines[i].dy), (lines[i].flags & ML_NOCLIMB)); break; case 53: // New super cool and awesome moving floor and ceiling type case 54: // New super cool and awesome moving floor type if (lines[i].backsector) EV_DoFloor(&lines[i], bounceFloor); if (lines[i].special == 54) break; case 55: // New super cool and awesome moving ceiling type if (lines[i].backsector) EV_DoCeiling(&lines[i], bounceCeiling); break; case 56: // New super cool and awesome moving floor and ceiling crush type case 57: // New super cool and awesome moving floor crush type if (lines[i].backsector) EV_DoFloor(&lines[i], bounceFloorCrush); if (lines[i].special == 57) break; //only move the floor case 58: // New super cool and awesome moving ceiling crush type if 
(lines[i].backsector) EV_DoCeiling(&lines[i], bounceCeilingCrush); break; case 59: // Activate floating platform EV_DoElevator(&lines[i], elevateContinuous, false); break; case 60: // Floating platform with adjustable speed EV_DoElevator(&lines[i], elevateContinuous, true); break; case 61: // Crusher! EV_DoCrush(&lines[i], crushAndRaise); break; case 62: // Crusher (up and then down)! EV_DoCrush(&lines[i], fastCrushAndRaise); break; case 63: // support for drawn heights coming from different sector sec = sides[*lines[i].sidenum].sector-sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].heightsec = (INT32)sec; break; case 64: // Appearing/Disappearing FOF option if (lines[i].flags & ML_BLOCKMONSTERS) { // Find FOFs by control sector tag for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) for (j = 0; (unsigned)j < sectors[s].linecount; j++) if (sectors[s].lines[j]->special >= 100 && sectors[s].lines[j]->special < 300) Add_MasterDisappearer(abs(lines[i].dx>>FRACBITS), abs(lines[i].dy>>FRACBITS), abs(sides[lines[i].sidenum[0]].sector->floorheight>>FRACBITS), (INT32)(sectors[s].lines[j]-lines), (INT32)i); } else // Find FOFs by effect sector tag for (s = -1; (s = P_FindLineFromLineTag(lines + i, s)) >= 0 ;) { if ((size_t)s == i) continue; if (sides[lines[s].sidenum[0]].sector->tag == sides[lines[i].sidenum[0]].sector->tag) Add_MasterDisappearer(abs(lines[i].dx>>FRACBITS), abs(lines[i].dy>>FRACBITS), abs(sides[lines[i].sidenum[0]].sector->floorheight>>FRACBITS), s, (INT32)i); } break; case 65: // Bridge Thinker /* // Disable this until it's working right! 
for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_AddBridgeThinker(&lines[i], &sectors[s]);*/ break; case 100: // FOF (solid, opaque, shadows) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL, secthinkers); break; case 101: // FOF (solid, opaque, no shadows) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_NOSHADE|FF_CUTLEVEL, secthinkers); break; case 102: // TL block: FOF (solid, translucent) ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_NOSHADE|FF_TRANSLUCENT|FF_EXTRA|FF_CUTEXTRA; // Draw the 'insides' of the block too if (lines[i].flags & ML_NOCLIMB) { ffloorflags |= FF_CUTLEVEL; ffloorflags |= FF_BOTHPLANES; ffloorflags |= FF_ALLSIDES; ffloorflags &= ~FF_EXTRA; ffloorflags &= ~FF_CUTEXTRA; } P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 103: // Solid FOF with no floor/ceiling (quite possibly useless) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERSIDES|FF_NOSHADE|FF_CUTLEVEL, secthinkers); break; case 104: // 3D Floor type that doesn't draw sides // If line has no-climb set, give it shadows, otherwise don't ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERPLANES|FF_CUTLEVEL; if (!(lines[i].flags & ML_NOCLIMB)) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 105: // FOF (solid, invisible) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_NOSHADE, secthinkers); break; case 120: // Opaque water ffloorflags = FF_EXISTS|FF_RENDERALL|FF_SWIMMABLE|FF_BOTHPLANES|FF_ALLSIDES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 121: // TL water ffloorflags = FF_EXISTS|FF_RENDERALL|FF_TRANSLUCENT|FF_SWIMMABLE|FF_BOTHPLANES|FF_ALLSIDES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; 
if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 122: // Opaque water, no sides ffloorflags = FF_EXISTS|FF_RENDERPLANES|FF_SWIMMABLE|FF_BOTHPLANES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 123: // TL water, no sides ffloorflags = FF_EXISTS|FF_RENDERPLANES|FF_TRANSLUCENT|FF_SWIMMABLE|FF_BOTHPLANES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 124: // goo water ffloorflags = FF_EXISTS|FF_RENDERALL|FF_TRANSLUCENT|FF_SWIMMABLE|FF_GOOWATER|FF_BOTHPLANES|FF_ALLSIDES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 125: // goo water, no sides ffloorflags = FF_EXISTS|FF_RENDERPLANES|FF_TRANSLUCENT|FF_SWIMMABLE|FF_GOOWATER|FF_BOTHPLANES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_DOUBLESHADOW; if (lines[i].flags & ML_EFFECT4) ffloorflags |= FF_COLORMAPONLY; if (lines[i].flags & ML_EFFECT5) ffloorflags |= FF_RIPPLE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 140: // 'Platform' - You can jump up through it // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_BOTHPLANES|FF_ALLSIDES; if 
(lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 141: // Translucent "platform" // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_TRANSLUCENT|FF_EXTRA|FF_CUTEXTRA; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; // Draw the 'insides' of the block too if (lines[i].flags & ML_EFFECT2) { ffloorflags |= FF_CUTLEVEL; ffloorflags |= FF_BOTHPLANES; ffloorflags |= FF_ALLSIDES; ffloorflags &= ~FF_EXTRA; ffloorflags &= ~FF_CUTEXTRA; } P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 142: // Translucent "platform" with no sides ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERPLANES|FF_TRANSLUCENT|FF_PLATFORM|FF_EXTRA|FF_CUTEXTRA; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; // Draw the 'insides' of the block too if (lines[i].flags & ML_EFFECT2) { ffloorflags |= FF_CUTLEVEL; ffloorflags |= FF_BOTHPLANES; ffloorflags |= FF_ALLSIDES; ffloorflags &= ~FF_EXTRA; ffloorflags &= ~FF_CUTEXTRA; } P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 143: // 'Reverse platform' - You fall through it // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_REVERSEPLATFORM|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 144: // Translucent "reverse platform" // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_REVERSEPLATFORM|FF_TRANSLUCENT|FF_EXTRA|FF_CUTEXTRA; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; // Draw the 'insides' of the block too if (lines[i].flags & ML_EFFECT2) { ffloorflags |= FF_CUTLEVEL; ffloorflags |= FF_BOTHPLANES; ffloorflags |= FF_ALLSIDES; ffloorflags &= ~FF_EXTRA; ffloorflags &= ~FF_CUTEXTRA; } 
P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 145: // Translucent "reverse platform" with no sides ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERPLANES|FF_TRANSLUCENT|FF_REVERSEPLATFORM|FF_EXTRA|FF_CUTEXTRA; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; // Draw the 'insides' of the block too if (lines[i].flags & ML_EFFECT2) { ffloorflags |= FF_CUTLEVEL; ffloorflags |= FF_BOTHPLANES; ffloorflags |= FF_ALLSIDES; ffloorflags &= ~FF_EXTRA; ffloorflags &= ~FF_CUTEXTRA; } P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 146: // Intangible floor/ceiling with solid sides (fences/hoops maybe?) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERSIDES|FF_ALLSIDES|FF_INTANGABLEFLATS, secthinkers); break; case 150: // Air bobbing platform case 151: // Adjustable air bobbing platform P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL, secthinkers); lines[i].flags |= ML_BLOCKMONSTERS; P_AddOldAirbob(lines[i].frontsector, lines + i, (lines[i].special != 151)); break; case 152: // Adjustable air bobbing platform in reverse P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL, secthinkers); P_AddOldAirbob(lines[i].frontsector, lines + i, true); break; case 160: // Float/bob platform P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_FLOATBOB, secthinkers); break; case 170: // Crumbling platform P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_CRUMBLE, secthinkers); break; case 171: // Crumbling platform that will not return P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_CRUMBLE|FF_NORETURN, secthinkers); break; case 172: // "Platform" that crumbles and returns ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_CRUMBLE|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 173: 
// "Platform" that crumbles and doesn't return ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_CRUMBLE|FF_NORETURN|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 174: // Translucent "platform" that crumbles and returns ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_PLATFORM|FF_CRUMBLE|FF_TRANSLUCENT|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 175: // Translucent "platform" that crumbles and doesn't return ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_PLATFORM|FF_CRUMBLE|FF_NORETURN|FF_TRANSLUCENT|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) // shade it unless no-climb ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 176: // Air bobbing platform that will crumble and bob on the water when it falls and hits P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_FLOATBOB|FF_CRUMBLE, secthinkers); lines[i].flags |= ML_BLOCKMONSTERS; P_AddOldAirbob(lines[i].frontsector, lines + i, true); break; case 177: // Air bobbing platform that will crumble and bob on // the water when it falls and hits, then never return P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_FLOATBOB|FF_CRUMBLE|FF_NORETURN, secthinkers); lines[i].flags |= ML_BLOCKMONSTERS; P_AddOldAirbob(lines[i].frontsector, lines + i, true); break; case 178: // Crumbling platform that will float when it hits water P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CRUMBLE|FF_FLOATBOB, secthinkers); break; case 179: // Crumbling platform that will float when it hits water, but not return P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_CRUMBLE|FF_FLOATBOB|FF_NORETURN, secthinkers); break; case 180: // 
Air bobbing platform that will crumble P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_CRUMBLE, secthinkers); lines[i].flags |= ML_BLOCKMONSTERS; P_AddOldAirbob(lines[i].frontsector, lines + i, true); break; case 190: // Rising Platform FOF (solid, opaque, shadows) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 191: // Rising Platform FOF (solid, opaque, no shadows) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_NOSHADE|FF_CUTLEVEL, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 192: // Rising Platform TL block: FOF (solid, translucent) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_NOSHADE|FF_TRANSLUCENT|FF_EXTRA|FF_CUTEXTRA, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 193: // Rising Platform FOF (solid, invisible) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_NOSHADE, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 194: // Rising Platform 'Platform' - You can jump up through it // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_BOTHPLANES|FF_ALLSIDES; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 195: // Rising Platform Translucent "platform" // If line has no-climb set, don't give it shadows, otherwise do ffloorflags = FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_PLATFORM|FF_TRANSLUCENT|FF_BOTHPLANES|FF_ALLSIDES|FF_EXTRA|FF_CUTEXTRA; if (lines[i].flags & ML_NOCLIMB) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); P_AddRaiseThinker(lines[i].frontsector, &lines[i]); break; case 200: // Double light effect P_AddFakeFloorsByLine(i, FF_EXISTS|FF_CUTSPRITES|FF_DOUBLESHADOW, secthinkers); break; case 201: 
// Light effect P_AddFakeFloorsByLine(i, FF_EXISTS|FF_CUTSPRITES, secthinkers); break; case 202: // Fog ffloorflags = FF_EXISTS|FF_RENDERALL|FF_FOG|FF_BOTHPLANES|FF_INVERTPLANES|FF_ALLSIDES|FF_INVERTSIDES|FF_CUTEXTRA|FF_EXTRA|FF_DOUBLESHADOW|FF_CUTSPRITES; sec = sides[*lines[i].sidenum].sector - sectors; // SoM: Because it's fog, check for an extra colormap and set // the fog flag... if (sectors[sec].extra_colormap) sectors[sec].extra_colormap->fog = 1; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 220: // Like opaque water, but not swimmable. (Good for snow effect on FOFs) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_RENDERALL|FF_BOTHPLANES|FF_ALLSIDES|FF_CUTEXTRA|FF_EXTRA|FF_CUTSPRITES, secthinkers); break; case 221: // FOF (intangible, translucent) // If line has no-climb set, give it shadows, otherwise don't ffloorflags = FF_EXISTS|FF_RENDERALL|FF_TRANSLUCENT|FF_EXTRA|FF_CUTEXTRA|FF_CUTSPRITES; if (!(lines[i].flags & ML_NOCLIMB)) ffloorflags |= FF_NOSHADE; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 222: // FOF with no floor/ceiling (good for GFZGRASS effect on FOFs) // If line has no-climb set, give it shadows, otherwise don't ffloorflags = FF_EXISTS|FF_RENDERSIDES|FF_ALLSIDES; if (!(lines[i].flags & ML_NOCLIMB)) ffloorflags |= FF_NOSHADE|FF_CUTSPRITES; P_AddFakeFloorsByLine(i, ffloorflags, secthinkers); break; case 223: // FOF (intangible, invisible) - for combining specials in a sector P_AddFakeFloorsByLine(i, FF_EXISTS|FF_NOSHADE, secthinkers); break; case 250: // Mario Block P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL|FF_MARIO, secthinkers); break; case 251: // A THWOMP! 
sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) { P_AddThwompThinker(&sectors[sec], &sectors[s], &lines[i]); P_AddFakeFloor(&sectors[s], &sectors[sec], lines + i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_CUTLEVEL, secthinkers); } break; case 252: // Shatter block (breaks when touched) if (lines[i].flags & ML_NOCLIMB) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_BUSTUP|FF_SHATTER|FF_SHATTERBOTTOM, secthinkers); else P_AddFakeFloorsByLine(i, FF_EXISTS|FF_RENDERALL|FF_BUSTUP|FF_SHATTER, secthinkers); break; case 253: // Translucent shatter block (see 76) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_RENDERALL|FF_BUSTUP|FF_SHATTER|FF_TRANSLUCENT, secthinkers); break; case 254: // Bustable block if (lines[i].flags & ML_NOCLIMB) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_BUSTUP|FF_ONLYKNUX, secthinkers); else P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_BUSTUP, secthinkers); break; case 255: // Spin bust block (breaks when jumped or spun downwards onto) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_BUSTUP|FF_SPINBUST, secthinkers); break; case 256: // Translucent spin bust block (see 78) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_SOLID|FF_RENDERALL|FF_BUSTUP|FF_SPINBUST|FF_TRANSLUCENT, secthinkers); break; case 257: // Quicksand if (lines[i].flags & ML_EFFECT5) P_AddFakeFloorsByLine(i, FF_EXISTS|FF_QUICKSAND|FF_RENDERALL|FF_ALLSIDES|FF_CUTSPRITES|FF_RIPPLE, secthinkers); else P_AddFakeFloorsByLine(i, FF_EXISTS|FF_QUICKSAND|FF_RENDERALL|FF_ALLSIDES|FF_CUTSPRITES, secthinkers); break; case 258: // Laser block sec = sides[*lines[i].sidenum].sector - sectors; // No longer totally disrupts netgames for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) EV_AddLaserThinker(&sectors[s], &sectors[sec], lines + i, secthinkers); break; case 259: // Make-Your-Own FOF! 
if (lines[i].sidenum[1] != 0xffff) { UINT8 *data = W_CacheLumpNum(lastloadedmaplumpnum + ML_SIDEDEFS,PU_STATIC); UINT16 b; for (b = 0; b < (INT16)numsides; b++) { register mapsidedef_t *msd = (mapsidedef_t *)data + b; if (b == lines[i].sidenum[1]) { if ((msd->toptexture[0] >= '0' && msd->toptexture[0] <= '9') || (msd->toptexture[0] >= 'A' && msd->toptexture[0] <= 'F')) { ffloortype_e FOF_Flags = axtoi(msd->toptexture); P_AddFakeFloorsByLine(i, FOF_Flags, secthinkers); break; } else I_Error("Make-Your-Own-FOF (tag %d) needs a value in the linedef's second side upper texture field.", lines[i].tag); } } Z_Free(data); } else I_Error("Make-Your-Own FOF (tag %d) found without a 2nd linedef side!", lines[i].tag); break; case 300: // Linedef executor (combines with sector special 974/975) and commands case 302: case 303: case 304: // Charability linedef executors case 305: case 307: break; case 308: // Race-only linedef executor. Triggers once. if (gametype != GT_RACE && gametype != GT_COMPETITION) lines[i].special = 0; break; // Linedef executor triggers for CTF teams. 
case 309: case 311: if (gametype != GT_CTF) lines[i].special = 0; break; // Each time executors case 306: case 301: case 310: case 312: sec = sides[*lines[i].sidenum].sector - sectors; P_AddEachTimeThinker(&sectors[sec], &lines[i]); break; // No More Enemies Linedef Exec case 313: sec = sides[*lines[i].sidenum].sector - sectors; P_AddNoEnemiesThinker(&sectors[sec], &lines[i]); break; // Pushable linedef executors (count # of pushables) case 314: case 315: break; // Unlock trigger executors case 317: case 318: break; case 319: case 320: break; // Trigger on X calls case 321: case 322: if (lines[i].flags & ML_NOCLIMB && sides[lines[i].sidenum[0]].rowoffset > 0) // optional "starting" count lines[i].callcount = sides[lines[i].sidenum[0]].rowoffset>>FRACBITS; else lines[i].callcount = sides[lines[i].sidenum[0]].textureoffset>>FRACBITS; if (lines[i].special == 322) // Each time { sec = sides[*lines[i].sidenum].sector - sectors; P_AddEachTimeThinker(&sectors[sec], &lines[i]); } break; case 399: // Linedef execute on map load // This is handled in P_RunLevelLoadExecutors. 
break; case 400: case 401: case 402: case 403: case 404: case 405: case 406: case 407: case 408: case 409: case 410: case 411: case 412: case 413: case 414: case 415: case 416: case 417: case 418: case 419: case 420: case 421: case 422: case 423: case 424: case 425: case 426: case 427: case 428: case 429: case 430: case 431: break; // 500 is used for a scroller // 501 is used for a scroller // 502 is used for a scroller // 503 is used for a scroller // 504 is used for a scroller // 505 is used for a scroller // 510 is used for a scroller // 511 is used for a scroller // 512 is used for a scroller // 513 is used for a scroller // 514 is used for a scroller // 515 is used for a scroller // 520 is used for a scroller // 521 is used for a scroller // 522 is used for a scroller // 523 is used for a scroller // 524 is used for a scroller // 525 is used for a scroller // 530 is used for a scroller // 531 is used for a scroller // 532 is used for a scroller // 533 is used for a scroller // 534 is used for a scroller // 535 is used for a scroller // 540 is used for friction // 541 is used for wind // 542 is used for upwards wind // 543 is used for downwards wind // 544 is used for current // 545 is used for upwards current // 546 is used for downwards current // 547 is used for push/pull case 600: // floor lighting independently (e.g. 
lava) sec = sides[*lines[i].sidenum].sector-sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].floorlightsec = (INT32)sec; break; case 601: // ceiling lighting independently sec = sides[*lines[i].sidenum].sector-sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].ceilinglightsec = (INT32)sec; break; case 602: // Adjustable pulsating light sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_SpawnAdjustableGlowingLight(&sectors[sec], &sectors[s], P_AproxDistance(lines[i].dx, lines[i].dy)>>FRACBITS); break; case 603: // Adjustable flickering light sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_SpawnAdjustableFireFlicker(&sectors[sec], &sectors[s], P_AproxDistance(lines[i].dx, lines[i].dy)>>FRACBITS); break; case 604: // Adjustable Blinking Light (unsynchronized) sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_SpawnAdjustableStrobeFlash(&sectors[sec], &sectors[s], abs(lines[i].dx)>>FRACBITS, abs(lines[i].dy)>>FRACBITS, false); break; case 605: // Adjustable Blinking Light (synchronized) sec = sides[*lines[i].sidenum].sector - sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) P_SpawnAdjustableStrobeFlash(&sectors[sec], &sectors[s], abs(lines[i].dx)>>FRACBITS, abs(lines[i].dy)>>FRACBITS, true); break; case 606: // HACK! Copy colormaps. Just plain colormaps. for (s = -1; (s = P_FindSectorFromLineTag(lines + i, s)) >= 0 ;) sectors[s].midmap = lines[i].frontsector->midmap; break; #ifdef ESLOPE // Slope copy specials. Handled here for sanity. 
case 720: case 721: case 722: P_CopySectorSlope(&lines[i]); break; #endif default: break; } } // Allocate each list for (i = 0; i < numsectors; i++) if(secthinkers[i].thinkers) Z_Free(secthinkers[i].thinkers); Z_Free(secthinkers); #ifdef POLYOBJECTS // haleyjd 02/20/06: spawn polyobjects Polyobj_InitLevel(); for (i = 0; i < numlines; i++) { switch (lines[i].special) { case 30: // Polyobj_Flag EV_DoPolyObjFlag(&lines[i]); break; case 31: // Polyobj_Displace PolyDisplace(&lines[i]); break; } } #endif P_RunLevelLoadExecutors(); } /** Adds 3Dfloors as appropriate based on a common control linedef. * * \param line Control linedef to use. * \param ffloorflags 3Dfloor flags to use. * \param secthkiners Lists of thinkers sorted by sector. May be NULL. * \sa P_SpawnSpecials, P_AddFakeFloor * \author Graue <graue@oceanbase.org> */ static void P_AddFakeFloorsByLine(size_t line, ffloortype_e ffloorflags, thinkerlist_t *secthinkers) { INT32 s; size_t sec = sides[*lines[line].sidenum].sector-sectors; for (s = -1; (s = P_FindSectorFromLineTag(lines+line, s)) >= 0 ;) P_AddFakeFloor(&sectors[s], &sectors[sec], lines+line, ffloorflags, secthinkers); } /* SoM: 3/8/2000: General scrolling functions. 
T_Scroll, Add_Scroller, Add_WallScroller, P_SpawnScrollers */ // helper function for T_Scroll static void P_DoScrollMove(mobj_t *thing, fixed_t dx, fixed_t dy, INT32 exclusive) { fixed_t fuckaj = 0; // Nov 05 14:12:08 <+MonsterIestyn> I've heard of explicitly defined variables but this is ridiculous if (thing->player) { if (!(dx | dy)) { thing->player->cmomx = 0; thing->player->cmomy = 0; } else { thing->player->cmomx += dx; thing->player->cmomy += dy; thing->player->cmomx = FixedMul(thing->player->cmomx, 0xe800); thing->player->cmomy = FixedMul(thing->player->cmomy, 0xe800); } } if (thing->player && (thing->player->pflags & PF_SPINNING) && (thing->player->rmomx || thing->player->rmomy) && !(thing->player->pflags & PF_STARTDASH)) fuckaj = FixedDiv(549*ORIG_FRICTION,500*FRACUNIT); else if (thing->friction != ORIG_FRICTION) fuckaj = thing->friction; if (fuckaj) { // refactor thrust for new friction dx = FixedDiv(dx, CARRYFACTOR); dy = FixedDiv(dy, CARRYFACTOR); dx = FixedMul(dx, FRACUNIT-fuckaj); dy = FixedMul(dy, FRACUNIT-fuckaj); } thing->momx += dx; thing->momy += dy; if (exclusive) thing->flags2 |= MF2_PUSHED; } /** Processes an active scroller. * This function, with the help of r_plane.c and r_bsp.c, supports generalized * scrolling floors and walls, with optional mobj-carrying properties, e.g. * conveyor belts, rivers, etc. A linedef with a special type affects all * tagged sectors the same way, by creating scrolling and/or object-carrying * properties. Multiple linedefs may be used on the same sector and are * cumulative, although the special case of scrolling a floor and carrying * things on it requires only one linedef. * * The linedef's direction determines the scrolling direction, and the * linedef's length determines the scrolling speed. This was designed so an * edge around a sector can be used to control the direction of the sector's * scrolling, which is usually what is desired. * * \param s Thinker for the scroller to process. 
  * \todo Split up into multiple functions.
  * \todo Use attached lists to make ::sc_carry_ceiling case faster and
  *       cleaner.
  * \sa Add_Scroller, Add_WallScroller, P_SpawnScrollers
  * \author Steven McGranahan
  * \author Graue <graue@oceanbase.org>
  */
void T_Scroll(scroll_t *s)
{
	fixed_t dx = s->dx, dy = s->dy;
	boolean is3dblock = false;

	if (s->control != -1)
	{ // compute scroll amounts based on a sector's height changes
		fixed_t height = sectors[s->control].floorheight +
			sectors[s->control].ceilingheight;
		fixed_t delta = height - s->last_height;
		s->last_height = height;
		dx = FixedMul(dx, delta);
		dy = FixedMul(dy, delta);
	}

	if (s->accel)
	{
		// Accelerative scroller: speed accumulates each tic.
		s->vdx = dx += s->vdx;
		s->vdy = dy += s->vdy;
	}

//	if (!(dx | dy)) // no-op if both (x,y) offsets 0
//		return;

	switch (s->type)
	{
		side_t *side;
		sector_t *sec;
		fixed_t height;
		msecnode_t *node;
		mobj_t *thing;
		line_t *line;
		size_t i;
		INT32 sect;

		case sc_side: // scroll wall texture
			side = sides + s->affectee;
			side->textureoffset += dx;
			side->rowoffset += dy;
			break;

		case sc_floor: // scroll floor texture
			sec = sectors + s->affectee;
			sec->floor_xoffs += dx;
			sec->floor_yoffs += dy;
			break;

		case sc_ceiling: // scroll ceiling texture
			sec = sectors + s->affectee;
			sec->ceiling_xoffs += dx;
			sec->ceiling_yoffs += dy;
			break;

		case sc_carry: // carry objects on floor (FOF or normal sector)
			sec = sectors + s->affectee;
			height = sec->floorheight;

			// sec is the control sector, find the real sector(s) to use
			for (i = 0; i < sec->linecount; i++)
			{
				line = sec->lines[i];
				// Specials 100-299 are the FOF (3D block) range in this engine.
				if (line->special < 100 || line->special >= 300)
					is3dblock = false;
				else
					is3dblock = true;

				if (!is3dblock)
					continue;

				for (sect = -1; (sect = P_FindSectorFromTag(line->tag, sect)) >= 0 ;)
				{
					sector_t *psec;
					psec = sectors + sect;

					for (node = psec->touching_thinglist; node; node = node->m_snext)
					{
						thing = node->m_thing;

						if (thing->flags2 & MF2_PUSHED) // Already pushed this tic by an exclusive pusher.
							continue;

						height = P_GetSpecialBottomZ(thing, sec, psec);

						if (!(thing->flags & MF_NOCLIP)) // Thing must be clipped
						if (!(thing->flags & MF_NOGRAVITY || thing->z+thing->height != height)) // Thing must a) be non-floating and have z+height == height
						{
							// Move objects only if on floor
							// non-floating, and clipped.
							P_DoScrollMove(thing, dx, dy, s->exclusive);
						}
					} // end of for loop through touching_thinglist
				} // end of loop through sectors
			}

			// Control sector had no FOF lines: carry things touching it directly.
			if (!is3dblock)
			{
				for (node = sec->touching_thinglist; node; node = node->m_snext)
				{
					thing = node->m_thing;
					if (thing->flags2 & MF2_PUSHED)
						continue;

					height = P_GetSpecialBottomZ(thing, sec, sec);

					if (!(thing->flags & MF_NOCLIP) &&
						(!(thing->flags & MF_NOGRAVITY || thing->z > height)))
					{
						// Move objects only if on floor or underwater,
						// non-floating, and clipped.
						P_DoScrollMove(thing, dx, dy, s->exclusive);
					}
				}
			}
			break;

		case sc_carry_ceiling: // carry on ceiling (FOF scrolling)
			sec = sectors + s->affectee;
			height = sec->ceilingheight;

			// sec is the control sector, find the real sector(s) to use
			for (i = 0; i < sec->linecount; i++)
			{
				line = sec->lines[i];
				if (line->special < 100 || line->special >= 300)
					is3dblock = false;
				else
					is3dblock = true;

				if (!is3dblock)
					continue;

				for (sect = -1; (sect = P_FindSectorFromTag(line->tag, sect)) >= 0 ;)
				{
					sector_t *psec;
					psec = sectors + sect;

					for (node = psec->touching_thinglist; node; node = node->m_snext)
					{
						thing = node->m_thing;

						if (thing->flags2 & MF2_PUSHED)
							continue;

						height = P_GetSpecialTopZ(thing, sec, psec);

						if (!(thing->flags & MF_NOCLIP)) // Thing must be clipped
						if (!(thing->flags & MF_NOGRAVITY || thing->z != height))// Thing must a) be non-floating and have z == height
						{
							// Move objects only if on floor or underwater,
							// non-floating, and clipped.
							P_DoScrollMove(thing, dx, dy, s->exclusive);
						}
					} // end of for loop through touching_thinglist
				} // end of loop through sectors
			}

			if (!is3dblock)
			{
				for (node = sec->touching_thinglist; node; node = node->m_snext)
				{
					thing = node->m_thing;
					if (thing->flags2 & MF2_PUSHED)
						continue;

					height = P_GetSpecialTopZ(thing, sec, sec);

					if (!(thing->flags & MF_NOCLIP) &&
						(!(thing->flags & MF_NOGRAVITY || thing->z+thing->height < height)))
					{
						// Move objects only if on floor or underwater,
						// non-floating, and clipped.
						P_DoScrollMove(thing, dx, dy, s->exclusive);
					}
				}
			}
			break; // end of sc_carry_ceiling
	} // end of switch
}

/** Adds a generalized scroller to the thinker list.
  *
  * \param type      The enumerated type of scrolling.
  * \param dx        x speed of scrolling or its acceleration.
  * \param dy        y speed of scrolling or its acceleration.
  * \param control   Sector whose heights control this scroller's effect
  *                  remotely, or -1 if there is no control sector.
  * \param affectee  Index of the affected object, sector or sidedef.
  * \param accel     Nonzero for an accelerative effect.
  * \param exclusive Nonzero to mark carried objects MF2_PUSHED so no other
  *                  exclusive scroller/pusher moves them the same tic.
  * \sa Add_WallScroller, P_SpawnScrollers, T_Scroll
  */
static void Add_Scroller(INT32 type, fixed_t dx, fixed_t dy, INT32 control, INT32 affectee, INT32 accel, INT32 exclusive)
{
	scroll_t *s = Z_Calloc(sizeof *s, PU_LEVSPEC, NULL);
	s->thinker.function.acp1 = (actionf_p1)T_Scroll;
	s->type = type;
	s->dx = dx;
	s->dy = dy;
	s->accel = accel;
	s->exclusive = exclusive;
	s->vdx = s->vdy = 0;
	// Remember the control sector's combined height so T_Scroll can react to changes.
	if ((s->control = control) != -1)
		s->last_height = sectors[control].floorheight + sectors[control].ceilingheight;
	s->affectee = affectee;
	P_AddThinker(&s->thinker);
}

/** Adds a wall scroller.
  * Scroll amount is rotated with respect to wall's linedef first, so that
  * scrolling towards the wall in a perpendicular direction is translated into
  * vertical motion, while scrolling along the wall in a parallel direction is
  * translated into horizontal motion.
  *
  * \param dx x speed of scrolling or its acceleration.
  * \param dy      y speed of scrolling or its acceleration.
  * \param l       Line whose front side will scroll.
  * \param control Sector whose heights control this scroller's effect
  *                remotely, or -1 if there is no control sector.
  * \param accel   Nonzero for an accelerative effect.
  * \sa Add_Scroller, P_SpawnScrollers
  */
static void Add_WallScroller(fixed_t dx, fixed_t dy, const line_t *l, INT32 control, INT32 accel)
{
	fixed_t x = abs(l->dx), y = abs(l->dy), d;
	if (y > x)
		d = x, x = y, y = d; // swap so x >= y for the tantoangle lookup below
	// Fixed-point rotation of (dx, dy) into the wall's coordinate frame:
	// d becomes the line length, then (x, y) are the projections of the
	// scroll vector along/across the linedef.
	d = FixedDiv(x, FINESINE((tantoangle[FixedDiv(y, x) >> DBITS] + ANGLE_90) >> ANGLETOFINESHIFT));
	x = -FixedDiv(FixedMul(dy, l->dy) + FixedMul(dx, l->dx), d);
	y = -FixedDiv(FixedMul(dx, l->dy) - FixedMul(dy, l->dx), d);
	Add_Scroller(sc_side, x, y, control, *l->sidenum, accel, 0);
}

/** Initializes the scrollers.
  *
  * \todo Get rid of all the magic numbers.
  * \sa P_SpawnSpecials, Add_Scroller, Add_WallScroller
  */
static void P_SpawnScrollers(void)
{
	size_t i;
	line_t *l = lines;

	for (i = 0; i < numlines; i++, l++)
	{
		fixed_t dx = l->dx >> SCROLL_SHIFT; // direction and speed of scrolling
		fixed_t dy = l->dy >> SCROLL_SHIFT;
		INT32 control = -1, accel = 0; // no control sector or acceleration
		INT32 special = l->special;

		// These types are same as the ones they get set to except that the
		// first side's sector's heights cause scrolling when they change, and
		// this linedef controls the direction and speed of the scrolling. The
		// most complicated linedef since donuts, but powerful :)

		if (special == 515 || special == 512 || special == 522 || special == 532 || special == 504) // displacement scrollers
		{
			special -= 2; // remap to the corresponding plain scroller type
			control = (INT32)(sides[*l->sidenum].sector - sectors);
		}
		else if (special == 514 || special == 511 || special == 521 || special == 531 || special == 503) // accelerative scrollers
		{
			special--;
			accel = 1;
			control = (INT32)(sides[*l->sidenum].sector - sectors);
		}
		else if (special == 535 || special == 525) // displacement scrollers
		{
			special -= 2;
			control = (INT32)(sides[*l->sidenum].sector - sectors);
		}
		else if (special == 534 || special == 524) // accelerative scrollers
		{
			accel = 1;
			special--;
			control = (INT32)(sides[*l->sidenum].sector - sectors);
		}

		switch (special)
		{
			register INT32 s;

			case 513: // scroll effect ceiling
			case 533: // scroll and carry objects on ceiling
				for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;)
					Add_Scroller(sc_ceiling, -dx, dy, control, s, accel, l->flags & ML_NOCLIMB);
				if (special != 533)
					break;
				/* fallthrough: 533 also carries objects */
			case 523: // carry objects on ceiling
				dx = FixedMul(dx, CARRYFACTOR);
				dy = FixedMul(dy, CARRYFACTOR);
				for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;)
					Add_Scroller(sc_carry_ceiling, dx, dy, control, s, accel, l->flags & ML_NOCLIMB);
				break;

			case 510: // scroll effect floor
			case 530: // scroll and carry objects on floor
				for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;)
					Add_Scroller(sc_floor, -dx, dy, control, s, accel, l->flags & ML_NOCLIMB);
				if (special != 530)
					break;
				/* fallthrough: 530 also carries objects */
			case 520: // carry objects on floor
				dx = FixedMul(dx, CARRYFACTOR);
				dy = FixedMul(dy, CARRYFACTOR);
				for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;)
					Add_Scroller(sc_carry, dx, dy, control, s, accel, l->flags & ML_NOCLIMB);
				break;

			// scroll wall according to linedef
			// (same direction and speed as scrolling floors)
			case 502:
				for (s = -1; (s = P_FindLineFromLineTag(l, s)) >= 0 ;)
					if (s != (INT32)i) // don't make the control line scroll itself
						Add_WallScroller(dx, dy, lines+s, control, accel);
				break;

			case 505: // scroll front side by its own texture offsets
				s = lines[i].sidenum[0];
				Add_Scroller(sc_side, -sides[s].textureoffset, sides[s].rowoffset, -1, s, accel, 0);
				break;

			case 506: // scroll front side by the 2nd side's texture offsets
				s = lines[i].sidenum[1];

				if (s != 0xffff) // 0xffff == no second side
					Add_Scroller(sc_side, -sides[s].textureoffset, sides[s].rowoffset, -1, lines[i].sidenum[0], accel, 0);
				else
					CONS_Debug(DBG_GAMELOGIC, "Line special 506 (line #%s) missing 2nd side!\n", sizeu1(i));
				break;

			case 500: // scroll first side
				Add_Scroller(sc_side, FRACUNIT, 0, -1, lines[i].sidenum[0], accel, 0);
				break;

			case 501: // jff 1/30/98 2-way scroll
				Add_Scroller(sc_side, -FRACUNIT, 0, -1, lines[i].sidenum[0], accel, 0);
				break;
		}
	}
}

/** Adds master appear/disappear thinker.
  *
  * \param appeartime    tics to be existent
  * \param disappeartime tics to be nonexistent
  * \param offset        initial delay, in tics, before the first toggle
  * \param line          index of the control linedef whose tag selects the FOFs
  * \param sourceline    index of the linedef whose flags configure the effect
  * \sa T_Disappear
  */
static void Add_MasterDisappearer(tic_t appeartime, tic_t disappeartime, tic_t offset, INT32 line, INT32 sourceline)
{
	disappear_t *d = Z_Malloc(sizeof *d, PU_LEVSPEC, NULL);

	d->thinker.function.acp1 = (actionf_p1)T_Disappear;
	d->appeartime = appeartime;
	d->disappeartime = disappeartime;
	d->offset = offset;
	d->affectee = line;
	d->sourceline = sourceline;
	d->exists = true;
	d->timer = 1; // fire on the first tic (after any offset elapses)

	P_AddThinker(&d->thinker);
}

/** Makes a FOF appear/disappear
  *
  * \param d Disappear thinker.
  * \sa Add_MasterDisappearer
  */
void T_Disappear(disappear_t *d)
{
	// Burn down the initial offset before the first disappearance.
	if (d->offset && !d->exists)
	{
		d->offset--;
		return;
	}

	if (--d->timer <= 0)
	{
		ffloor_t *rover;
		register INT32 s;

		// Toggle FF_EXISTS on every FOF spawned from the affectee linedef.
		for (s = -1; (s = P_FindSectorFromLineTag(&lines[d->affectee], s)) >= 0 ;)
		{
			for (rover = sectors[s].ffloors; rover; rover = rover->next)
			{
				if (rover->master != &lines[d->affectee])
					continue;

				if (d->exists)
					rover->flags &= ~FF_EXISTS;
				else
				{
					rover->flags |= FF_EXISTS;

					// No-climb flag on the source line silences the reappear sound.
					if (!(lines[d->sourceline].flags & ML_NOCLIMB))
					{
						sectors[s].soundorg.z = *rover->topheight;
						S_StartSound(&sectors[s].soundorg, sfx_appear);
					}
				}
			}
			sectors[s].moved = true; // force the renderer to re-examine the sector
		}

		// Flip state and arm the timer for the next phase.
		if (d->exists)
		{
			d->timer = d->disappeartime;
			d->exists = false;
		}
		else
		{
			d->timer = d->appeartime;
			d->exists = true;
		}
	}
}

/*
  SoM: 3/8/2000: Friction functions start.
  Add_Friction, T_Friction, P_SpawnFriction
*/

/** Adds friction thinker.
  *
  * \param friction   Friction value, 0xe800 is normal.
  * \param movefactor Inertia factor.
  * \param affectee   Target sector.
  * \param referrer   Control sector index for FOF friction, or -1 for none.
  * \sa T_Friction, P_SpawnFriction
  */
static void Add_Friction(INT32 friction, INT32 movefactor, INT32 affectee, INT32 referrer)
{
	friction_t *f = Z_Calloc(sizeof *f, PU_LEVSPEC, NULL);

	f->thinker.function.acp1 = (actionf_p1)T_Friction;
	f->friction = friction;
	f->movefactor = movefactor;
	f->affectee = affectee;

	if (referrer != -1)
	{
		f->roverfriction = true;
		f->referrer = referrer;
	}
	else
		f->roverfriction = false;

	P_AddThinker(&f->thinker);
}

/** Applies friction to all things in a sector.
  *
  * \param f Friction thinker.
  * \sa Add_Friction
  */
void T_Friction(friction_t *f)
{
	sector_t *sec, *referrer = NULL;
	mobj_t *thing;
	msecnode_t *node;

	sec = sectors + f->affectee;

	// Make sure the sector type hasn't changed (section-3 specials 1 and 3
	// are the ones that carry friction here).
	if (f->roverfriction)
	{
		referrer = sectors + f->referrer;

		if (!(GETSECSPECIAL(referrer->special, 3) == 1
			|| GETSECSPECIAL(referrer->special, 3) == 3))
			return;
	}
	else
	{
		if (!(GETSECSPECIAL(sec->special, 3) == 1
			|| GETSECSPECIAL(sec->special, 3) == 3))
			return;
	}

	// Assign the friction value to players on the floor, non-floating,
	// and clipped. Normally the object's friction value is kept at
	// ORIG_FRICTION and this thinker changes it for icy or muddy floors.

	// When the object is straddling sectors with the same
	// floorheight that have different frictions, use the lowest
	// friction value (muddy has precedence over icy).
	node = sec->touching_thinglist; // things touching this sector
	while (node)
	{
		thing = node->m_thing;
		// apparently, all I had to do was comment out part of the next line and
		// friction works for all mobj's
		// (or at least MF_PUSHABLEs, which is all I care about anyway)
		if (!(thing->flags & (MF_NOGRAVITY | MF_NOCLIP)) && thing->z == thing->floorz)
		{
			if (f->roverfriction)
			{
				// FOF friction only applies while standing on that FOF's top.
				if (thing->floorz != P_GetSpecialTopZ(thing, referrer, sec))
				{
					node = node->m_snext;
					continue;
				}

				if ((thing->friction == ORIG_FRICTION) // normal friction?
					|| (f->friction < thing->friction))
				{
					thing->friction = f->friction;
					thing->movefactor = f->movefactor;
				}
			}
			else if (P_GetSpecialBottomZ(thing, sec, sec) == thing->floorz && (thing->friction == ORIG_FRICTION // normal friction?
				|| f->friction < thing->friction))
			{
				thing->friction = f->friction;
				thing->movefactor = f->movefactor;
			}
		}
		node = node->m_snext;
	}
}

/** Spawns all friction effects.
  *
  * \sa P_SpawnSpecials, Add_Friction
  */
static void P_SpawnFriction(void)
{
	size_t i;
	line_t *l = lines;
	register INT32 s;
	fixed_t length; // line length controls magnitude
	fixed_t friction; // friction value to be applied during movement
	INT32 movefactor; // applied to each player move to simulate inertia

	for (i = 0; i < numlines; i++, l++)
		if (l->special == 540) // friction linedef
		{
			length = P_AproxDistance(l->dx, l->dy)>>FRACBITS;
			// Map line length onto a friction value around the 0xE800 default.
			friction = (0x1EB8*length)/0x80 + 0xD000;

			// Clamp to the valid fixed-point range [0, FRACUNIT].
			if (friction > FRACUNIT)
				friction = FRACUNIT;
			if (friction < 0)
				friction = 0;

			// The following check might seem odd. At the time of movement,
			// the move distance is multiplied by 'friction/0x10000', so a
			// higher friction value actually means 'less friction'.

			if (friction > ORIG_FRICTION) // ice
				movefactor = ((0x10092 - friction)*(0x70))/0x158;
			else
				movefactor = ((friction - 0xDB34)*(0xA))/0x80;

			// killough 8/28/98: prevent odd situations
			if (movefactor < 32)
				movefactor = 32;

			for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;)
				Add_Friction(friction, movefactor, s, -1);
		}
}

/*
  SoM: 3/8/2000: Push/Pull/Wind/Current functions.
  Add_Pusher,
  PIT_PushThing,
  T_Pusher,
  P_GetPushThing,
  P_SpawnPushers
*/

#define PUSH_FACTOR 7

/** Adds a pusher.
  *
  * \param type     Type of push/pull effect.
  * \param x_mag    X magnitude.
  * \param y_mag    Y magnitude.
  * \param source   For a point pusher/puller, the source object.
  * \param affectee Target sector.
  * \param referrer What sector set it
  * \param exclusive Nonzero to mark pushed objects MF2_PUSHED for this tic.
  * \param slider    Stored on the pusher; semantics not visible here — see T_Pusher's callers.
  * \sa T_Pusher, P_GetPushThing, P_SpawnPushers
  */
static void Add_Pusher(pushertype_e type, fixed_t x_mag, fixed_t y_mag, mobj_t *source, INT32 affectee, INT32 referrer, INT32 exclusive, INT32 slider)
{
	pusher_t *p = Z_Calloc(sizeof *p, PU_LEVSPEC, NULL);

	p->thinker.function.acp1 = (actionf_p1)T_Pusher;
	p->source = source;
	p->type = type;
	// Magnitudes are stored as whole units, not fixed-point.
	p->x_mag = x_mag>>FRACBITS;
	p->y_mag = y_mag>>FRACBITS;
	p->exclusive = exclusive;
	p->slider = slider;

	if (referrer != -1)
	{
		p->roverpusher = true;
		p->referrer = referrer;
	}
	else
		p->roverpusher = false;

	// "The right triangle of the square of the length of the hypotenuse is equal to the sum of the squares of the lengths of the other two sides."
	// "Bah! Stupid brains! Don't you know anything besides the Pythagorean Theorem?" - Earthworm Jim
	if (type == p_downcurrent || type == p_upcurrent || type == p_upwind || type == p_downwind)
		p->magnitude = P_AproxDistance(p->x_mag,p->y_mag)<<(FRACBITS-PUSH_FACTOR);
	else
		p->magnitude = P_AproxDistance(p->x_mag,p->y_mag);
	if (source) // point source exist?
	{
		// where force goes to zero
		if (type == p_push)
			p->radius = AngleFixed(source->angle); // radius is encoded in the thing's angle
		else
			p->radius = (p->magnitude)<<(FRACBITS+1);

		p->x = p->source->x;
		p->y = p->source->y;
		p->z = p->source->z;
	}
	p->affectee = affectee;
	P_AddThinker(&p->thinker);
}

// PIT_PushThing determines the angle and magnitude of the effect.
// The object's x and y momentum values are changed.
static pusher_t *tmpusher; // pusher structure for blockmap searches

/** Applies a point pusher/puller to a thing.
  *
  * \param thing Thing to be pushed.
  * \return True if the thing was pushed.
  * \todo Make a more robust P_BlockThingsIterator() so the hidden parameter
  *       ::tmpusher won't need to be used.
  * \sa T_Pusher
  */
static inline boolean PIT_PushThing(mobj_t *thing)
{
	if (thing->flags2 & MF2_PUSHED) // already handled by an exclusive pusher this tic
		return false;

	if (thing->player && thing->player->pflags & PF_ROPEHANG)
		return false;

	// Allow this to affect pushable objects at some point?
	if (thing->player && (!(thing->flags & (MF_NOGRAVITY | MF_NOCLIP)) || thing->player->pflags & PF_NIGHTSMODE))
	{
		INT32 dist;
		INT32 speed;
		INT32 sx, sy, sz;

		sx = tmpusher->x;
		sy = tmpusher->y;
		sz = tmpusher->z;

		// don't fade wrt Z if health & 2 (mapthing has multi flag)
		if (tmpusher->source->health & 2)
			dist = P_AproxDistance(thing->x - sx,thing->y - sy);
		else
		{
			// Make sure the Z is in range
			if (thing->z < sz - tmpusher->radius || thing->z > sz + tmpusher->radius)
				return false;

			dist = P_AproxDistance(P_AproxDistance(thing->x - sx, thing->y - sy),
				thing->z - sz);
		}

		// Force falls off linearly with distance from the source point.
		speed = (tmpusher->magnitude - ((dist>>FRACBITS)>>1))<<(FRACBITS - PUSH_FACTOR - 1);

		// If speed <= 0, you're outside the effective radius. You also have
		// to be able to see the push/pull source point.

		// Written with bits and pieces of P_HomingAttack
		if ((speed > 0) && (P_CheckSight(thing, tmpusher->source)))
		{
			if (!(thing->player->pflags & PF_NIGHTSMODE))
			{
				// only push wrt Z if health & 1 (mapthing has ambush flag)
				if (tmpusher->source->health & 1)
				{
					fixed_t tmpmomx, tmpmomy, tmpmomz;

					// Full 3D push along the line toward/away from the source.
					tmpmomx = FixedMul(FixedDiv(sx - thing->x, dist), speed);
					tmpmomy = FixedMul(FixedDiv(sy - thing->y, dist), speed);
					tmpmomz = FixedMul(FixedDiv(sz - thing->z, dist), speed);
					if (tmpusher->source->type == MT_PUSH) // away!
					{
						tmpmomx *= -1;
						tmpmomy *= -1;
						tmpmomz *= -1;
					}

					thing->momx += tmpmomx;
					thing->momy += tmpmomy;
					thing->momz += tmpmomz;

					if (thing->player)
					{
						thing->player->cmomx += tmpmomx;
						thing->player->cmomy += tmpmomy;
						thing->player->cmomx = FixedMul(thing->player->cmomx, 0xe800);
						thing->player->cmomy = FixedMul(thing->player->cmomy, 0xe800);
					}
				}
				else
				{
					// Horizontal-only push using the angle to the source.
					angle_t pushangle;

					pushangle = R_PointToAngle2(thing->x, thing->y, sx, sy);
					if (tmpusher->source->type == MT_PUSH)
						pushangle += ANGLE_180; // away

					pushangle >>= ANGLETOFINESHIFT;
					thing->momx += FixedMul(speed, FINECOSINE(pushangle));
					thing->momy += FixedMul(speed, FINESINE(pushangle));

					if (thing->player)
					{
						thing->player->cmomx += FixedMul(speed, FINECOSINE(pushangle));
						thing->player->cmomy += FixedMul(speed, FINESINE(pushangle));
						thing->player->cmomx = FixedMul(thing->player->cmomx, 0xe800);
						thing->player->cmomy = FixedMul(thing->player->cmomy, 0xe800);
					}
				}
			}
			else
			{
				//NiGHTS-specific handling.
				//By default, pushes and pulls only affect the Z-axis.
				//By having the ambush flag, it affects the X-axis.
				//By having the object special flag, it affects the Y-axis.
				fixed_t tmpmomx, tmpmomy, tmpmomz;

				if (tmpusher->source->health & 1)
					tmpmomx = FixedMul(FixedDiv(sx - thing->x, dist), speed);
				else
					tmpmomx = 0;

				if (tmpusher->source->health & 2)
					tmpmomy = FixedMul(FixedDiv(sy - thing->y, dist), speed);
				else
					tmpmomy = 0;

				tmpmomz = FixedMul(FixedDiv(sz - thing->z, dist), speed);

				if (tmpusher->source->type == MT_PUSH) // away!
				{
					tmpmomx *= -1;
					tmpmomy *= -1;
					tmpmomz *= -1;
				}

				thing->momx += tmpmomx;
				thing->momy += tmpmomy;
				thing->momz += tmpmomz;

				if (thing->player)
				{
					thing->player->cmomx += tmpmomx;
					thing->player->cmomy += tmpmomy;
					thing->player->cmomx = FixedMul(thing->player->cmomx, 0xe800);
					thing->player->cmomy = FixedMul(thing->player->cmomy, 0xe800);
				}
			}
		}
	}

	if (tmpusher->exclusive)
		thing->flags2 |= MF2_PUSHED;

	return true;
}

/** Applies a pusher to all affected objects.
  *
  * \param p Thinker for the pusher effect.
* \todo Split up into multiple functions. * \sa Add_Pusher, PIT_PushThing */ void T_Pusher(pusher_t *p) { sector_t *sec, *referrer = NULL; mobj_t *thing; msecnode_t *node; INT32 xspeed = 0,yspeed = 0; INT32 xl, xh, yl, yh, bx, by; INT32 radius; //INT32 ht = 0; boolean inFOF; boolean touching; boolean moved; xspeed = yspeed = 0; sec = sectors + p->affectee; // Be sure the special sector type is still turned on. If so, proceed. // Else, bail out; the sector type has been changed on us. if (p->roverpusher) { referrer = &sectors[p->referrer]; if (!(GETSECSPECIAL(referrer->special, 3) == 2 || GETSECSPECIAL(referrer->special, 3) == 3)) return; } else if (!(GETSECSPECIAL(sec->special, 3) == 2 || GETSECSPECIAL(sec->special, 3) == 3)) return; // For constant pushers (wind/current) there are 3 situations: // // 1) Affected Thing is above the floor. // // Apply the full force if wind, no force if current. // // 2) Affected Thing is on the ground. // // Apply half force if wind, full force if current. // // 3) Affected Thing is below the ground (underwater effect). // // Apply no force if wind, full force if current. // // Apply the effect to clipped players only for now. // // In Phase II, you can apply these effects to Things other than players. if (p->type == p_push) { // Seek out all pushable things within the force radius of this // point pusher. Crosses sectors, so use blockmap. 
tmpusher = p; // MT_PUSH/MT_PULL point source radius = p->radius; // where force goes to zero tmbbox[BOXTOP] = p->y + radius; tmbbox[BOXBOTTOM] = p->y - radius; tmbbox[BOXRIGHT] = p->x + radius; tmbbox[BOXLEFT] = p->x - radius; xl = (unsigned)(tmbbox[BOXLEFT] - bmaporgx - MAXRADIUS)>>MAPBLOCKSHIFT; xh = (unsigned)(tmbbox[BOXRIGHT] - bmaporgx + MAXRADIUS)>>MAPBLOCKSHIFT; yl = (unsigned)(tmbbox[BOXBOTTOM] - bmaporgy - MAXRADIUS)>>MAPBLOCKSHIFT; yh = (unsigned)(tmbbox[BOXTOP] - bmaporgy + MAXRADIUS)>>MAPBLOCKSHIFT; for (bx = xl; bx <= xh; bx++) for (by = yl; by <= yh; by++) P_BlockThingsIterator(bx,by, PIT_PushThing); return; } // constant pushers p_wind and p_current node = sec->touching_thinglist; // things touching this sector for (; node; node = node->m_snext) { thing = node->m_thing; if (thing->flags & (MF_NOGRAVITY | MF_NOCLIP) && !(thing->type == MT_SMALLBUBBLE || thing->type == MT_MEDIUMBUBBLE || thing->type == MT_EXTRALARGEBUBBLE)) continue; if (!(thing->flags & MF_PUSHABLE) && !(thing->type == MT_PLAYER || thing->type == MT_SMALLBUBBLE || thing->type == MT_MEDIUMBUBBLE || thing->type == MT_EXTRALARGEBUBBLE || thing->type == MT_LITTLETUMBLEWEED || thing->type == MT_BIGTUMBLEWEED)) continue; if (thing->flags2 & MF2_PUSHED) continue; if (thing->player && thing->player->pflags & PF_ROPEHANG) continue; if (thing->player && (thing->state == &states[thing->info->painstate]) && (thing->player->powers[pw_flashing] > (flashingtics/4)*3 && thing->player->powers[pw_flashing] <= flashingtics)) continue; inFOF = touching = moved = false; // Find the area that the 'thing' is in if (p->roverpusher) { fixed_t top, bottom; top = P_GetSpecialTopZ(thing, referrer, sec); bottom = P_GetSpecialBottomZ(thing, referrer, sec); if (thing->eflags & MFE_VERTICALFLIP) { if (bottom > thing->z + thing->height || top < (thing->z + (thing->height >> 1))) continue; if (thing->z < bottom) touching = true; if (thing->z + (thing->height >> 1) > bottom) inFOF = true; } else { if (top < thing->z 
|| referrer->floorheight > (thing->z + (thing->height >> 1))) continue; if (thing->z + thing->height > top) touching = true; if (thing->z + (thing->height >> 1) < top) inFOF = true; } } else // Treat the entire sector as one big FOF { if (thing->z == P_GetSpecialBottomZ(thing, sec, sec)) touching = true; else if (p->type != p_current) inFOF = true; } if (!touching && !inFOF) // Object is out of range of effect continue; if (p->type == p_wind) { if (touching) // on ground { xspeed = (p->x_mag)>>1; // half force yspeed = (p->y_mag)>>1; moved = true; } else if (inFOF) { xspeed = (p->x_mag); // full force yspeed = (p->y_mag); moved = true; } } else if (p->type == p_upwind) { if (touching) // on ground { thing->momz += (p->magnitude)>>1; moved = true; } else if (inFOF) { thing->momz += p->magnitude; moved = true; } } else if (p->type == p_downwind) { if (touching) // on ground { thing->momz -= (p->magnitude)>>1; moved = true; } else if (inFOF) { thing->momz -= p->magnitude; moved = true; } } else // p_current { if (!touching && !inFOF) // Not in water at all xspeed = yspeed = 0; // no force else // underwater / touching water { if (p->type == p_upcurrent) thing->momz += p->magnitude; else if (p->type == p_downcurrent) thing->momz -= p->magnitude; else { xspeed = p->x_mag; // full force yspeed = p->y_mag; } moved = true; } } if (p->type != p_downcurrent && p->type != p_upcurrent && p->type != p_upwind && p->type != p_downwind) { thing->momx += xspeed<<(FRACBITS-PUSH_FACTOR); thing->momy += yspeed<<(FRACBITS-PUSH_FACTOR); if (thing->player) { thing->player->cmomx += xspeed<<(FRACBITS-PUSH_FACTOR); thing->player->cmomy += yspeed<<(FRACBITS-PUSH_FACTOR); thing->player->cmomx = FixedMul(thing->player->cmomx, ORIG_FRICTION); thing->player->cmomy = FixedMul(thing->player->cmomy, ORIG_FRICTION); } // Tumbleweeds bounce a bit... 
if (thing->type == MT_LITTLETUMBLEWEED || thing->type == MT_BIGTUMBLEWEED) thing->momz += P_AproxDistance(xspeed<<(FRACBITS-PUSH_FACTOR), yspeed<<(FRACBITS-PUSH_FACTOR)) >> 2; } if (moved) { if (p->slider && thing->player) { boolean jumped = (thing->player->pflags & PF_JUMPED); P_ResetPlayer (thing->player); if (jumped) thing->player->pflags |= PF_JUMPED; thing->player->pflags |= PF_SLIDING; P_SetPlayerMobjState (thing, thing->info->painstate); // Whee! thing->angle = R_PointToAngle2 (0, 0, xspeed<<(FRACBITS-PUSH_FACTOR), yspeed<<(FRACBITS-PUSH_FACTOR)); if (!demoplayback || P_AnalogMove(thing->player)) { if (thing->player == &players[consoleplayer]) { if (thing->angle - localangle > ANGLE_180) localangle -= (localangle - thing->angle) / 8; else localangle += (thing->angle - localangle) / 8; } else if (thing->player == &players[secondarydisplayplayer]) { if (thing->angle - localangle2 > ANGLE_180) localangle2 -= (localangle2 - thing->angle) / 8; else localangle2 += (thing->angle - localangle2) / 8; } /*if (thing->player == &players[consoleplayer]) localangle = thing->angle; else if (thing->player == &players[secondarydisplayplayer]) localangle2 = thing->angle;*/ } } if (p->exclusive) thing->flags2 |= MF2_PUSHED; } } } /** Gets a push/pull object. * * \param s Sector number to look in. * \return Pointer to the first ::MT_PUSH or ::MT_PULL object found in the * sector. * \sa P_GetTeleportDestThing, P_GetStarpostThing, P_GetAltViewThing */ mobj_t *P_GetPushThing(UINT32 s) { mobj_t *thing; sector_t *sec; sec = sectors + s; thing = sec->thinglist; while (thing) { switch (thing->type) { case MT_PUSH: case MT_PULL: return thing; default: break; } thing = thing->snext; } return NULL; } /** Spawns pushers. * * \todo Remove magic numbers. 
* \sa P_SpawnSpecials, Add_Pusher */ static void P_SpawnPushers(void) { size_t i; line_t *l = lines; register INT32 s; mobj_t *thing; for (i = 0; i < numlines; i++, l++) switch (l->special) { case 541: // wind for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_wind, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; case 544: // current for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_current, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; case 547: // push/pull for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) { thing = P_GetPushThing(s); if (thing) // No MT_P* means no effect Add_Pusher(p_push, l->dx, l->dy, thing, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); } break; case 545: // current up for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_upcurrent, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; case 546: // current down for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_downcurrent, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; case 542: // wind up for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_upwind, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; case 543: // wind down for (s = -1; (s = P_FindSectorFromLineTag(l, s)) >= 0 ;) Add_Pusher(p_downwind, l->dx, l->dy, NULL, s, -1, l->flags & ML_NOCLIMB, l->flags & ML_EFFECT4); break; } } static void P_SearchForDisableLinedefs(void) { size_t i; INT32 j; // Look for disable linedefs for (i = 0; i < numlines; i++) { if (lines[i].special == 6) { // Remove special // Do *not* remove tag. That would mess with the tag lists // that P_InitTagLists literally just created! lines[i].special = 0; // Ability flags can disable disable linedefs now, lol if (netgame || multiplayer) { // future: nonet flag? 
} else if ((lines[i].flags & ML_NETONLY) == ML_NETONLY) continue; // Net-only never triggers in single player else if (players[consoleplayer].charability == CA_THOK && (lines[i].flags & ML_NOSONIC)) continue; else if (players[consoleplayer].charability == CA_FLY && (lines[i].flags & ML_NOTAILS)) continue; else if (players[consoleplayer].charability == CA_GLIDEANDCLIMB && (lines[i].flags & ML_NOKNUX)) continue; // Disable any linedef specials with our tag. for (j = -1; (j = P_FindLineFromLineTag(&lines[i], j)) >= 0;) lines[j].special = 0; } } }
gpl-2.0
tb-303/GFRG110
arch/arm/plat-feroceon/mv_hal/voiceband/slic/silabs/arch_marvell/timer.c
2
1183
/* ** $Id: dummy_timer.c 109 2008-10-22 19:45:09Z lajordan@SILABS.COM $ ** ** system.c ** System specific functions implementation file ** ** Author(s): ** laj ** ** Distributed by: ** Silicon Laboratories, Inc ** ** File Description: ** This is the implementation file for the system specific functions like timer functions. ** ** Dependancies: ** datatypes.h ** */ #include "timer.h" /* ** Function: SYSTEM_TimerInit */ void TimerInit (systemTimer_S *pTimerObj) { } /* ** Function: SYSTEM_Delay */ int time_DelayWrapper (void *hTimer, int timeInMs) { mvOsDelay(timeInMs); return 0; } /* ** Function: SYSTEM_TimeElapsed */ int time_TimeElapsedWrapper (void *hTimer, void *startTime, int *timeInMs) { *timeInMs = 1000; return 0; } /* ** Function: SYSTEM_GetTime */ int time_GetTimeWrapper (void *hTimer, void *time) { // time->timestamp=0; return 0; } /* ** $Log: dummy_timer.c,v $ ** Revision 1.3 2008/03/13 18:40:03 lajordan ** fixed for si3226 ** ** Revision 1.2 2007/10/22 21:38:31 lajordan ** fixed some warnings ** ** Revision 1.1 2007/10/22 20:49:21 lajordan ** no message ** ** */
gpl-2.0
gioman/QGIS
src/gui/qgscodeeditorsql.cpp
2
2419
/*************************************************************************** qgscodeeditorsql.cpp - A SQL editor based on QScintilla -------------------------------------- Date : 06-Oct-2013 Copyright : (C) 2013 by Salvatore Larosa Email : lrssvtml (at) gmail (dot) com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsapplication.h" #include "qgscodeeditorsql.h" #include <QWidget> #include <QString> #include <QFont> #include <QLabel> #include <Qsci/qscilexersql.h> QgsCodeEditorSQL::QgsCodeEditorSQL( QWidget *parent ) : QgsCodeEditor( parent ) { if ( !parent ) { setTitle( tr( "SQL Editor" ) ); } setMarginVisible( false ); setFoldingVisible( true ); setAutoCompletionCaseSensitivity( false ); setSciLexerSQL(); } /** Internal use. setAutoCompletionCaseSensitivity( false ) is not sufficient when installing a lexer, since its caseSensitive() method is actually used, and defaults to true. @note not available in Python bindings @ingroup gui */ class QgsCaseInsensitiveLexerSQL: public QsciLexerSQL { public: //! 
constructor explicit QgsCaseInsensitiveLexerSQL( QObject *parent = 0 ) : QsciLexerSQL( parent ) {} bool caseSensitive() const override { return false; } }; void QgsCodeEditorSQL::setSciLexerSQL() { QFont font = getMonospaceFont(); #ifdef Q_OS_MAC // The font size gotten from getMonospaceFont() is too small on Mac font.setPointSize( QLabel().font().pointSize() ); #endif QsciLexerSQL *sqlLexer = new QgsCaseInsensitiveLexerSQL( this ); sqlLexer->setDefaultFont( font ); sqlLexer->setFont( font, -1 ); font.setBold( true ); sqlLexer->setFont( font, QsciLexerSQL::Keyword ); sqlLexer->setColor( Qt::darkYellow, QsciLexerSQL::DoubleQuotedString ); // fields setLexer( sqlLexer ); }
gpl-2.0
jobovy/nemo
src/nbody/evolve/kawai/nbody/second.c
2
3875
/* #define RETURN_CPU */ /* * SECOND.c : returns the CPU time * * Jun Makino 89/09/20 Ver. 1.00 (created) * 89/10/02 Ver. 1.01 debugged on UNIX system. * * functions : * void timer_init() : reset the timer * void second(&double): returns the cpu seconds * void cpumin(&double): returns the cpu minutes */ #ifdef MSC #include <time.h> static long start_time; void timer_init() { time(&start_time); } second(dtime) double * dtime; { long int timenow; time(&timenow); *dtime = timenow - start_time; } cpumin(dmin) double *dmin ; { double sec; second(&sec); *dmin = sec/60.0; } #else /* * SECONDS: returns number of CPU seconds spent so far * This version can be called from fortran routines * in double precision mode */ #include <stdio.h> #include <sys/types.h> #include <sys/time.h> #include <sys/times.h> #include <sys/resource.h> #include <limits.h> static double tstart; static struct timeval timearg; static struct timezone zonearg; #define RETURN_CPU void timer_init() { #ifndef RETUEN_CPU if(gettimeofday(&timearg,&zonearg)){ fprintf(stderr,"Time initialization failed\n"); } tstart = timearg.tv_sec + timearg.tv_usec*1e-6; #else #ifndef SOLARIS struct rusage usage; if(getrusage(RUSAGE_SELF,&usage)){ fprintf(stderr,"getrusage failed\n"); } tstart = usage.ru_utime.tv_sec + usage.ru_utime.tv_usec*1e-6; #else struct tms buffer; times(&buffer); tstart = (buffer.tms_utime + 0.0)/CLK_TCK; #endif #endif } void tminit_() { timer_init(); } cpumin(t) double * t; { xcpumin_(t); } double cpusec() { double t; second(&t); return t; } second(t) double * t; { second_(t); } xcpumin_(t) double*t; { double sec; second_(&sec); *t=sec/60.0; } second_(t) double *t; { #ifdef RETURN_CPU #ifdef SOLARIS struct tms buffer; if (times(&buffer) == -1) { printf("times() call failed\n"); exit(1); } *t = (buffer.tms_utime / (CLK_TCK+0.0)); #else struct rusage usage; if(getrusage(RUSAGE_SELF,&usage)){ fprintf(stderr,"getrusage failed\n"); } *t = usage.ru_utime.tv_sec + usage.ru_utime.tv_usec*1e-6; #endif #else 
#ifndef xxxx if(gettimeofday(&timearg,&zonearg)){ fprintf(stderr,"Timer failed\n"); } *t = timearg.tv_sec + timearg.tv_usec*1e-6 - tstart; #else struct tms tbuf; int it0; it0 = times(&tbuf); if(it0 == -1){ fprintf(stderr,"Time initialization failed\n"); } *t = it0*0.01 - tstart; #endif #endif } #endif #ifdef TEST main() { double t; timer_init(); for(;;){ second(&t); printf("time = %f\n", t); } } #endif #ifdef SHORT_REAL # define COMPTYPE float #else # define COMPTYPE double #endif typedef union double_and_int{ unsigned int iwork; COMPTYPE fwork; } DOUBLE_INT; void c_assignbody_(pos,cpos,bsub) register COMPTYPE pos[]; register COMPTYPE cpos[]; int * bsub; { register int k,l,k1,k2; register DOUBLE_INT tmpx,tmpy,tmpz ; #if 1 k = k1 = k2 =0; if(pos[0] >= cpos[0]) k=1; if(pos[1] >= cpos[1]) k1=2; if(pos[2] >= cpos[2]) k2=4; *bsub = 1 + k + k1 + k2; #endif #if 0 k += signbit(pos[0]-cpos[0]); k += signbit(pos[1]-cpos[1])*2; k += signbit(pos[2]-cpos[2])*4; k = (k + 7)>>1; *bsub += k; #endif #if 0 tmpx.fwork = cpos[0]-pos[0]; tmpy.fwork = cpos[1]-pos[1]; tmpz.fwork = cpos[2]-pos[2]; *bsub = 1+(tmpx.iwork >> 31) + ((tmpy.iwork >> 30) & 2 ) + ((tmpz.iwork >> 29) & 4 ); #if 0 if(pos[0] >= cpos[0]) k++; if(pos[1] >= cpos[1]) k+=2; if(pos[2] >= cpos[2]) k+=4; printf("%f %f %f %x %x %x %x %x %x %x %x\n", tmpx.fwork,tmpy.fwork,tmpz.fwork, tmpx.iwork,tmpy.iwork,tmpz.iwork, tmpx.iwork>>31,tmpy.iwork>>30,tmpz.iwork>>29, *bsub,k); #endif #endif }
gpl-2.0
lbt/hijack
drivers/scsi/sr_ioctl.c
2
18479
#include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/blk.h> #include "scsi.h" #include "hosts.h" #include <scsi/scsi_ioctl.h> #include <linux/cdrom.h> #include "sr.h" #if 0 # define DEBUG #endif /* The sr_is_xa() seems to trigger firmware bugs with some drives :-( * It is off by default and can be turned on with this module parameter */ static int xa_test = 0; extern void get_sectorsize(int); #define IOCTL_RETRIES 3 /* The CDROM is fairly slow, so we need a little extra time */ /* In fact, it is very slow if it has to spin up first */ #define IOCTL_TIMEOUT 30*HZ static void sr_ioctl_done(Scsi_Cmnd * SCpnt) { struct request * req; req = &SCpnt->request; req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) { memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen); scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511); SCpnt->buffer = req->buffer; } if (req->sem != NULL) { up(req->sem); } } /* We do our own retries because we want to know what the specific error code is. 
Normally the UNIT_ATTENTION code will automatically clear after one error */ int sr_do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength, int quiet) { Scsi_Cmnd * SCpnt; Scsi_Device * SDev; int result, err = 0, retries = 0; unsigned long flags; char * bounce_buffer; spin_lock_irqsave(&io_request_lock, flags); SDev = scsi_CDs[target].device; SCpnt = scsi_allocate_device(NULL, scsi_CDs[target].device, 1); spin_unlock_irqrestore(&io_request_lock, flags); /* use ISA DMA buffer if necessary */ SCpnt->request.buffer=buffer; if (buffer && SCpnt->host->unchecked_isa_dma && (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) { bounce_buffer = (char *)scsi_malloc((buflength + 511) & ~511); if (bounce_buffer == NULL) { printk("SCSI DMA pool exhausted."); return -ENOMEM; } memcpy(bounce_buffer, (char *)buffer, buflength); buffer = bounce_buffer; } retry: if( !scsi_block_when_processing_errors(SDev) ) return -ENODEV; { struct semaphore sem = MUTEX_LOCKED; SCpnt->request.sem = &sem; spin_lock_irqsave(&io_request_lock, flags); scsi_do_cmd(SCpnt, (void *) sr_cmd, buffer, buflength, sr_ioctl_done, IOCTL_TIMEOUT, IOCTL_RETRIES); spin_unlock_irqrestore(&io_request_lock, flags); down(&sem); SCpnt->request.sem = NULL; } result = SCpnt->result; /* Minimal error checking. Ignore cases we know about, and report the rest. 
*/ if(driver_byte(result) != 0) { switch(SCpnt->sense_buffer[2] & 0xf) { case UNIT_ATTENTION: scsi_CDs[target].device->changed = 1; if (!quiet) printk(KERN_INFO "sr%d: disc change detected.\n", target); if (retries++ < 10) goto retry; err = -ENOMEDIUM; break; case NOT_READY: /* This happens if there is no disc in drive */ if (SCpnt->sense_buffer[12] == 0x04 && SCpnt->sense_buffer[13] == 0x01) { /* sense: Logical unit is in process of becoming ready */ if (!quiet) printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target); if (retries++ < 10) { /* sleep 2 sec and try again */ /* * The spinlock is silly - we should really lock more of this * function, but the minimal locking required to not lock up * is around this - scsi_sleep() assumes we hold the spinlock. */ spin_lock_irqsave(&io_request_lock, flags); scsi_sleep(2*HZ); spin_unlock_irqrestore(&io_request_lock, flags); goto retry; } else { /* 20 secs are enough? */ err = -ENOMEDIUM; break; } } if (!quiet) printk(KERN_INFO "sr%d: CDROM not ready. 
Make sure there is a disc in the drive.\n",target); #ifdef DEBUG print_sense("sr", SCpnt); #endif err = -ENOMEDIUM; break; case ILLEGAL_REQUEST: if (!quiet) printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL " "REQUEST.\n", target); if (SCpnt->sense_buffer[12] == 0x20 && SCpnt->sense_buffer[13] == 0x00) { /* sense: Invalid command operation code */ err = -EDRIVE_CANT_DO_THIS; } else { err = -EINVAL; } #ifdef DEBUG print_command(sr_cmd); print_sense("sr", SCpnt); #endif break; default: printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target); print_command(sr_cmd); print_sense("sr", SCpnt); err = -EIO; } } spin_lock_irqsave(&io_request_lock, flags); result = SCpnt->result; /* Wake up a process waiting for device*/ wake_up(&SCpnt->device->device_wait); scsi_release_command(SCpnt); SCpnt = NULL; spin_unlock_irqrestore(&io_request_lock, flags); return err; } /* ---------------------------------------------------------------------- */ /* interface to cdrom.c */ static int test_unit_ready(int minor) { u_char sr_cmd[10]; sr_cmd[0] = GPCMD_TEST_UNIT_READY; sr_cmd[1] = ((scsi_CDs[minor].device -> lun) << 5); sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1); } int sr_tray_move(struct cdrom_device_info *cdi, int pos) { u_char sr_cmd[10]; sr_cmd[0] = GPCMD_START_STOP_UNIT; sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device -> lun) << 5); sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0; sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */; return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0); } int sr_lock_door(struct cdrom_device_info *cdi, int lock) { return scsi_ioctl (scsi_CDs[MINOR(cdi->dev)].device, lock ? 
SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK, 0); } int sr_drive_status(struct cdrom_device_info *cdi, int slot) { if (CDSL_CURRENT != slot) { /* we have no changer support */ return -EINVAL; } if (0 == test_unit_ready(MINOR(cdi->dev))) return CDS_DISC_OK; return CDS_TRAY_OPEN; } int sr_disk_status(struct cdrom_device_info *cdi) { struct cdrom_tochdr toc_h; struct cdrom_tocentry toc_e; int i,rc,have_datatracks = 0; /* look for data tracks */ if (0 != (rc = sr_audio_ioctl(cdi, CDROMREADTOCHDR, &toc_h))) return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO; for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { toc_e.cdte_track = i; toc_e.cdte_format = CDROM_LBA; if (sr_audio_ioctl(cdi, CDROMREADTOCENTRY, &toc_e)) return CDS_NO_INFO; if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { have_datatracks = 1; break; } } if (!have_datatracks) return CDS_AUDIO; if (scsi_CDs[MINOR(cdi->dev)].xa_flag) return CDS_XA_2_1; else return CDS_DATA_1; } int sr_get_last_session(struct cdrom_device_info *cdi, struct cdrom_multisession* ms_info) { ms_info->addr.lba=scsi_CDs[MINOR(cdi->dev)].ms_offset; ms_info->xa_flag=scsi_CDs[MINOR(cdi->dev)].xa_flag || (scsi_CDs[MINOR(cdi->dev)].ms_offset > 0); return 0; } int sr_get_mcn(struct cdrom_device_info *cdi,struct cdrom_mcn *mcn) { u_char sr_cmd[10]; char buffer[32]; int result; sr_cmd[0] = GPCMD_READ_SUBCHANNEL; sr_cmd[1] = ((scsi_CDs[MINOR(cdi->dev)].device->lun) << 5); sr_cmd[2] = 0x40; /* I do want the subchannel info */ sr_cmd[3] = 0x02; /* Give me medium catalog number info */ sr_cmd[4] = sr_cmd[5] = 0; sr_cmd[6] = 0; sr_cmd[7] = 0; sr_cmd[8] = 24; sr_cmd[9] = 0; result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0); memcpy (mcn->medium_catalog_number, buffer + 9, 13); mcn->medium_catalog_number[13] = 0; return result; } int sr_reset(struct cdrom_device_info *cdi) { invalidate_buffers(cdi->dev); return 0; } int sr_select_speed(struct cdrom_device_info *cdi, int speed) { u_char sr_cmd[12]; if (speed == 0) speed = 0xffff; /* set to max */ 
else speed *= 177; /* Nx to kbyte/s */ memset(sr_cmd,0,12); sr_cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ sr_cmd[1] = (scsi_CDs[MINOR(cdi->dev)].device->lun) << 5; sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ sr_cmd[3] = speed & 0xff; /* LSB */ if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0)) return -EIO; return 0; } /* ----------------------------------------------------------------------- */ /* this is called by the generic cdrom driver. arg is a _kernel_ pointer, */ /* becauce the generic cdrom driver does the user access stuff for us. */ /* only cdromreadtochdr and cdromreadtocentry are left - for use with the */ /* sr_disk_status interface for the generic cdrom driver. */ int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void* arg) { u_char sr_cmd[10]; int result, target = MINOR(cdi->dev); unsigned char buffer[32]; switch (cmd) { case CDROMREADTOCHDR: { struct cdrom_tochdr* tochdr = (struct cdrom_tochdr*)arg; sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5); sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; sr_cmd[6] = 0; sr_cmd[7] = 0; /* MSB of length (12) */ sr_cmd[8] = 12; /* LSB of length */ sr_cmd[9] = 0; result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1); tochdr->cdth_trk0 = buffer[2]; tochdr->cdth_trk1 = buffer[3]; break; } case CDROMREADTOCENTRY: { struct cdrom_tocentry* tocentry = (struct cdrom_tocentry*)arg; sr_cmd[0] = GPCMD_READ_TOC_PMA_ATIP; sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0; sr_cmd[6] = tocentry->cdte_track; sr_cmd[7] = 0; /* MSB of length (12) */ sr_cmd[8] = 12; /* LSB of length */ sr_cmd[9] = 0; result = sr_do_ioctl (target, sr_cmd, buffer, 12, 0); tocentry->cdte_ctrl = buffer[5] & 0xf; tocentry->cdte_adr = buffer[5] >> 4; tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 
1 : 0; if (tocentry->cdte_format == CDROM_MSF) { tocentry->cdte_addr.msf.minute = buffer[9]; tocentry->cdte_addr.msf.second = buffer[10]; tocentry->cdte_addr.msf.frame = buffer[11]; } else tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + buffer[10]) << 8) + buffer[11]; break; } case CDROMVOLCTRL: { char * buffer, * mask; struct cdrom_volctrl* volctrl = (struct cdrom_volctrl*)arg; /* First we get the current params so we can just twiddle the volume */ sr_cmd[0] = MODE_SENSE; sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5; sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */ sr_cmd[3] = 0; sr_cmd[4] = 28; sr_cmd[5] = 0; if ((buffer = (unsigned char *) scsi_malloc(512)) == NULL) return -ENOMEM; if ((result = sr_do_ioctl(target, sr_cmd, buffer, 28, 0))) { printk ("Hosed while obtaining audio mode page\n"); scsi_free(buffer, 512); break; } sr_cmd[0] = MODE_SENSE; sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5; sr_cmd[2] = 0x4e; /* Want the mask for mode page 0xe */ sr_cmd[3] = 0; sr_cmd[4] = 28; sr_cmd[5] = 0; mask = (unsigned char *) scsi_malloc(512); if(!mask) { scsi_free(buffer, 512); result = -ENOMEM; break; } if ((result = sr_do_ioctl (target, sr_cmd, mask, 28, 0))) { printk("Hosed obtaining mask for audio mode page\n"); scsi_free(buffer, 512); scsi_free(mask, 512); break; } /* Now mask and substitute our own volume and reuse the rest */ buffer[0] = 0; /* Clear reserved field */ buffer[21] = volctrl->channel0 & mask[21]; buffer[23] = volctrl->channel1 & mask[23]; buffer[25] = volctrl->channel2 & mask[25]; buffer[27] = volctrl->channel3 & mask[27]; sr_cmd[0] = MODE_SELECT; sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 0x10; /* Params are SCSI-2 */ sr_cmd[2] = sr_cmd[3] = 0; sr_cmd[4] = 28; sr_cmd[5] = 0; result = sr_do_ioctl (target, sr_cmd, buffer, 28, 0); scsi_free(buffer, 512); scsi_free(mask, 512); break; } case CDROMVOLREAD: { char * buffer; struct cdrom_volctrl* volctrl = (struct cdrom_volctrl*)arg; /* Get the current 
params */ sr_cmd[0] = MODE_SENSE; sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5; sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */ sr_cmd[3] = 0; sr_cmd[4] = 28; sr_cmd[5] = 0; if ((buffer = (unsigned char *) scsi_malloc(512)) == NULL) return -ENOMEM; if ((result = sr_do_ioctl (target, sr_cmd, buffer, 28, 0))) { printk("(CDROMVOLREAD) Hosed while obtaining audio mode page\n"); scsi_free(buffer, 512); break; } volctrl->channel0 = buffer[21]; volctrl->channel1 = buffer[23]; volctrl->channel2 = buffer[25]; volctrl->channel3 = buffer[27]; scsi_free(buffer, 512); break; } default: return -EINVAL; } #if 0 if (result) printk("DEBUG: sr_audio: result for ioctl %x: %x\n",cmd,result); #endif return result; } /* ----------------------------------------------------------------------- * a function to read all sorts of funny cdrom sectors using the READ_CD * scsi-3 mmc command * * lba: linear block address * format: 0 = data (anything) * 1 = audio * 2 = data (mode 1) * 3 = data (mode 2) * 4 = data (mode 2 form1) * 5 = data (mode 2 form2) * blksize: 2048 | 2336 | 2340 | 2352 */ int sr_read_cd(int minor, unsigned char *dest, int lba, int format, int blksize) { unsigned char cmd[12]; #ifdef DEBUG printk("sr%d: sr_read_cd lba=%d format=%d blksize=%d\n", minor,lba,format,blksize); #endif memset(cmd,0,12); cmd[0] = GPCMD_READ_CD; /* READ_CD */ cmd[1] = (scsi_CDs[minor].device->lun << 5) | ((format & 7) << 2); cmd[2] = (unsigned char)(lba >> 24) & 0xff; cmd[3] = (unsigned char)(lba >> 16) & 0xff; cmd[4] = (unsigned char)(lba >> 8) & 0xff; cmd[5] = (unsigned char) lba & 0xff; cmd[8] = 1; switch (blksize) { case 2336: cmd[9] = 0x58; break; case 2340: cmd[9] = 0x78; break; case 2352: cmd[9] = 0xf8; break; default: cmd[9] = 0x10; break; } return sr_do_ioctl(minor, cmd, dest, blksize, 0); } /* * read sectors with blocksizes other than 2048 */ int sr_read_sector(int minor, int lba, int blksize, unsigned char *dest) { unsigned char cmd[12]; /* the scsi-command */ int rc; /* we 
try the READ CD command first... */ if (scsi_CDs[minor].readcd_known) { rc = sr_read_cd(minor, dest, lba, 0, blksize); if (-EDRIVE_CANT_DO_THIS != rc) return rc; scsi_CDs[minor].readcd_known = 0; printk("CDROM does'nt support READ CD (0xbe) command\n"); /* fall & retry the other way */ } /* ... if this fails, we switch the blocksize using MODE SELECT */ if (blksize != scsi_CDs[minor].sector_size) if (0 != (rc = sr_set_blocklength(minor, blksize))) return rc; #ifdef DEBUG printk("sr%d: sr_read_sector lba=%d blksize=%d\n",minor,lba,blksize); #endif memset(cmd,0,12); cmd[0] = GPCMD_READ_10; cmd[1] = (scsi_CDs[minor].device->lun << 5); cmd[2] = (unsigned char)(lba >> 24) & 0xff; cmd[3] = (unsigned char)(lba >> 16) & 0xff; cmd[4] = (unsigned char)(lba >> 8) & 0xff; cmd[5] = (unsigned char) lba & 0xff; cmd[8] = 1; rc = sr_do_ioctl(minor, cmd, dest, blksize, 0); return rc; } /* * read a sector in raw mode to check the sector format * ret: 1 == mode2 (XA), 0 == mode1, <0 == error */ int sr_is_xa(int minor) { unsigned char *raw_sector; int is_xa; if (!xa_test) return 0; if ((raw_sector = (unsigned char *) scsi_malloc(2048+512)) == NULL) return -ENOMEM; if (0 == sr_read_sector(minor,scsi_CDs[minor].ms_offset+16, CD_FRAMESIZE_RAW1,raw_sector)) { is_xa = (raw_sector[3] == 0x02) ? 1 : 0; } else { /* read a raw sector failed for some reason. 
*/ is_xa = -1; } scsi_free(raw_sector, 2048+512); #ifdef DEBUG printk("sr%d: sr_is_xa: %d\n",minor,is_xa); #endif return is_xa; } int sr_dev_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg) { switch (cmd) { case BLKRAGET: if (!arg) return -EINVAL; return put_user(read_ahead[MAJOR(cdi->dev)], (long *) arg); case BLKRASET: if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!(cdi->dev)) return -EINVAL; if (arg > 0xff) return -EINVAL; read_ahead[MAJOR(cdi->dev)] = arg; return 0; case BLKSSZGET: return put_user(blksize_size[MAJOR(cdi->dev)][MINOR(cdi->dev)], (int *) arg); case BLKFLSBUF: if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!(cdi->dev)) return -EINVAL; fsync_dev(cdi->dev); invalidate_buffers(cdi->dev); return 0; default: return scsi_ioctl(scsi_CDs[MINOR(cdi->dev)].device,cmd,(void *) arg); } } /* * Overrides for Emacs so that we follow Linus's tabbing style. * Emacs will notice this stuff at the end of the file and automatically * adjust the settings for this buffer only. This must remain at the end * of the file. * --------------------------------------------------------------------------- * Local variables: * c-indent-level: 4 * c-brace-imaginary-offset: 0 * c-brace-offset: -4 * c-argdecl-indent: 4 * c-label-offset: -4 * c-continued-statement-offset: 4 * c-continued-brace-offset: 0 * indent-tabs-mode: nil * tab-width: 8 * End: */
gpl-2.0
wwhyte-si/libgcrypt-ntru
sample/test_ntru_gcrypt.cpp
2
4437
/* test_ntru_gcrypt.cpp * Copyright (C) 2015, Security Innovation. * Author Zhenfei Zhang <zzhang@securityinnovation.com> * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see <http://www.gnu.org/licenses/>. */ /* This code is an example of using libgcrypt instantiated with NTRUEncrypt * For the latest version of the NTRUEncrypt specs, visit * https://github.com/NTRUOpenSourceProject/NTRUEncrypt */ #include <iostream> #include <gcrypt.h> using namespace std; #ifndef _MAX_NTRU_BUF_SIZE_ #define _MAX_NTRU_BUF_SIZE_ 4000 #endif #define DEBUG /* Dumps a buffer in hex to the screen for debugging */ void gcrypt_init(); int main() { gcrypt_init(); gcry_error_t err = 0; gcry_sexp_t ntru_parms; gcry_sexp_t ntru_keypair; gcry_sexp_t data; gcry_sexp_t cipher; /* * Check if NTRU is avaliable */ err = gcry_pk_test_algo (GCRY_PK_NTRU); if (err) cerr<<"NTRUEncrypt is not supported: "<<err<<endl; /* * initialization * (genkey(ntru(b256))), (genkey(ntru(n743))) = 256 bits security with dimension 743 * (genkey(ntru(b192))), (genkey(ntru(n593))) = 192 bits security with dimension 593 * (genkey(ntru(b128))), (genkey(ntru(n439))) = 128 bits security with dimension 439 * */ err = gcry_sexp_build(&ntru_parms, NULL, "(genkey(ntru(b128)))"); /* * start key generation */ err = gcry_pk_genkey(&ntru_keypair, ntru_parms); /* * parse key pair into pubk and privk */ gcry_sexp_t pubk = 
gcry_sexp_find_token(ntru_keypair, "public-key", 0); gcry_sexp_t privk = gcry_sexp_find_token(ntru_keypair, "private-key", 0); #ifdef DEBUG /* * dump public key and private key */ cerr<<"error in key gen ?: "<<err<<endl; gcry_sexp_dump (pubk); gcry_sexp_dump (privk); #endif const unsigned char* msg = (const unsigned char*) "Hello SI. Let's encrypt"; err = gcry_sexp_build(&data, NULL, "(data (flags raw) (value %s))", msg); err += gcry_pk_encrypt(&cipher, data, pubk); #ifdef DEBUG /* * dump message and cipher */ cerr<<"error in encryption? : "<<err<<endl; gcry_sexp_dump (data); gcry_sexp_dump (cipher); #endif err = gcry_pk_decrypt(&data, cipher, privk); #ifdef DEBUG /* * dump recovered message */ cerr<<"error in decryption? : "<<err<<endl; gcry_sexp_dump (data); #endif cout << "Hello SI" << endl; // prints Hello SI return 0; } void gcrypt_init() { /* Version check should be the very first call because it makes sure that important subsystems are intialized. */ if (!gcry_check_version (GCRYPT_VERSION)) { cout<<GCRYPT_VERSION<<endl; printf("gcrypt: library version mismatch\n"); } gcry_error_t err = 0; /* We don't want to see any warnings, e.g. because we have not yet parsed program options which might be used to suppress such warnings. */ err = gcry_control (GCRYCTL_SUSPEND_SECMEM_WARN); /* ... If required, other initialization goes here. Note that the process might still be running with increased privileges and that the secure memory has not been intialized. */ /* Allocate a pool of 16k secure memory. This make the secure memory available and also drops privileges where needed. */ err |= gcry_control (GCRYCTL_INIT_SECMEM, 16384, 0); /* It is now okay to let Libgcrypt complain when there was/is a problem with the secure memory. */ err |= gcry_control (GCRYCTL_RESUME_SECMEM_WARN); /* ... If required, other initialization goes here. */ /* Tell Libgcrypt that initialization has completed. 
*/ err |= gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0); if (err) { printf("gcrypt: failed initialization"); } }
gpl-2.0
hackndev/linux-hnd
sound/soc/s3c24xx/rx3000.c
2
10774
/* * rx3000.c -- ALSA Soc Audio Layer * * Copyright (c) 2007 Roman Moravcik <roman.moravcik@gmail.com> * * Based on smdk2440.c and magician.c * * Authors: Graeme Gregory graeme.gregory@wolfsonmicro.com * Philipp Zabel <philipp.zabel@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <sound/driver.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <asm/mach-types.h> #include <asm/hardware/scoop.h> #include <asm/arch/regs-iis.h> #include <asm/arch/regs-clock.h> #include <asm/arch/regs-gpio.h> #include <asm/arch/audio.h> #include <asm/arch/rx3000-asic3.h> #include <linux/mfd/asic3_base.h> #include <asm/io.h> #include <asm/hardware.h> #include "../codecs/uda1380.h" #include "s3c24xx-pcm.h" #include "s3c24xx-i2s.h" extern struct platform_device s3c_device_asic3; #define RX3000_DEBUG 0 #if RX3000_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) 
#endif #define RX3000_HP_OFF 0 #define RX3000_HP_ON 1 #define RX3000_MIC 2 #define RX3000_SPK_ON 0 #define RX3000_SPK_OFF 1 static int rx3000_jack_func = RX3000_HP_OFF; static int rx3000_spk_func = RX3000_SPK_ON; static void rx3000_ext_control(struct snd_soc_codec *codec) { if (rx3000_spk_func == RX3000_SPK_ON) snd_soc_dapm_set_endpoint(codec, "Speaker", 1); else snd_soc_dapm_set_endpoint(codec, "Speaker", 0); /* set up jack connection */ switch (rx3000_jack_func) { case RX3000_HP_OFF: snd_soc_dapm_set_endpoint(codec, "Headphone Jack", 0); snd_soc_dapm_set_endpoint(codec, "Mic Jack", 0); break; case RX3000_HP_ON: snd_soc_dapm_set_endpoint(codec, "Headphone Jack", 1); snd_soc_dapm_set_endpoint(codec, "Mic Jack", 0); break; case RX3000_MIC: snd_soc_dapm_set_endpoint(codec, "Headphone Jack", 0); snd_soc_dapm_set_endpoint(codec, "Mic Jack", 1); break; } snd_soc_dapm_sync_endpoints(codec); } static int rx3000_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->socdev->codec; DBG("Entered rx3000_startup\n"); /* check the jack status at stream startup */ rx3000_ext_control(codec); return 0; } static void rx3000_shutdown(struct snd_pcm_substream *substream) { // struct snd_soc_pcm_runtime *rtd = substream->private_data; // struct snd_soc_codec *codec = rtd->socdev->codec; DBG("Entered rx3000_shutdown\n"); } static int rx3000_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai; struct snd_soc_codec_dai *codec_dai = rtd->dai->codec_dai; unsigned long iis_clkrate; int div, div256, div384, diff256, diff384, bclk; int ret; unsigned int rate=params_rate(params); DBG("Entered %s\n",__FUNCTION__); iis_clkrate = s3c24xx_i2s_get_clockrate(); /* Using PCLK doesnt seem to suit audio particularly well on these cpu's */ div256 = iis_clkrate / (rate * 256); 
div384 = iis_clkrate / (rate * 384); if (((iis_clkrate / div256) - (rate * 256)) < ((rate * 256) - (iis_clkrate / (div256 + 1)))) { diff256 = (iis_clkrate / div256) - (rate * 256); } else { div256++; diff256 = (iis_clkrate / div256) - (rate * 256); } if (((iis_clkrate / div384) - (rate * 384)) < ((rate * 384) - (iis_clkrate / (div384 + 1)))) { diff384 = (iis_clkrate / div384) - (rate * 384); } else { div384++; diff384 = (iis_clkrate / div384) - (rate * 384); } DBG("diff256 %d, diff384 %d\n", diff256, diff384); if (diff256<=diff384) { DBG("Selected 256FS\n"); div = div256; bclk = S3C2410_IISMOD_256FS; } else { DBG("Selected 384FS\n"); div = div384; bclk = S3C2410_IISMOD_384FS; } /* set codec DAI configuration */ ret = codec_dai->dai_ops.set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = cpu_dai->dai_ops.set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set the audio system clock for DAC and ADC */ ret = cpu_dai->dai_ops.set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate, SND_SOC_CLOCK_OUT); if (ret < 0) return ret; /* set MCLK division for sample rate */ ret = cpu_dai->dai_ops.set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, S3C2410_IISMOD_32FS ); if (ret < 0) return ret; /* set BCLK division for sample rate */ ret = cpu_dai->dai_ops.set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK, bclk); if (ret < 0) return ret; /* set prescaler division for sample rate */ ret = cpu_dai->dai_ops.set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER, S3C24XX_PRESCALE(div,div)); if (ret < 0) return ret; return 0; } static struct snd_soc_ops rx3000_ops = { .startup = rx3000_startup, .shutdown = rx3000_shutdown, .hw_params = rx3000_hw_params, }; static int rx3000_get_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = rx3000_jack_func; return 0; } static int rx3000_set_jack(struct snd_kcontrol *kcontrol, 
struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (rx3000_jack_func == ucontrol->value.integer.value[0]) return 0; rx3000_jack_func = ucontrol->value.integer.value[0]; rx3000_ext_control(codec); return 1; } static int rx3000_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = rx3000_spk_func; return 0; } static int rx3000_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (rx3000_spk_func == ucontrol->value.integer.value[0]) return 0; rx3000_spk_func = ucontrol->value.integer.value[0]; rx3000_ext_control(codec); return 1; } static int rx3000_spk_power(struct snd_soc_dapm_widget *w, int event) { if (SND_SOC_DAPM_EVENT_ON(event)) asic3_set_gpio_out_a(&s3c_device_asic3.dev, ASIC3_GPA1, ASIC3_GPA1); else asic3_set_gpio_out_a(&s3c_device_asic3.dev, ASIC3_GPA1, 0); return 0; } /* rx3000 machine dapm widgets */ static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), SND_SOC_DAPM_SPK("Speaker", rx3000_spk_power), }; /* rx3000 machine audio_map */ static const char *audio_map[][3] = { /* headphone connected to VOUTLHP, VOUTRHP */ {"Headphone Jack", NULL, "VOUTLHP"}, {"Headphone Jack", NULL, "VOUTRHP"}, /* ext speaker connected to VOUTL, VOUTR */ {"Speaker", NULL, "VOUTL"}, {"Speaker", NULL, "VOUTR"}, /* mic is connected to VINM */ {"VINM", NULL, "Mic Jack"}, {NULL, NULL, NULL}, }; static const char *jack_function[] = {"Off", "Headphone", "Mic"}; static const char *spk_function[] = {"On", "Off"}; static const struct soc_enum rx3000_enum[] = { SOC_ENUM_SINGLE_EXT(3, jack_function), SOC_ENUM_SINGLE_EXT(2, spk_function), }; static const struct snd_kcontrol_new uda1380_rx3000_controls[] = { SOC_ENUM_EXT("Jack Function", rx3000_enum[0], rx3000_get_jack, rx3000_set_jack), SOC_ENUM_EXT("Speaker 
Function", rx3000_enum[1], rx3000_get_spk, rx3000_set_spk), }; /* * Logic for a UDA1380 as attached to RX3000 */ static int rx3000_uda1380_init(struct snd_soc_codec *codec) { int i, err; DBG("Staring rx3000 init\n"); /* NC codec pins */ snd_soc_dapm_set_endpoint(codec, "VOUTLHP", 0); snd_soc_dapm_set_endpoint(codec, "VOUTRHP", 0); /* Add rx3000 specific controls */ for (i = 0; i < ARRAY_SIZE(uda1380_rx3000_controls); i++) { if ((err = snd_ctl_add(codec->card, snd_soc_cnew(&uda1380_rx3000_controls[i], codec, NULL))) < 0) return err; } /* Add rx3000 specific widgets */ for(i = 0; i < ARRAY_SIZE(uda1380_dapm_widgets); i++) { snd_soc_dapm_new_control(codec, &uda1380_dapm_widgets[i]); } /* Set up rx3000 specific audio path audio_mapnects */ for(i = 0; audio_map[i][0] != NULL; i++) { snd_soc_dapm_connect_input(codec, audio_map[i][0], audio_map[i][1], audio_map[i][2]); } snd_soc_dapm_sync_endpoints(codec); DBG("Ending rx3000 init\n"); return 0; } /* s3c24xx digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link s3c24xx_dai = { .name = "uda1380", .stream_name = "UDA1380", .cpu_dai = &s3c24xx_i2s_dai, .codec_dai = &uda1380_dai[UDA1380_DAI_DUPLEX], .init = rx3000_uda1380_init, .ops = &rx3000_ops, }; /* rx3000 audio machine driver */ static struct snd_soc_machine snd_soc_machine_rx3000 = { .name = "RX3000", .dai_link = &s3c24xx_dai, .num_links = 1, }; static struct uda1380_setup_data rx3000_uda1380_setup = { .i2c_address = 0x1a, .dac_clk = UDA1380_DAC_CLK_SYSCLK, }; /* s3c24xx audio subsystem */ static struct snd_soc_device s3c24xx_snd_devdata = { .machine = &snd_soc_machine_rx3000, .platform = &s3c24xx_soc_platform, .codec_dev = &soc_codec_dev_uda1380, .codec_data = &rx3000_uda1380_setup, }; static struct platform_device *s3c24xx_snd_device; static int __init rx3000_init(void) { int ret; if (!machine_is_rx3715()) return -ENODEV; /* enable uda1380 power supply */ asic3_set_gpio_out_c(&s3c_device_asic3.dev, ASIC3_GPC7, ASIC3_GPC7); /* reset 
uda1380 */ asic3_set_gpio_out_a(&s3c_device_asic3.dev, ASIC3_GPA2, 0); mdelay(1); asic3_set_gpio_out_a(&s3c_device_asic3.dev, ASIC3_GPA2, ASIC3_GPA2); udelay(1); asic3_set_gpio_out_a(&s3c_device_asic3.dev, ASIC3_GPA2, 0); /* correct place? we'll need it to talk to the uda1380 */ request_module("i2c-s3c2410"); s3c24xx_snd_device = platform_device_alloc("soc-audio", -1); if (!s3c24xx_snd_device) { DBG("platform_dev_alloc failed\n"); free_irq(IRQ_EINT19, NULL); return -ENOMEM; } platform_set_drvdata(s3c24xx_snd_device, &s3c24xx_snd_devdata); s3c24xx_snd_devdata.dev = &s3c24xx_snd_device->dev; ret = platform_device_add(s3c24xx_snd_device); if (ret) platform_device_put(s3c24xx_snd_device); return ret; } static void __exit rx3000_exit(void) { platform_device_unregister(s3c24xx_snd_device); } module_init(rx3000_init); module_exit(rx3000_exit); /* Module information */ MODULE_AUTHOR("Roman Moravcik, <roman.moravcik@gmail.com>"); MODULE_DESCRIPTION("ALSA SoC RX3000"); MODULE_LICENSE("GPL");
gpl-2.0
timattox/lammps_USER-DPD
src/REPLICA/fix_event_hyper.cpp
2
2977
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator http://lammps.sandia.gov, Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ #include <cstdlib> #include <cstring> #include "fix_event_hyper.h" #include "atom.h" #include "update.h" #include "domain.h" #include "neighbor.h" #include "comm.h" #include "universe.h" #include "memory.h" #include "error.h" using namespace LAMMPS_NS; using namespace FixConst; /* ---------------------------------------------------------------------- */ FixEventHyper::FixEventHyper(LAMMPS *lmp, int narg, char **arg) : FixEvent(lmp, narg, arg) { if (narg != 3) error->all(FLERR,"Illegal fix event command"); restart_global = 1; event_number = 0; event_timestep = update->ntimestep; clock = 0; } /* ---------------------------------------------------------------------- save current atom coords as an event (via call to base class) called when an event occurs in some replica set event_timestep = when event occurred in a particular replica update clock = elapsed time since last event, across all replicas ------------------------------------------------------------------------- */ void FixEventHyper::store_event_hyper(bigint ntimestep, int delta_clock) { store_event(); event_timestep = ntimestep; clock += delta_clock; event_number++; } /* ---------------------------------------------------------------------- pack entire state of Fix into one write ------------------------------------------------------------------------- */ void FixEventHyper::write_restart(FILE *fp) { int n = 0; double 
list[6]; list[n++] = event_number; list[n++] = event_timestep; list[n++] = clock; list[n++] = replica_number; list[n++] = correlated_event; list[n++] = ncoincident; if (comm->me == 0) { int size = n * sizeof(double); fwrite(&size,sizeof(int),1,fp); fwrite(list,sizeof(double),n,fp); } } /* ---------------------------------------------------------------------- use state info from restart file to restart the Fix ------------------------------------------------------------------------- */ void FixEventHyper::restart(char *buf) { int n = 0; double *list = (double *) buf; event_number = static_cast<int> (list[n++]); event_timestep = static_cast<bigint> (list[n++]); clock = static_cast<bigint> (list[n++]); replica_number = static_cast<int> (list[n++]); correlated_event = static_cast<int> (list[n++]); ncoincident = static_cast<int> (list[n++]); }
gpl-2.0
a1an1in/libcdf
src/libbus/bus.c
2
21395
/** * @file bus.c * @synopsis * @author alan(a1an1in@sina.com) * @version * @date 2016-10-28 */ /* Copyright (c) 2015-2020 alan lin <a1an1in@sina.com> * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdio.h> #include <unistd.h> #include <libdbg/debug.h> #include <libbus/bus.h> #include <libblob/blob.h> #include <miscellany/buffer.h> static const blob_policy_t bus_policy[] = { [BUS_ID] = { .name = "id", .type = BLOB_TYPE_INT32 }, [BUS_OBJNAME] = { .name = "object_name", .type = BLOB_TYPE_STRING }, [BUS_METHORDS] = { .name = "methods", .type = BLOB_TYPE_TABLE }, [BUS_STATE] = { .name = "state", .type = BLOB_TYPE_INT32 }, [BUS_OPAQUE] = { .name = "opaque", .type = BLOB_TYPE_BUFFER }, [BUS_INVOKE_SRC_FD] = { .name = "source_fd", .type = BLOB_TYPE_INT32 }, [BUS_INVOKE_DST_FD] = { .name = "destination_fd", .type = BLOB_TYPE_INT32 }, [BUS_INVOKE_METHOD] = { .name = "invoke_method", .type = BLOB_TYPE_STRING }, [BUS_INVOKE_ARGC] = { .name = "invoke_argc", .type = BLOB_TYPE_INT32 }, [BUS_INVOKE_ARGS] = { .name = "invoke_args", .type = BLOB_TYPE_TABLE }, }; bus_t * bus_alloc(allocator_t *allocator) { bus_t *b; b = (bus_t *)allocator_mem_alloc(allocator,sizeof(bus_t)); if ( b == NULL) { dbg_str(BUS_DETAIL,"allocator_mem_alloc"); return NULL; } memset(b,0, sizeof(bus_t)); b->allocator = allocator; return b; } int bus_set(bus_t *bus,char *attrib_name, char *value, int value_len) { if (!strcmp(attrib_name,"client_sk_type")) { bus->client_sk_type = value; }else{ dbg_str(BUS_WARNNING,"not support attrib setting,please check"); } return 0; } int bus_init(bus_t *bus, char *server_host, char *server_srv, int (*process_client_task_cb)(client_task_t *task)) { if (bus->client_sk_type == NULL) { bus->client_sk_type = (char *)(&(CLIENT_TYPE_UNIX_TCP)); } bus->server_host = server_host; bus->server_srv = server_srv; bus->client = client(bus->allocator, bus->client_sk_type, bus->server_host, bus->server_srv, process_client_task_cb, bus); bus->blob = blob_create(bus->allocator); if (bus->blob == NULL) { client_destroy(bus->client); dbg_str(BUS_WARNNING,"blob_create"); return -1; } blob_init(bus->blob); /*create object hash map*/ if (bus->key_size == 0) { bus->key_size = 
10; } if (bus->bucket_size == 0) { bus->bucket_size = 20; } bus->obj_hmap = hash_map_alloc(bus->allocator); if (bus->obj_hmap == NULL) { dbg_str(BUS_ERROR,"hash_map_create"); //......... return -1; } hash_map_init(bus->obj_hmap, bus->key_size,//uint32_t key_size, sizeof(bus_object_t), bus->bucket_size); /*create req hash map*/ if (bus->req_key_size == 0) { bus->req_key_size = 10; } if (bus->req_bucket_size == 0) { bus->req_bucket_size = 20; } bus->req_hmap = hash_map_alloc(bus->allocator); if (bus->req_hmap == NULL) { dbg_str(BUS_ERROR,"hash_map_create"); //......... return -1; } hash_map_init(bus->req_hmap, bus->req_key_size,//uint32_t req_key_size, sizeof(bus_object_t), bus->req_bucket_size); return 1; } int bus_send(bus_t *bus, void *buf, size_t buf_len) { int ret = 0; ret = client_send(bus->client, buf,//const void *buf, buf_len,//size_t nbytes, 0,//int flags, bus->server_host,//char *dest_id_str, bus->server_srv);//char *dest_srv_str) if (ret < 0) { dbg_str(BUS_WARNNING,"bus send err"); } return ret; } int bus_push_args_to_blob(blob_t *blob,struct bus_method *method) { int i; for (i = 0; i < method->n_policy; i++) { blob_add_u32(blob, method->policy[i].name, method->policy[i].type); } return 0; } int bus_push_methods_to_blob(blob_t *blob,struct bus_object *obj) { int i; dbg_str(BUS_DETAIL,"bus_push_method_to_blob,n_methods=%d",obj->n_methods); for (i = 0; i < obj->n_methods; i++) { dbg_str(BUS_DETAIL,"push method:%s",obj->methods[i].name); blob_add_table_start(blob, obj->methods[i].name); bus_push_args_to_blob(blob,&(obj->methods[i])); blob_add_table_end(blob); dbg_str(BUS_DETAIL,"push method end"); } dbg_str(BUS_DETAIL,"bus_push_methods_to_blob end"); return 0; } int __bus_add_obj(bus_t *bus,struct bus_object *obj) { uint8_t addr_buffer[8]; addr_to_buffer(obj,addr_buffer); hash_map_insert(bus->obj_hmap,obj->name,addr_buffer); return 0; } int bus_add_object(bus_t *bus,struct bus_object *obj) { bus_reqhdr_t hdr; blob_t *blob = bus->blob; #define 
BUS_ADD_OBJECT_MAX_BUFFER_LEN 1024 uint8_t buffer[BUS_ADD_OBJECT_MAX_BUFFER_LEN]; #undef BUS_ADD_OBJECT_MAX_BUFFER_LEN uint32_t buffer_len; dbg_str(BUS_DETAIL,"bus_add_object,obj addr:%p",obj); memset(&hdr,0,sizeof(hdr)); hdr.type = BUS_REQ_ADD_OBJECT; blob_add_table_start(blob,(char *)"object"); { blob_add_string(blob, (char *)"object_name", obj->name); blob_add_u32(blob, (char *)"id", 1); blob_add_table_start(blob, (char *)"methods"); { bus_push_methods_to_blob(blob, obj); } blob_add_table_end(blob); } blob_add_table_end(blob); /* *dbg_str(BUS_DETAIL,"run at here"); */ memcpy(buffer,&hdr, sizeof(hdr)); buffer_len = sizeof(hdr); /* *dbg_buf(BUS_DETAIL,"object:",blob->head,blob_get_len(blob->head)); */ memcpy(buffer + buffer_len,(uint8_t *)blob->head,blob_get_len((blob_attr_t *)blob->head)); buffer_len += blob_get_len((blob_attr_t *)blob->head); dbg_buf(BUS_DETAIL,"bus send:",buffer,buffer_len); bus_send(bus, buffer, buffer_len); __bus_add_obj(bus,obj); return 0; } int bus_handle_add_object_reply(bus_t *bus, blob_attr_t **attr) { int state; dbg_str(BUS_DETAIL,"bus_handle_add_object_reply"); if (attr[BUS_STATE]) { state = blob_get_u32(attr[BUS_STATE]); dbg_str(BUS_DETAIL,"state=%d",state); } if (attr[BUS_OBJNAME]) { dbg_str(BUS_DETAIL,"object name:%s",blob_get_string(attr[BUS_OBJNAME])); } if ( state == 1) { dbg_str(BUS_SUC,"add obj success"); } else { dbg_str(BUS_ERROR,"add obj failed"); //..... 
del the obj } return 0; } int bus_lookup(bus_t *bus, char *key) { bus_reqhdr_t hdr; blob_t *blob = bus->blob; #define BUS_ADD_OBJECT_MAX_BUFFER_LEN 1024 uint8_t buffer[BUS_ADD_OBJECT_MAX_BUFFER_LEN]; #undef BUS_ADD_OBJECT_MAX_BUFFER_LEN uint32_t buffer_len; memset(&hdr,0,sizeof(hdr)); hdr.type = BUS_REQ_LOOKUP; blob_add_table_start(blob,(char *)"lookup"); { blob_add_string(blob, (char *)"object_name", key); } blob_add_table_end(blob); /* *dbg_str(BUS_DETAIL,"run at here"); */ memcpy(buffer,&hdr, sizeof(hdr)); buffer_len = sizeof(hdr); /* *dbg_buf(BUS_DETAIL,"object:",blob->head,blob_get_len(blob->head)); */ memcpy(buffer + buffer_len,(uint8_t *)blob->head,blob_get_len((blob_attr_t *)blob->head)); buffer_len += blob_get_len((blob_attr_t *)blob->head); dbg_buf(BUS_DETAIL,"bus send:",buffer,buffer_len); bus_send(bus, buffer, buffer_len); return 0; } int bus_handle_lookup_object_reply(bus_t *bus, blob_attr_t **attr) { struct bus_object *obj; blob_attr_t *attrib,*head; uint32_t len; int ret; dbg_str(BUS_DETAIL,"bus_handle_lookup_object_reply"); if (attr[BUS_ID]) { dbg_str(BUS_DETAIL,"object id:%d",blob_get_u32(attr[BUS_ID])); } if (attr[BUS_OBJNAME]) { dbg_str(BUS_DETAIL,"object name:%s",blob_get_string(attr[BUS_OBJNAME])); } if (attr[BUS_METHORDS]) { dbg_str(BUS_DETAIL,"object methods"); head = (blob_attr_t *)blob_get_data(attr[BUS_METHORDS]); len = blob_get_data_len(attr[BUS_METHORDS]); blob_for_each_attr(attrib, head, len) { dbg_str(BUS_DETAIL,"method name:%s",blob_get_name(attrib)); } } } int bus_blob_add_args(blob_t *blob,int argc, bus_method_args_t *args) { int i; for (i = 0; i < argc; i++) { if (args[i].type == ARG_TYPE_STRING) { blob_add_string(blob, (char *)args[i].name, args[i].value); } else if (args[i].type == ARG_TYPE_INT32) { blob_add_u32(blob, (char *)args[i].name, atoi(args[i].value)); } else { dbg_str(BUS_WARNNING,"bus_blob_add_args,not support type = %d",args[i].type); } dbg_str(BUS_DETAIL,"bus_blob_add_arg:name \"%s\" value \"%s\"",args[i].name, 
args[i].value); } return 0; } int bus_invoke(bus_t *bus,char *key, char *method,int argc, bus_method_args_t *args) { bus_reqhdr_t hdr; blob_t *blob = bus->blob; #define BUS_ADD_OBJECT_MAX_BUFFER_LEN 1024 uint8_t buffer[BUS_ADD_OBJECT_MAX_BUFFER_LEN]; #undef BUS_ADD_OBJECT_MAX_BUFFER_LEN uint32_t buffer_len; dbg_str(BUS_SUC,"bus_invoke"); /*compose req proto*/ memset(&hdr,0,sizeof(hdr)); hdr.type = BUS_REQ_INVOKE; blob_add_table_start(blob,(char *)"invoke"); { blob_add_string(blob, (char *)"invoke_key", key); blob_add_string(blob, (char *)"invoke_method", method); blob_add_u8(blob, (char *)"invoke_argc", argc); blob_add_table_start(blob, (char *)"invoke_args"); { bus_blob_add_args(blob,argc,args); } blob_add_table_end(blob); } blob_add_table_end(blob); memcpy(buffer,&hdr, sizeof(hdr)); buffer_len = sizeof(hdr); memcpy(buffer + buffer_len,(uint8_t *)blob->head,blob_get_len((blob_attr_t *)blob->head)); buffer_len += blob_get_len((blob_attr_t *)blob->head); /*send req proto*/ dbg_buf(BUS_DETAIL,"bus send:",buffer,buffer_len); bus_send(bus, buffer, buffer_len); return 0; } int bus_invoke_async(bus_t *bus,char *key, char *method,int argc, char **args) { /* * bus_req_t req; * * bus_invoke(bus,key, method,argc, args); * * req.method = method; * req.state = -1; * * make_pair(bus->pair,method,&req); * hash_map_insert_data(bus->obj_hmap,bus->pair->data); */ return 0; } int bus_invoke_sync(bus_t *bus,char *key, char *method,int argc, bus_method_args_t *args,char *out_buf,char *out_len) { bus_req_t req,*req_back; hash_map_pos_t out; int ret; int count = 0; int state = 0; #define MAX_BUFFER_LEN 2048 char buffer[MAX_BUFFER_LEN]; #undef MAX_BUFFER_LEN req.method = method; req.state = 0xffff; req.opaque = NULL; req.opaque_len = 0; req.opaque_buffer_len = 0; ret = hash_map_insert_wb(bus->req_hmap,method,&req,&out); if (ret < 0) { dbg_str(BUS_WARNNING,"bus_invoke_sync"); return ret; } req_back = (bus_req_t *)hash_map_pos_get_pointer(&out); req_back->opaque = buffer; 
req_back->opaque_buffer_len = sizeof(buffer); bus_invoke(bus,key, method,argc, args); while (req_back->state == 0xffff) { sleep(1); count++; if (count > 5) { req_back->state = -99; break; } } dbg_str(BUS_SUC,"bus_invoke_sync,rev return state =%d",req_back->state); dbg_buf(BUS_SUC,"opaque:",req_back->opaque,req_back->opaque_len); memcpy(out_buf,req_back->opaque,req_back->opaque_len); *out_len = req_back->opaque_len; state = req_back->state; hash_map_delete(bus->req_hmap, &out); return state; } int bus_handle_invoke_reply(bus_t *bus, blob_attr_t **attr) { char *obj_name, *method_name; hash_map_pos_t pos; bus_req_t *req; int state; int ret; char *buffer = NULL; int buffer_len = 0; dbg_str(BUS_SUC,"bus_handle_invoke_reply"); if (attr[BUS_STATE]) { state = blob_get_u32(attr[BUS_STATE]); dbg_str(BUS_DETAIL,"state:%d",state); } if (attr[BUS_OBJNAME]) { obj_name = blob_get_string(attr[BUS_OBJNAME]); dbg_str(BUS_DETAIL,"object name:%s",obj_name); } if (attr[BUS_INVOKE_METHOD]) { method_name = blob_get_string(attr[BUS_INVOKE_METHOD]); dbg_str(BUS_DETAIL,"method name:%s",method_name); } if (attr[BUS_OPAQUE]) { buffer_len = blob_get_buffer(attr[BUS_OPAQUE],(uint8_t**)&buffer); dbg_buf(BUS_DETAIL,"bus_handle_invoke_reply,buffer:",buffer,buffer_len); } if (method_name != NULL) { ret = hash_map_search(bus->req_hmap, method_name ,&pos); if (ret > 0) { req = (bus_req_t *)hash_map_pos_get_pointer(&pos); req->state = state; if (req->opaque_buffer_len < buffer_len) { dbg_str(BUS_WARNNING,"opaque buffer is too small , please check"); } req->opaque_len = buffer_len; memcpy(req->opaque,buffer,buffer_len); dbg_str(BUS_DETAIL,"method_name:%s,state:%d",req->method,req->state); } } return 0; } bus_handler_t bus_get_method_handler(bus_object_t *obj,char *method) { int i; for (i = 0; i < obj->n_methods; i++) { if (!strncmp(obj->methods[i].name,method,sizeof(obj->methods[i].name))) { return obj->methods[i].handler; } } return NULL; } blob_policy_t * bus_get_policy(bus_object_t *obj,char 
*method) { int i; for (i = 0; i < obj->n_methods; i++) { if (!strncmp(obj->methods[i].name,method,sizeof(obj->methods[i].name))) { return obj->methods[i].policy; } } return NULL; } int bus_get_n_policy(bus_object_t *obj,char *method) { int i; for (i = 0; i < obj->n_methods; i++) { if (!strncmp(obj->methods[i].name,method,sizeof(obj->methods[i].name))) { return obj->methods[i].n_policy; } } return -1; } int bus_reply_forward_invoke(bus_t *bus, char *obj_name,char *method_name, int ret, char *buf, int buf_len,int src_fd) { #define BUS_ADD_OBJECT_MAX_BUFFER_LEN 2048 bus_reqhdr_t hdr; blob_t *blob; uint8_t buffer[BUS_ADD_OBJECT_MAX_BUFFER_LEN]; uint32_t buffer_len,tmp_len; allocator_t *allocator = bus->allocator; dbg_str(BUS_SUC,"bus_reply_forward_invoke"); memset(&hdr,0,sizeof(hdr)); hdr.type = BUS_REPLY_FORWARD_INVOKE; blob = blob_create(allocator); if (blob == NULL) { dbg_str(BUS_WARNNING,"blob_create"); return -1; } blob_init(blob); blob_add_table_start(blob,(char *)"reply_forward_invoke"); { blob_add_string(blob, (char *)"object_name", obj_name); blob_add_string(blob, (char *)"invoke_method", method_name); blob_add_u32(blob, (char *)"state", ret); blob_add_buffer(blob, (char *)"opaque", buf, buf_len); blob_add_u32(blob, (char *)"source_fd", src_fd); } blob_add_table_end(blob); memcpy(buffer,&hdr, sizeof(hdr)); buffer_len = sizeof(hdr); tmp_len = buffer_len + blob_get_len((blob_attr_t *)blob->head); if (tmp_len > BUS_ADD_OBJECT_MAX_BUFFER_LEN) { dbg_str(BUS_WARNNING,"buffer is too small,please check"); return -1; } memcpy(buffer + buffer_len,(uint8_t *)blob->head,blob_get_len((blob_attr_t *)blob->head)); buffer_len += blob_get_len((blob_attr_t *)blob->head); dbg_buf(BUS_DETAIL,"bus send:",buffer,buffer_len); bus_send(bus, buffer, buffer_len); return 0; #undef BUS_ADD_OBJECT_MAX_BUFFER_LEN } int bus_handle_forward_invoke(bus_t *bus, blob_attr_t **attr) { bus_object_t *obj = NULL; blob_attr_t *args = NULL; int argc = 0; char *method_name = NULL; char *obj_name; int 
src_fd = -1; hash_map_pos_t pos; bus_handler_t method; uint8_t *p; blob_policy_t *policy; int n_policy; struct blob_attr_s *tb[10]; #define MAX_BUFFER_LEN 2048 char buffer[MAX_BUFFER_LEN]; int ret, buffer_len = 9; dbg_str(BUS_SUC,"bus_handle_forward_invoke"); if (attr[BUS_INVOKE_SRC_FD]) { src_fd = blob_get_u32(attr[BUS_INVOKE_SRC_FD]); dbg_str(BUS_DETAIL,"invoke src fd:%d",src_fd); } if (attr[BUS_INVOKE_METHOD]) { method_name = blob_get_string(attr[BUS_INVOKE_METHOD]); dbg_str(BUS_DETAIL,"invoke method_name:%s",method_name); } if (attr[BUS_INVOKE_ARGC]) { argc = blob_get_u8(attr[BUS_INVOKE_ARGC]); dbg_str(BUS_DETAIL,"invoke argc=%d",argc); } if (attr[BUS_INVOKE_ARGS]) { dbg_str(BUS_DETAIL,"invoke args"); args = attr[BUS_INVOKE_ARGS]; } if (attr[BUS_OBJNAME]) { obj_name = blob_get_string(attr[BUS_OBJNAME]); dbg_str(BUS_DETAIL,"object name:%s",obj_name); } if (method_name != NULL) { ret = hash_map_search(bus->obj_hmap, obj_name ,&pos); if (ret > 0) { p = (uint8_t *)hash_map_pos_get_pointer(&pos); obj = (bus_object_t *)buffer_to_addr(p); dbg_str(BUS_DETAIL,"obj addr:%p",obj); method = bus_get_method_handler(obj,method_name); policy = bus_get_policy(obj,method_name); n_policy = bus_get_n_policy(obj,method_name); dbg_str(BUS_DETAIL,"policy addr:%p,size=%d",policy,ARRAY_SIZE(policy)); blob_parse_to_attr(policy, n_policy, tb, blob_get_data(args), blob_get_data_len(args)); ret = method(bus,argc,tb,buffer,&buffer_len); if (buffer_len > MAX_BUFFER_LEN) { dbg_str(BUS_WARNNING,"buffer is too small,please check"); } bus_reply_forward_invoke(bus,obj_name,method_name, ret, buffer, buffer_len,src_fd); } } return 0; #undef MAX_BUFFER_LEN } static bus_cmd_callback handlers[__BUS_REQ_LAST] = { [BUSD_REPLY_ADD_OBJECT] = bus_handle_add_object_reply, [BUSD_REPLY_LOOKUP] = bus_handle_lookup_object_reply, [BUSD_REPLY_INVOKE] = bus_handle_invoke_reply, [BUSD_FORWARD_INVOKE] = bus_handle_forward_invoke, }; static int bus_process_receiving_data_callback(client_task_t *task) { bus_reqhdr_t 
*hdr; blob_attr_t *blob_attr; blob_attr_t *tb[__BUS_MAX]; bus_cmd_callback cb = NULL; client_t *client = task->client; bus_t *bus = (bus_t *)client->opaque; int len; dbg_str(BUS_DETAIL,"bus_process_receiving_data_callback"); dbg_buf(BUS_DETAIL,"task buffer:",task->buffer,task->buffer_len); hdr = (bus_reqhdr_t *)task->buffer; blob_attr = (blob_attr_t *)(task->buffer + sizeof(bus_reqhdr_t)); if (hdr->type > __BUS_REQ_LAST) { dbg_str(BUS_WARNNING,"bus receive err proto type"); return -1; } cb = handlers[hdr->type]; len = blob_get_data_len(blob_attr); blob_attr =(blob_attr_t*) blob_get_data(blob_attr); blob_parse_to_attr(bus_policy, ARRAY_SIZE(bus_policy), tb, blob_attr, len); cb(bus,tb); dbg_str(BUS_DETAIL,"process_rcv end"); return 0; } bus_t * bus_create(allocator_t *allocator, char *server_host, char *server_srv, char *socket_type) { bus_t *bus; dbg_str(BUS_DETAIL,"bus_create"); bus = bus_alloc(allocator); bus_set(bus,"client_sk_type", socket_type, 0); bus_init(bus,//bus_t *bus, server_host,//char *server_host, server_srv,//char *server_srv, bus_process_receiving_data_callback); return bus; }
gpl-2.0
teamfx/openjfx-10-dev-rt
modules/javafx.web/src/main/native/Source/WebCore/html/RadioInputType.cpp
2
7069
/* * Copyright (C) 2005, 2011, 2016 Apple Inc. All rights reserved. * Copyright (C) 2010 Google Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #include "config.h" #include "RadioInputType.h" #include "Frame.h" #include "HTMLFormElement.h" #include "HTMLInputElement.h" #include "HTMLNames.h" #include "InputTypeNames.h" #include "KeyboardEvent.h" #include "LocalizedStrings.h" #include "MouseEvent.h" #include "NodeTraversal.h" #include "SpatialNavigation.h" namespace WebCore { using namespace HTMLNames; const AtomicString& RadioInputType::formControlType() const { return InputTypeNames::radio(); } bool RadioInputType::valueMissing(const String&) const { return element().isInRequiredRadioButtonGroup() && !element().checkedRadioButtonForGroup(); } String RadioInputType::valueMissingText() const { return validationMessageValueMissingForRadioText(); } void RadioInputType::handleClickEvent(MouseEvent& event) { event.setDefaultHandled(); } void RadioInputType::handleKeydownEvent(KeyboardEvent& event) { BaseCheckableInputType::handleKeydownEvent(event); if (event.defaultHandled()) return; const String& key = event.keyIdentifier(); if (key != "Up" && key != "Down" && key != "Left" && key != "Right") return; // Left and up mean "previous radio 
button". // Right and down mean "next radio button". // Tested in WinIE, and even for RTL, left still means previous radio button (and so moves // to the right). Seems strange, but we'll match it. // However, when using Spatial Navigation, we need to be able to navigate without changing the selection. if (isSpatialNavigationEnabled(element().document().frame())) return; bool forward = (key == "Down" || key == "Right"); // We can only stay within the form's children if the form hasn't been demoted to a leaf because // of malformed HTML. Node* node = &element(); while ((node = (forward ? NodeTraversal::next(*node) : NodeTraversal::previous(*node)))) { // Once we encounter a form element, we know we're through. if (is<HTMLFormElement>(*node)) break; // Look for more radio buttons. if (!is<HTMLInputElement>(*node)) continue; RefPtr<HTMLInputElement> inputElement = downcast<HTMLInputElement>(node); if (inputElement->form() != element().form()) break; if (inputElement->isRadioButton() && inputElement->name() == element().name() && inputElement->isFocusable()) { element().document().setFocusedElement(inputElement.get()); inputElement->dispatchSimulatedClick(&event, SendNoEvents, DoNotShowPressedLook); event.setDefaultHandled(); return; } } } void RadioInputType::handleKeyupEvent(KeyboardEvent& event) { const String& key = event.keyIdentifier(); if (key != "U+0020") return; // If an unselected radio is tabbed into (because the entire group has nothing // checked, or because of some explicit .focus() call), then allow space to check it. if (element().checked()) return; dispatchSimulatedClickIfActive(event); } bool RadioInputType::isKeyboardFocusable(KeyboardEvent& event) const { if (!InputType::isKeyboardFocusable(event)) return false; // When using Spatial Navigation, every radio button should be focusable. if (isSpatialNavigationEnabled(element().document().frame())) return true; // Never allow keyboard tabbing to leave you in the same radio group. 
Always // skip any other elements in the group. Element* currentFocusedNode = element().document().focusedElement(); if (is<HTMLInputElement>(currentFocusedNode)) { HTMLInputElement& focusedInput = downcast<HTMLInputElement>(*currentFocusedNode); if (focusedInput.isRadioButton() && focusedInput.form() == element().form() && focusedInput.name() == element().name()) return false; } // Allow keyboard focus if we're checked or if nothing in the group is checked. return element().checked() || !element().checkedRadioButtonForGroup(); } bool RadioInputType::shouldSendChangeEventAfterCheckedChanged() { // Don't send a change event for a radio button that's getting unchecked. // This was done to match the behavior of other browsers. return element().checked(); } void RadioInputType::willDispatchClick(InputElementClickState& state) { // An event handler can use preventDefault or "return false" to reverse the selection we do here. // The InputElementClickState object contains what we need to undo what we did here in didDispatchClick. // We want radio groups to end up in sane states, i.e., to have something checked. // Therefore if nothing is currently selected, we won't allow the upcoming action to be "undone", since // we want some object in the radio group to actually get selected. state.checked = element().checked(); state.checkedRadioButton = element().checkedRadioButtonForGroup(); element().setChecked(true, DispatchChangeEvent); } void RadioInputType::didDispatchClick(Event* event, const InputElementClickState& state) { if (event->defaultPrevented() || event->defaultHandled()) { // Restore the original selected radio button if possible. // Make sure it is still a radio button and only do the restoration if it still belongs to our group. 
HTMLInputElement* checkedRadioButton = state.checkedRadioButton.get(); if (checkedRadioButton && checkedRadioButton->isRadioButton() && checkedRadioButton->form() == element().form() && checkedRadioButton->name() == element().name()) { checkedRadioButton->setChecked(true); } } // The work we did in willDispatchClick was default handling. event->setDefaultHandled(); } bool RadioInputType::isRadioButton() const { return true; } bool RadioInputType::matchesIndeterminatePseudoClass() const { const HTMLInputElement& element = this->element(); if (const RadioButtonGroups* radioButtonGroups = element.radioButtonGroups()) return !radioButtonGroups->hasCheckedButton(&element); return !element.checked(); } } // namespace WebCore
gpl-2.0
Spade17/4.3.4-Core
src/server/scripts/EasternKingdoms/Karazhan/boss_nightbane.cpp
2
13272
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_Nightbane SD%Complete: 80 SDComment: SDComment: Timers may incorrect SDCategory: Karazhan EndScriptData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "karazhan.h" enum Spells { // phase 1 SPELL_BELLOWING_ROAR = 39427, SPELL_CHARRED_EARTH = 30129, SPELL_DISTRACTING_ASH = 30130, SPELL_SMOLDERING_BREATH = 30210, SPELL_TAIL_SWEEP = 25653, // phase 2 SPELL_RAIN_OF_BONES = 37098, SPELL_SMOKING_BLAST = 37057, SPELL_FIREBALL_BARRAGE = 30282, SPELL_SEARING_CINDERS = 30127, SPELL_SUMMON_SKELETON = 30170 }; enum Says { EMOTE_SUMMON = 0, // Not used in script YELL_AGGRO = 1, YELL_FLY_PHASE = 2, YELL_LAND_PHASE = 3, EMOTE_BREATH = 4 }; float IntroWay[8][3] = { {-11053.37f, -1794.48f, 149.00f}, {-11141.07f, -1841.40f, 125.00f}, {-11187.28f, -1890.23f, 125.00f}, {-11189.20f, -1931.25f, 125.00f}, {-11153.76f, -1948.93f, 125.00f}, {-11128.73f, -1929.75f, 125.00f}, {-11140.00f, -1915.00f, 122.00f}, {-11163.00f, -1903.00f, 91.473f} }; class boss_nightbane : public CreatureScript { public: boss_nightbane() : CreatureScript("boss_nightbane") { } CreatureAI* GetAI(Creature* creature) const { return new boss_nightbaneAI (creature); } struct boss_nightbaneAI : public 
ScriptedAI { boss_nightbaneAI(Creature* creature) : ScriptedAI(creature) { instance = creature->GetInstanceScript(); Intro = true; } InstanceScript* instance; uint32 Phase; bool RainBones; bool Skeletons; uint32 BellowingRoarTimer; uint32 CharredEarthTimer; uint32 DistractingAshTimer; uint32 SmolderingBreathTimer; uint32 TailSweepTimer; uint32 RainofBonesTimer; uint32 SmokingBlastTimer; uint32 FireballBarrageTimer; uint32 SearingCindersTimer; uint32 FlyCount; uint32 FlyTimer; bool Intro; bool Flying; bool Movement; uint32 WaitTimer; uint32 MovePhase; void Reset() { BellowingRoarTimer = 30000; CharredEarthTimer = 15000; DistractingAshTimer = 20000; SmolderingBreathTimer = 10000; TailSweepTimer = 12000; RainofBonesTimer = 10000; SmokingBlastTimer = 20000; FireballBarrageTimer = 13000; SearingCindersTimer = 14000; WaitTimer = 1000; Phase =1; FlyCount = 0; MovePhase = 0; me->SetSpeed(MOVE_RUN, 2.0f); me->SetDisableGravity(true); me->SetWalk(false); me->setActive(true); if (instance) { if (instance->GetData(TYPE_NIGHTBANE) == DONE || instance->GetData(TYPE_NIGHTBANE) == IN_PROGRESS) me->DisappearAndDie(); else instance->SetData(TYPE_NIGHTBANE, NOT_STARTED); } HandleTerraceDoors(true); Flying = false; Movement = false; if (!Intro) { me->SetHomePosition(IntroWay[7][0], IntroWay[7][1], IntroWay[7][2], 0); me->GetMotionMaster()->MoveTargetedHome(); } } void HandleTerraceDoors(bool open) { if (instance) { instance->HandleGameObject(instance->GetData64(DATA_MASTERS_TERRACE_DOOR_1), open); instance->HandleGameObject(instance->GetData64(DATA_MASTERS_TERRACE_DOOR_2), open); } } void EnterCombat(Unit* /*who*/) { if (instance) instance->SetData(TYPE_NIGHTBANE, IN_PROGRESS); HandleTerraceDoors(false); Talk(YELL_AGGRO); } void AttackStart(Unit* who) { if (!Intro && !Flying) ScriptedAI::AttackStart(who); } void JustDied(Unit* /*killer*/) { if (instance) instance->SetData(TYPE_NIGHTBANE, DONE); HandleTerraceDoors(true); } void MoveInLineOfSight(Unit* who) { if (!Intro && !Flying) 
ScriptedAI::MoveInLineOfSight(who); } void MovementInform(uint32 type, uint32 id) { if (type != POINT_MOTION_TYPE) return; if (Intro) { if (id >= 8) { Intro = false; me->SetHomePosition(IntroWay[7][0], IntroWay[7][1], IntroWay[7][2], 0); return; } WaitTimer = 1; } if (Flying) { if (id == 0) { Talk(EMOTE_BREATH); Flying = false; Phase = 2; return; } if (id == 3) { MovePhase = 4; WaitTimer = 1; return; } if (id == 8) { Flying = false; Phase = 1; Movement = true; return; } WaitTimer = 1; } } void JustSummoned(Creature* summoned) { summoned->AI()->AttackStart(me->getVictim()); } void TakeOff() { Talk(YELL_FLY_PHASE); me->InterruptSpell(CURRENT_GENERIC_SPELL); me->HandleEmoteCommand(EMOTE_ONESHOT_LIFTOFF); me->SetDisableGravity(true); (*me).GetMotionMaster()->Clear(false); (*me).GetMotionMaster()->MovePoint(0, IntroWay[2][0], IntroWay[2][1], IntroWay[2][2]); Flying = true; FlyTimer = urand(45000, 60000); //timer wrong between 45 and 60 seconds ++FlyCount; RainofBonesTimer = 5000; //timer wrong (maybe) RainBones = false; Skeletons = false; } void UpdateAI(uint32 diff) { /* The timer for this was never setup apparently, not sure if the code works properly: if (WaitTimer <= diff) { if (Intro) { if (MovePhase >= 7) { me->SetLevitate(false); me->HandleEmoteCommand(EMOTE_ONESHOT_LAND); me->GetMotionMaster()->MovePoint(8, IntroWay[7][0], IntroWay[7][1], IntroWay[7][2]); } else { me->GetMotionMaster()->MovePoint(MovePhase, IntroWay[MovePhase][0], IntroWay[MovePhase][1], IntroWay[MovePhase][2]); ++MovePhase; } } if (Flying) { if (MovePhase >= 7) { me->SetLevitate(false); me->HandleEmoteCommand(EMOTE_ONESHOT_LAND); me->GetMotionMaster()->MovePoint(8, IntroWay[7][0], IntroWay[7][1], IntroWay[7][2]); } else { me->GetMotionMaster()->MovePoint(MovePhase, IntroWay[MovePhase][0], IntroWay[MovePhase][1], IntroWay[MovePhase][2]); ++MovePhase; } } WaitTimer = 0; } else WaitTimer -= diff; */ if (!UpdateVictim()) return; //if (Flying) who is the idiot who wrote those two lines? 
// return; // Phase 1 "GROUND FIGHT" if (Phase == 1) { if (Movement) { DoStartMovement(me->getVictim()); Movement = false; } if (BellowingRoarTimer <= diff) { DoCast(me->getVictim(), SPELL_BELLOWING_ROAR); BellowingRoarTimer = urand(30000, 40000); } else BellowingRoarTimer -= diff; if (SmolderingBreathTimer <= diff) { DoCast(me->getVictim(), SPELL_SMOLDERING_BREATH); SmolderingBreathTimer = 20000; } else SmolderingBreathTimer -= diff; if (CharredEarthTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true)) DoCast(target, SPELL_CHARRED_EARTH); CharredEarthTimer = 20000; } else CharredEarthTimer -= diff; if (TailSweepTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true)) if (!me->HasInArc(M_PI, target)) DoCast(target, SPELL_TAIL_SWEEP); TailSweepTimer = 15000; } else TailSweepTimer -= diff; if (SearingCindersTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true)) DoCast(target, SPELL_SEARING_CINDERS); SearingCindersTimer = 10000; } else SearingCindersTimer -= diff; uint32 Prozent = uint32(me->GetHealthPct()); if (Prozent < 75 && FlyCount == 0) // first take off 75% TakeOff(); if (Prozent < 50 && FlyCount == 1) // secound take off 50% TakeOff(); if (Prozent < 25 && FlyCount == 2) // third take off 25% TakeOff(); DoMeleeAttackIfReady(); } //Phase 2 "FLYING FIGHT" if (Phase == 2) { if (!RainBones) { if (!Skeletons) { for (uint8 i = 0; i <= 3; ++i) { DoCast(me->getVictim(), SPELL_SUMMON_SKELETON); Skeletons = true; } } if (RainofBonesTimer < diff && !RainBones) // only once at the beginning of phase 2 { DoCast(me->getVictim(), SPELL_RAIN_OF_BONES); RainBones = true; SmokingBlastTimer = 20000; } else RainofBonesTimer -= diff; if (DistractingAshTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true)) DoCast(target, SPELL_DISTRACTING_ASH); DistractingAshTimer = 2000; //timer wrong } else DistractingAshTimer -= diff; } if (RainBones) { if (SmokingBlastTimer 
<= diff) { DoCast(me->getVictim(), SPELL_SMOKING_BLAST); SmokingBlastTimer = 1500; //timer wrong } else SmokingBlastTimer -= diff; } if (FireballBarrageTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_FARTHEST, 0)) DoCast(target, SPELL_FIREBALL_BARRAGE); FireballBarrageTimer = 20000; } else FireballBarrageTimer -= diff; if (FlyTimer <= diff) //landing { Talk(YELL_LAND_PHASE); me->GetMotionMaster()->Clear(false); me->GetMotionMaster()->MovePoint(3, IntroWay[3][0], IntroWay[3][1], IntroWay[3][2]); Flying = true; } else FlyTimer -= diff; } } }; }; void AddSC_boss_nightbane() { new boss_nightbane(); }
gpl-2.0
sjp38/linux.doc_trans_membarrier
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2
8904
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse * Christian König */ #include <linux/seq_file.h> #include <linux/slab.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" #include "atom.h" /* * IB * IBs (Indirect Buffers) and areas of GPU accessible memory where * commands are stored. You can put a pointer to the IB in the * command ring and the hw will fetch the commands from the IB * and execute them. Generally userspace acceleration drivers * produce command buffers which are send to the kernel and * put in IBs for execution by the requested ring. 
*/ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); /** * amdgpu_ib_get - request an IB (Indirect Buffer) * * @ring: ring index the IB is associated with * @size: requested IB size * @ib: IB object returned * * Request an IB (all asics). IBs are allocated using the * suballocator. * Returns 0 on success, error on failure. */ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib) { int r; if (size) { r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &ib->sa_bo, size, 256); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); return r; } ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo); if (!vm) ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } return 0; } /** * amdgpu_ib_free - free an IB (Indirect Buffer) * * @adev: amdgpu_device pointer * @ib: IB object to free * @f: the fence SA bo need wait on for the ib alloation * * Free an IB (all asics). */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f) { amdgpu_sa_bo_free(adev, &ib->sa_bo, f); } /** * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring * * @adev: amdgpu_device pointer * @num_ibs: number of IBs to schedule * @ibs: IB objects to schedule * @f: fence created during this submission * * Schedule an IB on the associated ring (all asics). * Returns 0 on success, error on failure. * * On SI, there are two parallel engines fed from the primary ring, * the CE (Constant Engine) and the DE (Drawing Engine). Since * resource descriptors have moved to memory, the CE allows you to * prime the caches while the DE is updating register state so that * the resource descriptors will be already in cache when the draw is * processed. To accomplish this, the userspace driver submits two * IBs, one for the CE and one for the DE. If there is a CE IB (called * a CONST_IB), it will be put on the ring prior to the DE IB. Prior * to SI there was just a DE IB. 
*/ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ibs, struct fence *last_vm_update, struct amdgpu_job *job, struct fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; struct fence *hwf; uint64_t ctx; unsigned i; int r = 0; if (num_ibs == 0) return -EINVAL; /* ring tests don't use a job */ if (job) { vm = job->vm; ctx = job->ctx; } else { vm = NULL; ctx = 0; } if (!ring->ready) { dev_err(adev->dev, "couldn't schedule ib\n"); return -EINVAL; } if (vm && !job->vm_id) { dev_err(adev->dev, "VM IB without ID\n"); return -EINVAL; } r = amdgpu_ring_alloc(ring, 256 * num_ibs); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); return r; } if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); if (vm) { r = amdgpu_vm_flush(ring, job); if (r) { amdgpu_ring_undo(ring); return r; } } if (ring->funcs->emit_hdp_flush) amdgpu_ring_emit_hdp_flush(ring); /* always set cond_exec_polling to CONTINUE */ *ring->cond_exe_cpu_addr = 1; skip_preamble = ring->current_ctx == ctx; need_ctx_switch = ring->current_ctx != ctx; for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; /* drop preamble IBs if we don't have a context switch */ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) continue; amdgpu_ring_emit_ib(ring, ib, job ? 
job->vm_id : 0, need_ctx_switch); need_ctx_switch = false; } if (ring->funcs->emit_hdp_invalidate) amdgpu_ring_emit_hdp_invalidate(ring); r = amdgpu_fence_emit(ring, &hwf); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) amdgpu_vm_reset_id(adev, job->vm_id); amdgpu_ring_undo(ring); return r; } /* wrap the last IB with fence */ if (job && job->uf_addr) { amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, AMDGPU_FENCE_FLAG_64BIT); } if (f) *f = fence_get(hwf); if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); ring->current_ctx = ctx; amdgpu_ring_commit(ring); return 0; } /** * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool * * @adev: amdgpu_device pointer * * Initialize the suballocator to manage a pool of memory * for use as IBs (all asics). * Returns 0 on success, error on failure. */ int amdgpu_ib_pool_init(struct amdgpu_device *adev) { int r; if (adev->ib_pool_ready) { return 0; } r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo, AMDGPU_IB_POOL_SIZE*64*1024, AMDGPU_GPU_PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT); if (r) { return r; } r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo); if (r) { return r; } adev->ib_pool_ready = true; if (amdgpu_debugfs_sa_init(adev)) { dev_err(adev->dev, "failed to register debugfs file for SA\n"); } return 0; } /** * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool * * @adev: amdgpu_device pointer * * Tear down the suballocator managing the pool of memory * for use as IBs (all asics). */ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) { if (adev->ib_pool_ready) { amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo); amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo); adev->ib_pool_ready = false; } } /** * amdgpu_ib_ring_tests - test IBs on the rings * * @adev: amdgpu_device pointer * * Test an IB (Indirect Buffer) on each ring. * If the test fails, disable the ring. 
* Returns 0 on success, error if the primary GFX ring * IB test fails. */ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { unsigned i; int r; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->ready) continue; r = amdgpu_ring_test_ib(ring); if (r) { ring->ready = false; if (ring == &adev->gfx.gfx_ring[0]) { /* oh, oh, that's really bad */ DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r); adev->accel_working = false; return r; } else { /* still not good, but we can live with it */ DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); } } } return 0; } /* * Debugfs info */ #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m); return 0; } static const struct drm_info_list amdgpu_debugfs_sa_list[] = { {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL}, }; #endif static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1); #else return 0; #endif }
gpl-2.0
OpenEngineDK/openengine
src/Logging/ColorStreamLogger.cpp
2
2351
// Generic stream logger. // ------------------------------------------------------------------- // Copyright (C) 2007 OpenEngine.dk (See AUTHORS) // // This program is free software; It is covered by the GNU General // Public License version 2 or any later version. // See the GNU General Public License for more details (see LICENSE). // -------------------------------------------------------------------- #include <Logging/ColorStreamLogger.h> #include <Meta/Time.h> #include <Utils/DateTime.h> #include <string.h> #include <stdlib.h> namespace OpenEngine { namespace Logging { /** * Create a logger wrapping an output stream. * * @param stream Stream to use as log output. */ ColorStreamLogger::ColorStreamLogger(ostream* stream) : stream(stream) { char *term = getenv("TERM"); colorsEnabled = term?(strcmp(term,"xterm-color") == 0):false; } /** * Destruct the stream logger. * Flushes the stream. */ ColorStreamLogger::~ColorStreamLogger(){ if (stream!=NULL) { stream->flush(); delete stream; } } /** * Write a log message. * * @param type Log message type. * @param msg Message to log. */ void ColorStreamLogger::Write(LoggerType type, string msg) { if (colorsEnabled) { *stream << "\033["; // print the CSI *stream << ColorForType(type); *stream << 'm'; // the previous was a SGR } *stream << TypeToString(type) << " "; *stream << Utils::DateTime::GetCurrent() << ": "; *stream << msg ; if (colorsEnabled) { // reset color *stream << "\033["; // print the CSI *stream << 'm'; // the previous was a SGR } *stream << std::endl; } /** * Get string representation for a log message type. * * @param type Log message type. * @return String representation of type. 
*/ string ColorStreamLogger::TypeToString(LoggerType type){ string str; if (type == Error) str ="[EE]"; else if (type == Warning) str = "[WW]"; else if (type == Info) str = "[II]"; else { str = "["; str += type; str += "]"; } return str; } int ColorStreamLogger::ColorForType(LoggerType type) { switch (type) { case Info: return 32; case Warning: return 33; case Error: return 31; } return 30; } } //NS Logging } //NS OpenEngine
gpl-2.0
gotm-model/code
src/turbulence/potentialml.F90
2
5728
#include"cppdefs.h"
!-------------------------------------------------------------------------
!BOP
!
! !ROUTINE: Algebraic length-scale with two master scales \label{sec:potentialml}
!
! !INTERFACE:
   subroutine potentialml(nlev,z0b,z0s,h,depth,NN)
!
! !DESCRIPTION:
!  Computes the length scale by defining two master
!  length scales $l_u$ and $l_d$
!  \begin{equation}
!    \begin{array}{l}
!      \int_{z_0}^{z_0+l_u(z_0)} (b(z_0)-b(z)) dz =k(z_0) \comma \\[4mm]
!      \int_{z_0-l_d(z_0)}^{z_0} (b(z)-b(z_0)) dz =k(z_0)
!    \end{array}
!  \end{equation}
!
!  From $l_u$ and $l_d$ two length--scales are defined: $l_k$,
!  a characteristic mixing length,
!  and $l_\epsilon$, a characteristic dissipation length.
!  They are computed according to
!  \begin{equation}
!    \begin{array}{l}
!      l_k(z_0)= \text{Min} ( l_d(z_0),l_u(z_0)) \comma \\[4mm]
!      l_{\epsilon}(z_0)=\left( l_d(z_0)l_u(z_0)\right)^\frac{1}{2}
!      \point
!    \end{array}
!  \end{equation}
!
!  $l_k$ is used in {\tt kolpran()} to compute eddy viscosity/diffusivity.
!  $l_{\epsilon}$ is used to compute the dissipation rate, $\epsilon$
!  according to
!  \begin{equation}
!    \epsilon=C_{\epsilon} k^{3/2} l_{\epsilon}^{-1}
!    \comma
!    C_{\epsilon}=0.7
!    \point
!  \end{equation}
!
! !USES:
   use turbulence, only: L,eps,tke,k_min,eps_min
   use turbulence, only: cde,galp,kappa,length_lim
   IMPLICIT NONE
!
! !INPUT PARAMETERS:

!  number of vertical layers
   integer, intent(in)                 :: nlev

!  bottom and surface roughness (m)
   REALTYPE, intent(in)                :: z0b,z0s

!  layer thickness (m)
   REALTYPE, intent(in)                :: h(0:nlev)

!  local depth (m)
   REALTYPE, intent(in)                :: depth

!  buoyancy frequency (1/s^2)
   REALTYPE, intent(in)                :: NN(0:nlev)
!
! !REVISION HISTORY:
!  Original author(s): Manuel Ruiz Villarreal, Hans Burchard
!EOP
!-------------------------------------------------------------------------
!
!
! !LOCAL VARIABLES:
   integer                   :: i,j
   REALTYPE                  :: ds(0:nlev),db(0:nlev)
   REALTYPE                  :: lu(0:nlev),ld(0:nlev)
   REALTYPE                  :: lk(0:nlev),leps(0:nlev)
   REALTYPE                  :: Lcrit,buoydiff,integral,ceps
!  floor on NN to keep the analytic limit sqrt(2 k)/N finite
   REALTYPE, parameter       :: NNmin=1.e-8
!
!-------------------------------------------------------------------------
!BOC
!  distances of each interior interface from bottom (db) and surface (ds)
   db(0)=0.
   ds(nlev)=0.
   do i=1,nlev-1
      db(i)=db(i-1)+h(i)   ! distance of interface i from bottom
      ds(i)=depth-db(i)    ! distance of interface i from surface
   end do

!  Calculation of lu and ld by solving the integral equation following
!  Gaspar (1990). Some other approximations of the integral equation
!  are possible.
!
!  Computation of lupward
   do i=1,nlev-1
      lu(i)=0.
      integral=0.
      buoydiff=0.
      do j=i+1,nlev
         buoydiff=buoydiff+NN(j-1)*0.5*(h(j)+h(j-1))
         integral=integral+buoydiff*h(j)
         if (integral.ge.tke(i)) then
            if(j.ne.nlev) then
               if(j.ne.i+1) then
!                 interpolate back to the layer where the integral
!                 first exceeds tke(i)
                  lu(i)=lu(i)-(integral-tke(i))/buoydiff
               else
!                 To avoid lu(i) from becoming too large if NN(i) is too small
                  if(NN(i).gt.NNmin) then
                     lu(i)=sqrt(2.)*sqrt(tke(i))/sqrt(NN(i))
                  else
                     lu(i)=h(i)
                  end if
               end if
               goto 600
            end if
         end if
         lu(i)=lu(i)+h(j)
      end do
600   continue
!     Implicitly done in the do loop:
!     if (lu(i).gt.ds(i)) lu(i)=ds(i)   ! lu limited by distance to surface
   end do

!  Computation of ldownward
   do i=nlev-1,1,-1
      ld(i)=0.
      integral=0.
      buoydiff=0.
      do j=i-1,1,-1
         buoydiff=buoydiff+NN(j)*0.5*(h(j+1)+h(j))
         integral=integral-buoydiff*h(j)
         if (integral.ge.tke(i)) then
            if(j.ne.0) then
               if(j.ne.i-1) then
!                 interpolate back to the layer where the integral
!                 first exceeds tke(i)
                  ld(i)=ld(i)-(integral-tke(i))/buoydiff
               else
!                 To avoid ld(i) from becoming too large if NN(i) is too small
                  if(NN(i).gt.NNmin) then
                     ld(i)=sqrt(2.)*sqrt(tke(i))/sqrt(NN(i))
                  else
                     ld(i)=h(i)
                  end if
               end if
               goto 610
            end if
         end if
         ld(i)=ld(i)+h(j)
      end do
610   continue
!     if (ld(i).gt.db(i)) ld(i)=db(i)   !ld limited by distance to bottom
   end do

!  Calculation of lk and leps, mixing and dissipation lengths
!  NOTE(review): the header documents lk = min(lu,ld) (Gaspar), but the
!  code below uses the geometric mean for both lk and leps -- confirm
!  this deviation is intentional.
   do i=nlev-1,1,-1
!     Suggested by Gaspar:
!     lk(i) = min(lu(i),ld(i))
      lk(i)=sqrt(lu(i)*ld(i))
      leps(i) = sqrt(lu(i)*ld(i))
   end do

!  We set L=lk because it is the one we use to calculate num and nuh
!  NOTE(review): ceps is assigned but never used below (eps is computed
!  with cde from the turbulence module) -- confirm it can be removed.
   ceps=0.7
   do i=1,nlev-1
      L(i)=lk(i)
   end do

!  do the boundaries assuming linear log-law length-scale
   L(0)=kappa*z0b
   L(nlev)=kappa*z0s

   do i=0,nlev
!     clip the length-scale at the Galperin et al. (1988) value
!     under stable stratification
      if ((NN(i).gt.0).and.(length_lim)) then
         Lcrit=sqrt(2*galp*galp*tke(i)/NN(i))
         if (L(i).gt.Lcrit) L(i)=Lcrit
      end if

!     compute the dissipation rate
      eps(i)=cde*sqrt(tke(i)*tke(i)*tke(i))/L(i)

!     substitute minimum value
      if (eps(i).lt.eps_min) then
         eps(i) = eps_min
!        keep L consistent with the clipped dissipation rate
         L(i) = cde*sqrt(tke(i)*tke(i)*tke(i))/eps_min
      endif
   enddo

   return
   end subroutine potentialml
!EOC

!-----------------------------------------------------------------------
! Copyright by the GOTM-team under the GNU Public License - www.gnu.org
!-----------------------------------------------------------------------
gpl-2.0
Biktorgj/Android_b2_Kernel
drivers/media/video/exynos/jpeg/jpeg_regs.c
2
17039
/* linux/drivers/media/video/exynos/jpeg/jpeg_regs.c
 *
 * Copyright (c) 2012~2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Register interface file for jpeg v2.x driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/delay.h>

#include "jpeg_regs.h"
#include "jpeg_conf.h"
#include "jpeg_core.h"
#include "regs_jpeg_v2_x.h"

/* Soft-reset the JPEG block: drop it out of enc/dec mode, then pulse
 * the (active-high released) soft-reset bit. */
void jpeg_sw_reset(void __iomem *base)
{
	unsigned int reg;

	reg = readl(base + S5P_JPEG_CNTL_REG);
	writel((reg & S5P_JPEG_ENC_DEC_MODE_MASK),
			base + S5P_JPEG_CNTL_REG);

	reg = readl(base + S5P_JPEG_CNTL_REG);
	writel(reg & ~S5P_JPEG_SOFT_RESET_HI,
			base + S5P_JPEG_CNTL_REG);

	/* hold reset low for 100 us before releasing it */
	ndelay(100000);

	writel(reg | S5P_JPEG_SOFT_RESET_HI,
			base + S5P_JPEG_CNTL_REG);
}

/* Select encode or decode operation in the control register. */
void jpeg_set_enc_dec_mode(void __iomem *base, enum jpeg_mode mode)
{
	unsigned int reg;

	reg = readl(base + S5P_JPEG_CNTL_REG);
	/* set jpeg mode register */
	if (mode == DECODING) {
		writel((reg & S5P_JPEG_ENC_DEC_MODE_MASK) | S5P_JPEG_DEC_MODE,
			base + S5P_JPEG_CNTL_REG);
	} else {/* encode */
		writel((reg & S5P_JPEG_ENC_DEC_MODE_MASK) | S5P_JPEG_ENC_MODE,
			base + S5P_JPEG_CNTL_REG);
	}
}

/* Program the decoder output pixel format (color space, plane layout,
 * chroma order) into the image-format register. */
void jpeg_set_dec_out_fmt(void __iomem *base,
					enum jpeg_frame_format out_fmt)
{
	unsigned int reg = 0;

	writel(0, base + S5P_JPEG_IMG_FMT_REG); /* clear */

	/* set jpeg decode output format register */
	switch (out_fmt) {
	case GRAY:
		reg = S5P_JPEG_DEC_GRAY_IMG |
				S5P_JPEG_GRAY_IMG_IP;
		break;

	case RGB_888:
		reg = S5P_JPEG_DEC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_32BIT_IMG |
				S5P_JPEG_ENC_FMT_RGB;
		break;

	case RGB_565:
		reg = S5P_JPEG_DEC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_16BIT_IMG;
		break;

	case YCRCB_444_2P:
		reg = S5P_JPEG_DEC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_444_2P:
		reg = S5P_JPEG_DEC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBCR_444_3P:
		reg = S5P_JPEG_DEC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_3P_IMG;
		break;

	case BGR_888:
		reg = S5P_JPEG_DEC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_32BIT_IMG |
				S5P_JPEG_ENC_FMT_BGR;
		break;

	case CRYCBY_422_1P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_VYUY;
		break;

	case CBYCRY_422_1P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_UYVY;
		break;

	case YCRYCB_422_1P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_YVYU;
		break;

	case YCBYCR_422_1P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_YUYV;
		break;

	case YCRCB_422_2P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_422_2P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBYCR_422_3P:
		reg = S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_3P_IMG;
		break;

	case YCRCB_420_2P:
		reg = S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_420_2P:
		reg = S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBCR_420_3P:
	case YCRCB_420_3P:
		reg = S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_3P_IMG;
		break;

	default:
		break;
	}

	writel(reg, base + S5P_JPEG_IMG_FMT_REG);
}

/* Program the encoder input pixel format, preserving the already-set
 * encode output format bits. */
void jpeg_set_enc_in_fmt(void __iomem *base,
					enum jpeg_frame_format in_fmt)
{
	unsigned int reg;

	reg = readl(base + S5P_JPEG_IMG_FMT_REG) &
			S5P_JPEG_ENC_IN_FMT_MASK; /* clear except enc format */

	switch (in_fmt) {
	case GRAY:
		reg = reg | S5P_JPEG_ENC_GRAY_IMG | S5P_JPEG_GRAY_IMG_IP;
		break;

	case RGB_565:
		reg = reg | S5P_JPEG_ENC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_16BIT_IMG;
		break;

	case YCRCB_444_2P:
		reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_444_2P:
		reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBCR_444_3P:
		reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
				S5P_JPEG_YUV_444_IP_YUV_444_3P_IMG;
		break;

	case RGB_888:
		reg = reg | S5P_JPEG_DEC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_32BIT_IMG |
				S5P_JPEG_ENC_FMT_RGB;
		break;

	case BGR_888:
		reg = reg | S5P_JPEG_DEC_RGB_IMG |
				S5P_JPEG_RGB_IP_RGB_32BIT_IMG |
				S5P_JPEG_ENC_FMT_BGR;
		break;

	case CRYCBY_422_1P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_VYUY;
		break;

	case CBYCRY_422_1P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_UYVY;
		break;

	case YCRYCB_422_1P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_YVYU;
		break;

	case YCBYCR_422_1P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
				S5P_JPEG_ENC_FMT_YUYV;
		break;

	case YCRCB_422_2P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_422_2P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBYCR_422_3P:
		reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
				S5P_JPEG_YUV_422_IP_YUV_422_3P_IMG;
		break;

	case YCRCB_420_2P:
		reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CrCb;
		break;

	case YCBCR_420_2P:
		reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
				S5P_JPEG_SWAP_CHROMA_CbCr;
		break;

	case YCBCR_420_3P:
	case YCRCB_420_3P:
		reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
				S5P_JPEG_YUV_420_IP_YUV_420_3P_IMG;
		break;

	default:
		break;
	}

	writel(reg, base + S5P_JPEG_IMG_FMT_REG);
}

/* Program the encoder output (subsampling) format. */
void jpeg_set_enc_out_fmt(void __iomem *base,
					enum jpeg_frame_format out_fmt)
{
	unsigned int reg;

	reg = readl(base + S5P_JPEG_IMG_FMT_REG) &
			~S5P_JPEG_ENC_FMT_MASK; /* clear enc format */

	switch (out_fmt) {
	case JPEG_GRAY:
		reg = reg | S5P_JPEG_ENC_FMT_GRAY;
		break;

	case JPEG_444:
		reg = reg | S5P_JPEG_ENC_FMT_YUV_444;
		break;

	case JPEG_422:
		reg = reg | S5P_JPEG_ENC_FMT_YUV_422;
		break;

	case JPEG_420:
		reg = reg | S5P_JPEG_ENC_FMT_YUV_420;
		break;

	default:
		break;
	}

	writel(reg, base + S5P_JPEG_IMG_FMT_REG);
}

/* Copy one 16-word ITU quantization table into the quantization table
 * RAM at the given byte offset. */
static void jpeg_write_qtbl(void __iomem *base, unsigned int tbl_idx,
					unsigned int offset)
{
	int i;

	for (i = 0; i < 16; i++) {
		writel((unsigned int)ITU_Q_tbl[tbl_idx][i],
			base + S5P_JPEG_QUAN_TBL_ENTRY_REG +
				offset + (i * 0x04));
	}
}

/* Load the quantization and Huffman tables for the requested quality
 * level.  Each level uses a (luma, chroma) pair of ITU tables; the
 * pair is written twice to fill all four table slots.  The Huffman
 * tables are identical for every level. */
void jpeg_set_enc_tbl(void __iomem *base,
		enum jpeg_img_quality_level level)
{
	unsigned int luma, chroma;
	int i;

	switch (level) {
	case QUALITY_LEVEL_2:
		luma = 2;
		chroma = 3;
		break;
	case QUALITY_LEVEL_3:
		luma = 4;
		chroma = 5;
		break;
	case QUALITY_LEVEL_4:
		luma = 6;
		chroma = 7;
		break;
	case QUALITY_LEVEL_1:
	default:
		luma = 0;
		chroma = 1;
		break;
	}

	jpeg_write_qtbl(base, luma, 0x00);
	jpeg_write_qtbl(base, chroma, 0x40);
	jpeg_write_qtbl(base, luma, 0x80);
	jpeg_write_qtbl(base, chroma, 0xc0);

	for (i = 0; i < 4; i++) {
		writel((unsigned int)ITU_H_tbl_len_DC_luminance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + (i*0x04));
	}

	for (i = 0; i < 3; i++) {
		writel((unsigned int)ITU_H_tbl_val_DC_luminance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x10 + (i*0x04));
	}

	for (i = 0; i < 4; i++) {
		writel((unsigned int)ITU_H_tbl_len_DC_chrominance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x20 + (i*0x04));
	}

	for (i = 0; i < 3; i++) {
		writel((unsigned int)ITU_H_tbl_val_DC_chrominance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x30 + (i*0x04));
	}

	for (i = 0; i < 4; i++) {
		writel((unsigned int)ITU_H_tbl_len_AC_luminance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x40 + (i*0x04));
	}

	for (i = 0; i < 41; i++) {
		writel((unsigned int)ITU_H_tbl_val_AC_luminance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x50 + (i*0x04));
	}

	for (i = 0; i < 4; i++) {
		writel((unsigned int)ITU_H_tbl_len_AC_chrominance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x100 + (i*0x04));
	}

	for (i = 0; i < 41; i++) {
		writel((unsigned int)ITU_H_tbl_val_AC_chrominance[i],
			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x110 + (i*0x04));
	}
}

/* Enable all interrupt sources. */
void jpeg_set_interrupt(void __iomem *base)
{
	/* The masked readl that used to precede this write was dead
	 * code: its result was never used. */
	writel(S5P_JPEG_INT_EN_ALL, base + S5P_JPEG_INT_EN_REG);
}

/* Disable all interrupt sources. */
void jpeg_clean_interrupt(void __iomem *base)
{
	writel(0, base + S5P_JPEG_INT_EN_REG);
}

/* Read and return the raw interrupt status register. */
unsigned int jpeg_get_int_status(void __iomem *base)
{
	unsigned int	int_status;

	int_status = readl(base + S5P_JPEG_INT_STATUS_REG);

	return int_status;
}

/* Enable (value == 1) or disable the Huffman table lookup, leaving the
 * other control bits untouched. */
void jpeg_set_huf_table_enable(void __iomem *base, int value)
{
	unsigned int	reg;

	reg = readl(base + S5P_JPEG_CNTL_REG) & ~S5P_JPEG_HUF_TBL_EN;

	if (value == 1)
		writel(reg | S5P_JPEG_HUF_TBL_EN,
					base + S5P_JPEG_CNTL_REG);
	else
		/* BUG FIX: was "reg | ~S5P_JPEG_HUF_TBL_EN", which set
		 * every other bit in the control register.  The enable
		 * bit is already cleared in reg. */
		writel(reg, base + S5P_JPEG_CNTL_REG);
}

/* Program horizontal/vertical decode downscaling factors. */
void jpeg_set_dec_scaling(void __iomem *base,
		enum jpeg_scale_value x_value, enum jpeg_scale_value y_value)
{
	unsigned int	reg;

	reg = readl(base + S5P_JPEG_CNTL_REG) &
			~(S5P_JPEG_HOR_SCALING_MASK |
				S5P_JPEG_VER_SCALING_MASK);

	writel(reg | S5P_JPEG_HOR_SCALING(x_value) |
			S5P_JPEG_VER_SCALING(y_value),
				base + S5P_JPEG_CNTL_REG);
}

/* Enable (value == 1) or disable the system interrupt, leaving the
 * other control bits untouched. */
void jpeg_set_sys_int_enable(void __iomem *base, int value)
{
	unsigned int	reg;

	reg = readl(base + S5P_JPEG_CNTL_REG) & ~(S5P_JPEG_SYS_INT_EN);

	/* BUG FIX: the old code wrote the bare bit (or its complement)
	 * instead of OR-ing it into reg, clobbering the rest of the
	 * control register on every call. */
	if (value == 1)
		writel(reg | S5P_JPEG_SYS_INT_EN, base + S5P_JPEG_CNTL_REG);
	else
		writel(reg, base + S5P_JPEG_CNTL_REG);
}

/* Set the base address of the compressed (JPEG) stream buffer. */
void jpeg_set_stream_buf_address(void __iomem *base, unsigned int address)
{
	writel(address, base + S5P_JPEG_OUT_MEM_BASE_REG);
}

/* Program the image dimensions (in pixels). */
void jpeg_set_stream_size(void __iomem *base,
		unsigned int x_value, unsigned int y_value)
{
	writel(0x0, base + S5P_JPEG_IMG_SIZE_REG); /* clear */
	writel(S5P_JPEG_X_SIZE(x_value) | S5P_JPEG_Y_SIZE(y_value),
			base + S5P_JPEG_IMG_SIZE_REG);
}

/* Program the 1..3 plane base addresses of the raw frame buffer,
 * derived from the pixel format and the frame geometry. */
void jpeg_set_frame_buf_address(void __iomem *base,
		enum jpeg_frame_format fmt, unsigned int address,
		unsigned int width, unsigned int height)
{
	switch (fmt) {
	case GRAY:
	case RGB_565:
	case RGB_888:
	case YCRYCB_422_1P:
	case YCBYCR_422_1P:
	case BGR_888:
	case CBYCRY_422_1P:
	case CRYCBY_422_1P:
		/* single interleaved plane */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(0, base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(0, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	case YCBCR_444_2P:
	case YCRCB_444_2P:
	case YCRCB_422_2P:
	case YCBCR_422_2P:
	case YCBCR_420_2P:
	case YCRCB_420_2P:
		/* luma plane followed by an interleaved chroma plane */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(address + (width * height),
					base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(0, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	case YCBCR_444_3P:
		/* three full-size planes */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(address + (width * height),
					base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(address + ((width * height) << 1),
					base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	case YCBYCR_422_3P:
		/* 4:2:2 planar: chroma planes are half-size */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(address + (width * height),
					base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(address + (width * height +
					((width * height) >> 1)),
					base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	case YCBCR_420_3P:
		/* 4:2:0 planar: chroma planes are quarter-size */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(address + (width * height),
					base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(address + (width * height +
					((width * height) >> 2)),
					base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	case YCRCB_420_3P:
		/* as YCBCR_420_3P but with the chroma planes swapped */
		writel(address, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
		writel(address + (width * height +
					((width * height) >> 2)),
					base + S5P_JPEG_IMG_BA_PLANE_2_REG);
		writel(address + (width * height),
					base + S5P_JPEG_IMG_BA_PLANE_3_REG);
		break;

	default:
		break;
	}
}

/* Select which quantization/Huffman tables each component uses for the
 * requested quality level. */
void jpeg_set_encode_tbl_select(void __iomem *base,
		enum jpeg_img_quality_level level)
{
	unsigned int	reg;

	switch (level) {
	case QUALITY_LEVEL_2:
		reg = S5P_JPEG_Q_TBL_COMP1_0 | S5P_JPEG_Q_TBL_COMP2_3 |
			S5P_JPEG_Q_TBL_COMP3_3 |
			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
		break;
	case QUALITY_LEVEL_3:
		reg = S5P_JPEG_Q_TBL_COMP1_2 | S5P_JPEG_Q_TBL_COMP2_1 |
			S5P_JPEG_Q_TBL_COMP3_1 |
			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
		break;
	case QUALITY_LEVEL_4:
		reg = S5P_JPEG_Q_TBL_COMP1_2 | S5P_JPEG_Q_TBL_COMP2_3 |
			S5P_JPEG_Q_TBL_COMP3_3 |
			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
		break;
	case QUALITY_LEVEL_1:
	default:
		reg = S5P_JPEG_Q_TBL_COMP1_0 | S5P_JPEG_Q_TBL_COMP2_1 |
			S5P_JPEG_Q_TBL_COMP3_1 |
			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
		break;
	}

	writel(reg, base + S5P_JPEG_TBL_SEL_REG);
}

/* Program the Huffman-code word count for the selected subsampling. */
void jpeg_set_encode_hoff_cnt(void __iomem *base,
					enum jpeg_frame_format fmt)
{
	if (fmt == JPEG_GRAY)
		writel(0xd2, base + S5P_JPEG_HUFF_CNT_REG);
	else
		writel(0x1a2, base + S5P_JPEG_HUFF_CNT_REG);
}

/* Read back the size of the generated JPEG bitstream. */
unsigned int jpeg_get_stream_size(void __iomem *base)
{
	unsigned int size;

	size = readl(base + S5P_JPEG_BITSTREAM_SIZE_REG);

	return size;
}

/* Tell the decoder how many bytes of bitstream to consume. */
void jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
{
	writel(size, base + S5P_JPEG_BITSTREAM_SIZE_REG);
}

/* Program the watchdog/timeout counter. */
void jpeg_set_timer_count(void __iomem *base, unsigned int size)
{
	writel(size, base + S5P_JPEG_INT_TIMER_COUNT_REG);
}

/* Read the decoded frame dimensions (width in the low half of the
 * register, height in the high half). */
void jpeg_get_frame_size(void __iomem *base,
			unsigned int *width, unsigned int *height)
{
	*width = (readl(base + S5P_JPEG_DECODE_XY_SIZE_REG) &
				S5P_JPEG_DECODED_SIZE_MASK);
	*height = (readl(base + S5P_JPEG_DECODE_XY_SIZE_REG) >> 16) &
				S5P_JPEG_DECODED_SIZE_MASK ;
}

/* Decode the hardware-reported subsampling of the parsed JPEG. */
enum jpeg_frame_format jpeg_get_frame_fmt(void __iomem *base)
{
	unsigned int	reg;
	enum jpeg_frame_format	out_format;

	reg = readl(base + S5P_JPEG_DECODE_IMG_FMT_REG);

	out_format =
		((reg & 0x03) == 0x01) ? JPEG_444 :
		((reg & 0x03) == 0x02) ? JPEG_422 :
		((reg & 0x03) == 0x03) ? JPEG_420 :
		((reg & 0x03) == 0x00) ? JPEG_GRAY : JPEG_RESERVED;

	return out_format;
}
gpl-2.0
popazerty/SDG-e2
lib/gdi/gpixmap.cpp
2
25410
#include <cstdlib> #include <cstring> #include <lib/gdi/gpixmap.h> #include <lib/gdi/region.h> #include <lib/gdi/accel.h> #include <byteswap.h> #ifndef BYTE_ORDER #error "no BYTE_ORDER defined!" #endif // #define GPIXMAP_DEBUG #ifdef GPIXMAP_DEBUG # include "../base/benchmark.h" #endif gLookup::gLookup() :size(0), lookup(0) { } gLookup::gLookup(int size, const gPalette &pal, const gRGB &start, const gRGB &end) :size(0), lookup(0) { build(size, pal, start, end); } void gLookup::build(int _size, const gPalette &pal, const gRGB &start, const gRGB &end) { if (lookup) { delete [] lookup; lookup=0; size=0; } size=_size; if (!size) return; lookup=new gColor[size]; lookup[0] = pal.findColor(start); const int rsize = end.r - start.r; const int gsize = end.g - start.g; const int bsize = end.b - start.b; const int asize = end.a - start.a; const int size_1 = size - 1; for (int i=1; i<size; i++) { gRGB col; int rdiff = (rsize * i) / size_1; int gdiff = (gsize * i) / size_1; int bdiff = (bsize * i) / size_1; int adiff = (asize * i) / size_1; col.r = start.r + rdiff; col.g = start.g + gdiff; col.b = start.b + bdiff; col.a = start.a + adiff; lookup[i] = pal.findColor(col); } } gUnmanagedSurface::gUnmanagedSurface(): x(0), y(0), bpp(0), bypp(0), stride(0), data(0), data_phys(0) { } gUnmanagedSurface::gUnmanagedSurface(int width, int height, int _bpp): x(width), y(height), bpp(_bpp), data(0), data_phys(0) { switch (_bpp) { case 8: bypp = 1; break; case 15: case 16: bypp = 2; break; case 24: // never use 24bit mode case 32: bypp = 4; break; default: bypp = (bpp+7)/8; } stride = x*bypp; } #ifdef GPIXMAP_DEBUG unsigned int pixmap_total_size = 0; unsigned int pixmap_total_count = 0; static void added_pixmap(int size) { ++pixmap_total_count; pixmap_total_size += size; eDebug("[gSurface] Added %dk, total %u pixmaps, %uk", size>>10, pixmap_total_count, pixmap_total_size>>10); } static void removed_pixmap(int size) { --pixmap_total_count; pixmap_total_size -= size; eDebug("[gSurface] 
Removed %dk, total %u pixmaps, %uk", size>>10, pixmap_total_count, pixmap_total_size>>10); } #else static inline void added_pixmap(int size) {} static inline void removed_pixmap(int size) {} #endif static bool is_a_candidate_for_accel(const gUnmanagedSurface* surface) { if (surface->stride < 48) return false; switch (surface->bpp) { case 8: return (surface->y * surface->stride) > 12000; case 32: return (surface->y * surface->stride) > 48000; default: return false; } } gSurface::gSurface(int width, int height, int _bpp, int accel): gUnmanagedSurface(width, height, _bpp) { if ((accel > gPixmap::accelAuto) || ((accel == gPixmap::accelAuto) && (is_a_candidate_for_accel(this)))) { if (gAccel::getInstance()->accelAlloc(this) != 0) eDebug("ERROR: accelAlloc failed"); } if (!data) { data = new unsigned char [y * stride]; added_pixmap(y * stride); } } gSurface::~gSurface() { gAccel::getInstance()->accelFree(this); if (data) { delete [] (unsigned char*)data; removed_pixmap(y * stride); } if (clut.data) { delete [] clut.data; } } void gPixmap::fill(const gRegion &region, const gColor &color) { unsigned int i; for (i=0; i<region.rects.size(); ++i) { const eRect &area = region.rects[i]; if (area.empty()) continue; if (surface->bpp == 8) { for (int y=area.top(); y<area.bottom(); y++) memset(((__u8*)surface->data)+y*surface->stride+area.left(), color.color, area.width()); } else if (surface->bpp == 16) { uint32_t icol; if (surface->clut.data && color < surface->clut.colors) icol=surface->clut.data[color].argb(); else icol=0x10101*color; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t col = bswap_16(((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19); #else uint16_t col = ((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19; #endif for (int y=area.top(); y<area.bottom(); y++) { uint16_t *dst=(uint16_t*)(((uint8_t*)surface->data)+y*surface->stride+area.left()*surface->bypp); int x=area.width(); while (x--) *dst++=col; } } 
else if (surface->bpp == 32) { uint32_t col; if (surface->clut.data && color < surface->clut.colors) col = surface->clut.data[color].argb(); else #if defined(__sh__) if ((col&0xFF000000) == 0xFF000000) col = 0xFF000000; #endif col = 0x10101 * color; col^=0xFF000000; if (surface->data_phys) if (!gAccel::getInstance()->fill(surface, area, col)) continue; for (int y=area.top(); y<area.bottom(); y++) { uint32_t *dst=(uint32_t*)(((uint8_t*)surface->data)+y*surface->stride+area.left()*surface->bypp); int x=area.width(); while (x--) *dst++=col; } } else eWarning("couldn't fill %d bpp", surface->bpp); } } void gPixmap::fill(const gRegion &region, const gRGB &color) { unsigned int i; for (i=0; i<region.rects.size(); ++i) { const eRect &area = region.rects[i]; if (area.empty()) continue; if (surface->bpp == 32) { uint32_t col; col = color.argb(); #if defined(__sh__) if ((col&0xFF000000) == 0xFF000000) col = 0xFF000000; #endif col^=0xFF000000; #ifdef GPIXMAP_DEBUG Stopwatch s; #endif if (surface->data_phys && (area.surface() > 20000)) if (!gAccel::getInstance()->fill(surface, area, col)) { #ifdef GPIXMAP_DEBUG s.stop(); eDebug("[BLITBENCH] accel fill %dx%d took %u us", area.width(), area.height(), s.elapsed_us()); #endif continue; } for (int y=area.top(); y<area.bottom(); y++) { uint32_t *dst=(uint32_t*)(((uint8_t*)surface->data)+y*surface->stride+area.left()*surface->bypp); int x=area.width(); while (x--) *dst++=col; } #ifdef GPIXMAP_DEBUG s.stop(); eDebug("[BLITBENCH] cpu fill %dx%d took %u us", area.width(), area.height(), s.elapsed_us()); #endif } else if (surface->bpp == 16) { uint32_t icol = color.argb(); #if BYTE_ORDER == LITTLE_ENDIAN uint16_t col = bswap_16(((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19); #else uint16_t col = ((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19; #endif for (int y=area.top(); y<area.bottom(); y++) { uint16_t 
*dst=(uint16_t*)(((uint8_t*)surface->data)+y*surface->stride+area.left()*surface->bypp); int x=area.width(); while (x--) *dst++=col; } } else eWarning("couldn't rgbfill %d bpp", surface->bpp); } } static inline void blit_8i_to_32(uint32_t *dst, const uint8_t *src, const uint32_t *pal, int width) { while (width--) *dst++=pal[*src++]; } static inline void blit_8i_to_32_at(uint32_t *dst, const uint8_t *src, const uint32_t *pal, int width) { while (width--) { if (!(pal[*src]&0x80000000)) { src++; dst++; } else *dst++=pal[*src++]; } } static inline void blit_8i_to_16(uint16_t *dst, const uint8_t *src, const uint32_t *pal, int width) { while (width--) *dst++=pal[*src++] & 0xFFFF; } static inline void blit_8i_to_16_at(uint16_t *dst, const uint8_t *src, const uint32_t *pal, int width) { while (width--) { if (!(pal[*src]&0x80000000)) { src++; dst++; } else *dst++=pal[*src++] & 0xFFFF; } } static void blit_8i_to_32_ab(gRGB *dst, const uint8_t *src, const gRGB *pal, int width) { while (width--) { dst->alpha_blend(pal[*src++]); ++dst; } } static void convert_palette(uint32_t* pal, const gPalette& clut) { int i = 0; if (clut.data) { while (i < clut.colors) { pal[i] = clut.data[i].argb() ^ 0xFF000000; ++i; } } for(; i != 256; ++i) { pal[i] = (0x010101*i) | 0xFF000000; } } #define FIX 0x10000 void gPixmap::blit(const gPixmap &src, const eRect &_pos, const gRegion &clip, int flag) { bool accel = (surface->data_phys && src.surface->data_phys); // eDebug("blit: -> %d,%d+%d,%d -> %d,%d+%d,%d, flags=0x%x, accel=%d", // _pos.x(), _pos.y(), _pos.width(), _pos.height(), // clip.extends.x(), clip.extends.y(), clip.extends.width(), clip.extends.height(), // flag, accel); eRect pos = _pos; // eDebug("source size: %d %d", src.size().width(), src.size().height()); if (!(flag & blitScale)) /* pos' size is valid only when scaling */ pos = eRect(pos.topLeft(), src.size()); else if (pos.size() == src.size()) /* no scaling required */ flag &= ~blitScale; int scale_x = FIX, scale_y = FIX; if (flag 
& blitScale) { ASSERT(src.size().width()); ASSERT(src.size().height()); scale_x = pos.size().width() * FIX / src.size().width(); scale_y = pos.size().height() * FIX / src.size().height(); if (flag & blitKeepAspectRatio) { if (scale_x > scale_y) { pos = eRect(ePoint(pos.x() + (scale_x - scale_y) * pos.width() / (2 * FIX), pos.y()), eSize(src.size().width() * pos.height() / src.size().height(), pos.height())); scale_x = scale_y; } else { pos = eRect(ePoint(pos.x(), pos.y() + (scale_y - scale_x) * pos.height() / (2 * FIX)), eSize(pos.width(), src.size().height() * pos.width() / src.size().width())); scale_y = scale_x; } } } // eDebug("SCALE %x %x", scale_x, scale_y); for (unsigned int i=0; i<clip.rects.size(); ++i) { // eDebug("clip rect: %d %d %d %d", clip.rects[i].x(), clip.rects[i].y(), clip.rects[i].width(), clip.rects[i].height()); eRect area = pos; /* pos is the virtual (pre-clipping) area on the dest, which can be larger/smaller than src if scaling is enabled */ area&=clip.rects[i]; area&=eRect(ePoint(0, 0), size()); if (area.empty()) continue; eRect srcarea = area; srcarea.moveBy(-pos.x(), -pos.y()); // eDebug("srcarea before scale: %d %d %d %d", // srcarea.x(), srcarea.y(), srcarea.width(), srcarea.height()); if (flag & blitScale) srcarea = eRect(srcarea.x() * FIX / scale_x, srcarea.y() * FIX / scale_y, srcarea.width() * FIX / scale_x, srcarea.height() * FIX / scale_y); // eDebug("srcarea after scale: %d %d %d %d", // srcarea.x(), srcarea.y(), srcarea.width(), srcarea.height()); if (accel) { /* we have hardware acceleration for this blit operation */ if (flag & (blitAlphaTest | blitAlphaBlend)) { /* alpha blending is requested */ if (gAccel::getInstance()->hasAlphaBlendingSupport()) { /* Hardware alpha blending is broken on the few * boxes that support it, so only use it * when scaling */ if (flag & blitScale) accel = true; else if (flag & blitAlphaTest) /* Alpha test only on 8-bit */ accel = (src.surface->bpp == 8); else accel = false; } else { /* our 
hardware does not support alphablending */ accel = false; } } } #ifdef GPIXMAP_DEBUG Stopwatch s; #endif if (accel) { if (!gAccel::getInstance()->blit(surface, src.surface, area, srcarea, flag)) { #ifdef GPIXMAP_DEBUG s.stop(); eDebug("[BLITBENCH] accel blit took %u us", s.elapsed_us()); #endif continue; } } if (flag & blitScale) { if ((surface->bpp == 32) && (src.surface->bpp==8)) { const uint8_t *srcptr = (uint8_t*)src.surface->data; uint8_t *dstptr=(uint8_t*)surface->data; // !! uint32_t pal[256]; convert_palette(pal, src.surface->clut); const int src_stride = src.surface->stride; srcptr += srcarea.left()*src.surface->bypp + srcarea.top()*src_stride; dstptr += area.left()*surface->bypp + area.top()*surface->stride; const int width = area.width(); const int height = area.height(); const int src_height = srcarea.height(); const int src_width = srcarea.width(); if (flag & blitAlphaTest) { for (int y = 0; y < height; ++y) { const uint8_t *src_row_ptr = srcptr + (((y * src_height) / height) * src_stride); uint32_t *dst = (uint32_t*)dstptr; for (int x = 0; x < width; ++x) { uint32_t pixel = pal[src_row_ptr[(x *src_width) / width]]; if (pixel & 0x80000000) *dst = pixel; ++dst; } dstptr += surface->stride; } } else if (flag & blitAlphaBlend) { for (int y = 0; y < height; ++y) { const uint8_t *src_row_ptr = srcptr + (((y * src_height) / height) * src_stride); gRGB *dst = (gRGB*)dstptr; for (int x = 0; x < width; ++x) { dst->alpha_blend(pal[src_row_ptr[(x * src_width) / width]]); ++dst; } dstptr += surface->stride; } } else { for (int y = 0; y < height; ++y) { const uint8_t *src_row_ptr = srcptr + (((y * src_height) / height) * src_stride); uint32_t *dst = (uint32_t*)dstptr; for (int x = 0; x < width; ++x) { *dst = pal[src_row_ptr[(x * src_width) / width]]; ++dst; } dstptr += surface->stride; } } } else if ((surface->bpp == 32) && (src.surface->bpp == 32)) { const int src_stride = src.surface->stride; const uint8_t* srcptr = (const uint8_t*)src.surface->data + 
srcarea.left()*src.surface->bypp + srcarea.top()*src_stride; uint8_t* dstptr = (uint8_t*)surface->data + area.left()*surface->bypp + area.top()*surface->stride; const int width = area.width(); const int height = area.height(); const int src_height = srcarea.height(); const int src_width = srcarea.width(); if (flag & blitAlphaTest) { for (int y = 0; y < height; ++y) { const uint32_t *src_row_ptr = (uint32_t*)(srcptr + (((y * src_height) / height) * src_stride)); uint32_t *dst = (uint32_t*)dstptr; for (int x = 0; x < width; ++x) { uint32_t pixel = src_row_ptr[(x *src_width) / width]; if (pixel & 0x80000000) *dst = pixel; ++dst; } dstptr += surface->stride; } } else if (flag & blitAlphaBlend) { for (int y = 0; y < height; ++y) { const gRGB *src_row_ptr = (gRGB *)(srcptr + (((y * src_height) / height) * src_stride)); gRGB *dst = (gRGB*)dstptr; for (int x = 0; x < width; ++x) { dst->alpha_blend(src_row_ptr[(x * src_width) / width]); ++dst; } dstptr += surface->stride; } } else { for (int y = 0; y < height; ++y) { const uint32_t *src_row_ptr = (uint32_t*)(srcptr + (((y * src_height) / height) * src_stride)); uint32_t *dst = (uint32_t*)dstptr; for (int x = 0; x < width; ++x) { *dst = src_row_ptr[(x * src_width) / width]; ++dst; } dstptr += surface->stride; } } } else { eWarning("unimplemented: scale on non-accel surface %d->%d bpp", src.surface->bpp, surface->bpp); } #ifdef GPIXMAP_DEBUG s.stop(); eDebug("[BLITBENCH] CPU scale blit took %u us", s.elapsed_us()); #endif continue; } if ((surface->bpp == 8) && (src.surface->bpp == 8)) { uint8_t *srcptr=(uint8_t*)src.surface->data; uint8_t *dstptr=(uint8_t*)surface->data; srcptr+=srcarea.left()*src.surface->bypp+srcarea.top()*src.surface->stride; dstptr+=area.left()*surface->bypp+area.top()*surface->stride; if (flag & (blitAlphaTest|blitAlphaBlend)) { for (int y = area.height(); y != 0; --y) { // no real alphatest yet int width=area.width(); unsigned char *s = (unsigned char*)srcptr; unsigned char *d = (unsigned char*)dstptr; 
// use duff's device here! while (width--) { if (!*s) { s++; d++; } else { *d++ = *s++; } } srcptr += src.surface->stride; dstptr += surface->stride; } } else { int linesize = area.width()*surface->bypp; for (int y = area.height(); y != 0; --y) { memcpy(dstptr, srcptr, linesize); srcptr += src.surface->stride; dstptr += surface->stride; } } } else if ((surface->bpp == 32) && (src.surface->bpp==32)) { uint32_t *srcptr=(uint32_t*)src.surface->data; uint32_t *dstptr=(uint32_t*)surface->data; srcptr+=srcarea.left()+srcarea.top()*src.surface->stride/4; dstptr+=area.left()+area.top()*surface->stride/4; for (int y = area.height(); y != 0; --y) { if (flag & blitAlphaTest) { int width=area.width(); unsigned long *src=(unsigned long*)srcptr; unsigned long *dst=(unsigned long*)dstptr; while (width--) { if (!((*src)&0xFF000000)) { src++; dst++; } else *dst++=*src++; } } else if (flag & blitAlphaBlend) { int width = area.width(); gRGB *src = (gRGB*)srcptr; gRGB *dst = (gRGB*)dstptr; while (width--) { dst->alpha_blend(*src++); ++dst; } } else memcpy(dstptr, srcptr, area.width()*surface->bypp); srcptr = (uint32_t*)((uint8_t*)srcptr + src.surface->stride); dstptr = (uint32_t*)((uint8_t*)dstptr + surface->stride); } } else if ((surface->bpp == 32) && (src.surface->bpp==8)) { const uint8_t *srcptr = (uint8_t*)src.surface->data; uint8_t *dstptr=(uint8_t*)surface->data; // !! 
uint32_t pal[256]; convert_palette(pal, src.surface->clut); srcptr+=srcarea.left()*src.surface->bypp+srcarea.top()*src.surface->stride; dstptr+=area.left()*surface->bypp+area.top()*surface->stride; const int width=area.width(); for (int y = area.height(); y != 0; --y) { if (flag & blitAlphaTest) blit_8i_to_32_at((uint32_t*)dstptr, srcptr, pal, width); else if (flag & blitAlphaBlend) blit_8i_to_32_ab((gRGB*)dstptr, srcptr, (const gRGB*)pal, width); else blit_8i_to_32((uint32_t*)dstptr, srcptr, pal, width); srcptr += src.surface->stride; dstptr += surface->stride; } } else if ((surface->bpp == 16) && (src.surface->bpp==8)) { uint8_t *srcptr=(uint8_t*)src.surface->data; uint8_t *dstptr=(uint8_t*)surface->data; // !! uint32_t pal[256]; for (int i=0; i != 256; ++i) { uint32_t icol; if (src.surface->clut.data && (i<src.surface->clut.colors)) icol = src.surface->clut.data[i].argb(); else icol=0x010101*i; #if BYTE_ORDER == LITTLE_ENDIAN pal[i] = bswap_16(((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19); #else pal[i] = ((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19; #endif pal[i]^=0xFF000000; } srcptr+=srcarea.left()*src.surface->bypp+srcarea.top()*src.surface->stride; dstptr+=area.left()*surface->bypp+area.top()*surface->stride; if (flag & blitAlphaBlend) eWarning("ignore unsupported 8bpp -> 16bpp alphablend!"); for (int y=0; y<area.height(); y++) { int width=area.width(); unsigned char *psrc=(unsigned char*)srcptr; uint16_t *dst=(uint16_t*)dstptr; if (flag & blitAlphaTest) blit_8i_to_16_at(dst, psrc, pal, width); else blit_8i_to_16(dst, psrc, pal, width); srcptr+=src.surface->stride; dstptr+=surface->stride; } } else if ((surface->bpp == 16) && (src.surface->bpp==32)) { uint8_t *srcptr=(uint8_t*)src.surface->data; uint8_t *dstptr=(uint8_t*)surface->data; srcptr+=srcarea.left()+srcarea.top()*src.surface->stride; dstptr+=area.left()+area.top()*surface->stride; if (flag & blitAlphaBlend) 
eWarning("ignore unsupported 32bpp -> 16bpp alphablend!"); for (int y=0; y<area.height(); y++) { int width=area.width(); uint32_t *srcp=(uint32_t*)srcptr; uint16_t *dstp=(uint16_t*)dstptr; if (flag & blitAlphaTest) { while (width--) { if (!((*srcp)&0xFF000000)) { srcp++; dstp++; } else { uint32_t icol = *srcp++; #if BYTE_ORDER == LITTLE_ENDIAN *dstp++ = bswap_16(((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19); #else *dstp++ = ((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19; #endif } } } else { while (width--) { uint32_t icol = *srcp++; #if BYTE_ORDER == LITTLE_ENDIAN *dstp++ = bswap_16(((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19); #else *dstp++ = ((icol & 0xFF) >> 3) << 11 | ((icol & 0xFF00) >> 10) << 5 | (icol & 0xFF0000) >> 19; #endif } } srcptr+=src.surface->stride; dstptr+=surface->stride; } } else eWarning("cannot blit %dbpp from %dbpp", surface->bpp, src.surface->bpp); #ifdef GPIXMAP_DEBUG s.stop(); eDebug("[BLITBENCH] cpu blit took %u us", s.elapsed_us()); #endif } } #undef FIX void gPixmap::mergePalette(const gPixmap &target) { if ((!surface->clut.colors) || (!target.surface->clut.colors)) return; gColor *lookup=new gColor[surface->clut.colors]; for (int i=0; i<surface->clut.colors; i++) lookup[i].color=target.surface->clut.findColor(surface->clut.data[i]); delete [] surface->clut.data; surface->clut.colors=target.surface->clut.colors; surface->clut.data=new gRGB[surface->clut.colors]; memcpy(surface->clut.data, target.surface->clut.data, sizeof(gRGB)*surface->clut.colors); uint8_t *dstptr=(uint8_t*)surface->data; for (int ay=0; ay<surface->y; ay++) { for (int ax=0; ax<surface->x; ax++) dstptr[ax]=lookup[dstptr[ax]]; dstptr+=surface->stride; } delete [] lookup; } static inline int sgn(int a) { if (a < 0) return -1; else if (!a) return 0; else return 1; } void gPixmap::line(const gRegion &clip, ePoint start, ePoint dst, gColor color) { uint32_t 
col = color; if (surface->bpp != 8) { if (surface->clut.data && color < surface->clut.colors) col = surface->clut.data[color].argb(); else col = 0x10101*color; col^=0xFF000000; } if (surface->bpp == 16) { #if BYTE_ORDER == LITTLE_ENDIAN col = bswap_16(((col & 0xFF) >> 3) << 11 | ((col & 0xFF00) >> 10) << 5 | (col & 0xFF0000) >> 19); #else col = ((col & 0xFF) >> 3) << 11 | ((col & 0xFF00) >> 10) << 5 | (col & 0xFF0000) >> 19; #endif } line(clip, start, dst, col); } void gPixmap::line(const gRegion &clip, ePoint start, ePoint dst, gRGB color) { uint32_t col; col = color.argb(); col^=0xFF000000; line(clip, start, dst, col); } void gPixmap::line(const gRegion &clip, ePoint start, ePoint dst, unsigned int color) { if (clip.rects.empty()) return; uint8_t *srf8 = 0; uint16_t *srf16 = 0; uint32_t *srf32 = 0; int stride = surface->stride; switch (surface->bpp) { case 8: srf8 = (uint8_t*)surface->data; break; case 16: srf16 = (uint16_t*)surface->data; stride /= 2; break; case 32: srf32 = (uint32_t*)surface->data; stride /= 4; break; } int xa = start.x(), ya = start.y(), xb = dst.x(), yb = dst.y(); int dx, dy, x, y, s1, s2, e, temp, swap, i; dy=abs(yb-ya); dx=abs(xb-xa); s1=sgn(xb-xa); s2=sgn(yb-ya); x=xa; y=ya; if (dy>dx) { temp=dx; dx=dy; dy=temp; swap=1; } else swap=0; e = 2*dy-dx; int lasthit = 0; for(i=1; i<=dx; i++) { /* i don't like this clipping loop, but the only */ /* other choice i see is to calculate the intersections */ /* before iterating through the pixels. */ /* one could optimize this because of the ordering */ /* of the bands. 
*/ lasthit = 0; int a = lasthit; /* if last pixel was invisble, first check bounding box */ if (a == -1) { /* check if we just got into the bbox again */ if (clip.extends.contains(x, y)) lasthit = a = 0; else goto fail; } else if (!clip.rects[a].contains(x, y)) { do { ++a; if ((unsigned int)a == clip.rects.size()) a = 0; if (a == lasthit) { goto fail; lasthit = -1; } } while (!clip.rects[a].contains(x, y)); lasthit = a; } if (srf8) srf8[y * stride + x] = color; else if (srf16) srf16[y * stride + x] = color; else srf32[y * stride + x] = color; fail: while (e>=0) { if (swap==1) x+=s1; else y+=s2; e-=2*dx; } if (swap==1) y+=s2; else x+=s1; e+=2*dy; } } gColor gPalette::findColor(const gRGB rgb) const { /* grayscale? */ if (!data) return (rgb.r + rgb.g + rgb.b) / 3; if (rgb.a == 255) /* Fully transparent, then RGB does not matter */ { for (int t=0; t<colors; t++) if (data[t].a == 255) return t; } int difference=1<<30, best_choice=0; for (int t=0; t<colors; t++) { int ttd; int td=(signed)(rgb.r-data[t].r); td*=td; td*=(255-data[t].a); ttd=td; if (ttd>=difference) continue; td=(signed)(rgb.g-data[t].g); td*=td; td*=(255-data[t].a); ttd+=td; if (ttd>=difference) continue; td=(signed)(rgb.b-data[t].b); td*=td; td*=(255-data[t].a); ttd+=td; if (ttd>=difference) continue; td=(signed)(rgb.a-data[t].a); td*=td; td*=255; ttd+=td; if (ttd>=difference) continue; if (!ttd) return t; difference=ttd; best_choice=t; } return best_choice; } DEFINE_REF(gPixmap); gPixmap::~gPixmap() { if (on_dispose) on_dispose(this); if (surface) delete (gSurface*)surface; } static void donot_delete_surface(gPixmap *pixmap) { pixmap->surface = NULL; } gPixmap::gPixmap(gUnmanagedSurface *surface): surface(surface), on_dispose(donot_delete_surface) { } gPixmap::gPixmap(eSize size, int bpp, int accel): surface(new gSurface(size.width(), size.height(), bpp, accel)), on_dispose(NULL) { } gPixmap::gPixmap(int width, int height, int bpp, gPixmapDisposeCallback call_on_dispose, int accel): surface(new 
gSurface(width, height, bpp, accel)), on_dispose(call_on_dispose) { }
gpl-2.0
milodky/tegra_git
arch/arm/mm/copypage-v6.c
514
3877
/* * linux/arch/arm/mm/copypage-v6.c * * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/shmparam.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> #include "mm.h" #if SHMLBA > 16384 #error FIX ME #endif #define from_address (0xffff8000) #define to_address (0xffffc000) static DEFINE_SPINLOCK(v6_lock); /* * Copy the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of these pages. */ static void v6_copy_user_highpage_nonaliasing(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } /* * Clear the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of this page. */ static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) { void *kaddr = kmap_atomic(page, KM_USER0); clear_page(kaddr); kunmap_atomic(kaddr, KM_USER0); } /* * Discard data in the kernel mapping for the new page. * FIXME: needs this MCRR to be supported. */ static void discard_old_kernel_data(void *kto) { __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" : : "r" (kto), "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) : "cc"); } /* * Copy the page, taking account of the cache colour. 
*/ static void v6_copy_user_highpage_aliasing(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long kfrom, kto; if (!test_and_set_bit(PG_dcache_clean, &from->flags)) __flush_dcache_page(page_mapping(from), from); /* FIXME: not highmem safe */ discard_old_kernel_data(page_address(to)); /* * Now copy the page using the same cache colour as the * pages ultimate destination. */ spin_lock(&v6_lock); set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); kfrom = from_address + (offset << PAGE_SHIFT); kto = to_address + (offset << PAGE_SHIFT); flush_tlb_kernel_page(kfrom); flush_tlb_kernel_page(kto); copy_page((void *)kto, (void *)kfrom); spin_unlock(&v6_lock); } /* * Clear the user page. We need to deal with the aliasing issues, * so remap the kernel page into the same cache colour as the user * page. */ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long to = to_address + (offset << PAGE_SHIFT); /* FIXME: not highmem safe */ discard_old_kernel_data(page_address(page)); /* * Now clear the page using the same cache colour as * the pages ultimate destination. */ spin_lock(&v6_lock); set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); flush_tlb_kernel_page(to); clear_page((void *)to); spin_unlock(&v6_lock); } struct cpu_user_fns v6_user_fns __initdata = { .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing, .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, }; static int __init v6_userpage_init(void) { if (cache_is_vipt_aliasing()) { cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; } return 0; } core_initcall(v6_userpage_init);
gpl-2.0
danbarsor/linux_kernel-2.6.35
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
514
39381
/****************************************************************************** * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" #include "iwl-agn-hw.h" #include "iwl-agn.h" /* * mac80211 queues, ACs, hardware queues, FIFOs. * * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues * * Mac80211 uses the following numbers, which we get as from it * by way of skb_get_queue_mapping(skb): * * VO 0 * VI 1 * BE 2 * BK 3 * * * Regular (not A-MPDU) frames are put into hardware queues corresponding * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their * own queue per aggregation session (RA/TID combination), such queues are * set up to map into FIFOs too, for which we need an AC->FIFO mapping. 
In * order to map frames to the right queue, we also need an AC->hw queue * mapping. This is implemented here. * * Due to the way hw queues are set up (by the hw specific modules like * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity * mapping. */ static const u8 tid_to_ac[] = { /* this matches the mac80211 numbers */ 2, 3, 3, 2, 1, 1, 0, 0 }; static const u8 ac_to_fifo[] = { IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK, }; static inline int get_fifo_from_ac(u8 ac) { return ac_to_fifo[ac]; } static inline int get_ac_from_tid(u16 tid) { if (likely(tid < ARRAY_SIZE(tid_to_ac))) return tid_to_ac[tid]; /* no support for TIDs 8-15 yet */ return -EINVAL; } static inline int get_fifo_from_tid(u16 tid) { if (likely(tid < ARRAY_SIZE(tid_to_ac))) return get_fifo_from_ac(tid_to_ac[tid]); /* no support for TIDs 8-15 yet */ return -EINVAL; } /** * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array */ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, struct iwl_tx_queue *txq, u16 byte_cnt) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; int write_ptr = txq->q.write_ptr; int txq_id = txq->q.id; u8 sec_ctl = 0; u8 sta_id = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); if (txq_id != IWL_CMD_QUEUE_NUM) { sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: len += CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: len += TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: len += WEP_IV_LEN + WEP_ICV_LEN; break; } } bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id]. 
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; } void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, struct iwl_tx_queue *txq) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; int txq_id = txq->q.id; int read_ptr = txq->q.read_ptr; u8 sta_id = 0; __le16 bc_ent; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); if (txq_id != IWL_CMD_QUEUE_NUM) sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id]. tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, u16 txq_id) { u32 tbl_dw_addr; u32 tbl_dw; u16 scd_q2ratid; scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; tbl_dw_addr = priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); if (txq_id & 0x1) tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); else tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); return 0; } static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) { /* Simply stop the queue, but don't change any configuration; * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)| (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } void iwlagn_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) { iwl_write_direct32(priv, HBUS_TARG_WRPTR, (index & 0xff) | (txq_id << 8)); iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index); } void iwlagn_tx_queue_set_status(struct iwl_priv *priv, struct iwl_tx_queue *txq, int tx_fifo_id, int scd_retry) { int txq_id = txq->q.id; int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 
1 : 0; iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) | (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) | (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) | IWLAGN_SCD_QUEUE_STTS_REG_MSK); txq->sched_retry = scd_retry; IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", active ? "Activate" : "Deactivate", scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); } int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int tx_fifo, int sta_id, int tid, u16 ssn_idx) { unsigned long flags; u16 ra_tid; if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues <= txq_id)) { IWL_WARN(priv, "queue number out of range: %d, must be %d to %d\n", txq_id, IWLAGN_FIRST_AMPDU_QUEUE, IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues - 1); return -EINVAL; } ra_tid = BUILD_RAxTID(sta_id, tid); /* Modify device's station table to Tx this TID */ iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); spin_lock_irqsave(&priv->lock, flags); /* Stop this Tx queue before configuring it */ iwlagn_tx_queue_stop_scheduler(priv, txq_id); /* Map receiver-address / traffic-ID to this queue */ iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); /* Set this queue as a chain-building queue */ iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id)); /* enable aggregations for the queue */ iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id)); /* Place first TFD at index corresponding to start sequence number. 
* Assumes that ssn_idx is valid (!= 0xFFF) */ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(priv, priv->scd_base_addr + IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), ((SCD_WIN_SIZE << IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | ((SCD_FRAME_LIMIT << IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); spin_unlock_irqrestore(&priv->lock, flags); return 0; } int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx, u8 tx_fifo) { if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues <= txq_id)) { IWL_ERR(priv, "queue number out of range: %d, must be %d to %d\n", txq_id, IWLAGN_FIRST_AMPDU_QUEUE, IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues - 1); return -EINVAL; } iwlagn_tx_queue_stop_scheduler(priv, txq_id); iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id)); priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); /* supposes that ssn_idx is valid (!= 0xFFF) */ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); iwl_txq_ctx_deactivate(priv, txq_id); iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); return 0; } /* * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask * must be called under priv->lock and mac access */ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask) { iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask); } static inline int get_queue_from_ac(u16 ac) { return ac; } /* * handle 
build REPLY_TX command notification. */ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr, u8 std_id) { __le16 fc = hdr->frame_control; __le32 tx_flags = tx_cmd->tx_flags; tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { tx_flags |= TX_CMD_FLG_ACK_MSK; if (ieee80211_is_mgmt(fc)) tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; if (ieee80211_is_probe_resp(fc) && !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) tx_flags |= TX_CMD_FLG_TSF_MSK; } else { tx_flags &= (~TX_CMD_FLG_ACK_MSK); tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; tx_cmd->sta_id = std_id; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); else tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); } else { tx_cmd->timeout.pm_frame_timeout = 0; } tx_cmd->driver_txop = 0; tx_cmd->tx_flags = tx_flags; tx_cmd->next_frame_len = 0; } #define RTS_DFAULT_RETRY_LIMIT 60 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, __le16 fc) { u32 rate_flags; int rate_idx; u8 rts_retry_limit; u8 data_retry_limit; u8 rate_plcp; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) data_retry_limit = 3; else data_retry_limit = IWLAGN_DEFAULT_TX_RETRY; tx_cmd->data_retry_limit = data_retry_limit; /* Set retry limit on RTS packets */ rts_retry_limit = 
RTS_DFAULT_RETRY_LIMIT; if (data_retry_limit < rts_retry_limit) rts_retry_limit = data_retry_limit; tx_cmd->rts_retry_limit = rts_retry_limit; /* DATA packets will use the uCode station table for rate/antenna * selection */ if (ieee80211_is_data(fc)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; return; } /** * If the current TX rate stored in mac80211 has the MCS bit set, it's * not really a TX rate. Thus, we use the lowest supported rate for * this band. Also use the lowest supported rate if the stored rate * index is invalid. */ rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) rate_idx = rate_lowest_index(&priv->bands[info->band], info->control.sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ if (info->band == IEEE80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_rates[rate_idx].plcp; /* Zero out flags for this packet */ rate_flags = 0; /* Set CCK flag as needed */ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* Set the rate in the TX cmd */ tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); } static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb_frag, int sta_id) { struct ieee80211_key_conf *keyconf = info->control.hw_key; switch (keyconf->alg) { case ALG_CCMP: tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); if (info->flags & IEEE80211_TX_CTL_AMPDU) tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); break; case ALG_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; 
ieee80211_get_tkip_key(keyconf, skb_frag, IEEE80211_TKIP_P2_KEY, tx_cmd->key); IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); break; case ALG_WEP: tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); if (keyconf->keylen == WEP_KEY_LEN_128) tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " "with key %d\n", keyconf->keyidx); break; default: IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); break; } } /* * start REPLY_TX command process */ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = info->control.sta; struct iwl_station_priv *sta_priv = NULL; struct iwl_tx_queue *txq; struct iwl_queue *q; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; struct iwl_tx_cmd *tx_cmd; int swq_id, txq_id; dma_addr_t phys_addr; dma_addr_t txcmd_phys; dma_addr_t scratch_phys; u16 len, len_org, firstlen, secondlen; u16 seq_number = 0; __le16 fc; u8 hdr_len; u8 sta_id; u8 wait_write_ptr = 0; u8 tid = 0; u8 *qc = NULL; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (iwl_is_rfkill(priv)) { IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); goto drop_unlock; } fc = hdr->frame_control; #ifdef CONFIG_IWLWIFI_DEBUG if (ieee80211_is_auth(fc)) IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); else if (ieee80211_is_assoc_req(fc)) IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); else if (ieee80211_is_reassoc_req(fc)) IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); #endif hdr_len = ieee80211_hdrlen(fc); /* Find index into station table for destination station */ if (!info->control.sta) sta_id = priv->hw_params.bcast_sta_id; else sta_id = iwl_sta_id(info->control.sta); if (sta_id == IWL_INVALID_STATION) { IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", hdr->addr1); 
goto drop_unlock; } IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); if (sta) sta_priv = (void *)sta->drv_priv; if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && sta_priv->asleep) { WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); /* * This sends an asynchronous command to the device, * but we can rely on it being processed before the * next frame is processed -- and the next frame to * this station is the one that will consume this * counter. * For now set the counter to just 1 since we do not * support uAPSD yet. */ iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); } txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); if (ieee80211_is_data_qos(fc)) { qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; if (unlikely(tid >= MAX_TID_COUNT)) goto drop_unlock; seq_number = priv->stations[sta_id].tid[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; hdr->seq_ctrl = hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); seq_number += 0x10; /* aggregation is on for this <sta,tid> */ if (info->flags & IEEE80211_TX_CTL_AMPDU && priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; } } txq = &priv->txq[txq_id]; swq_id = txq->swq_id; q = &txq->q; if (unlikely(iwl_queue_space(q) < q->high_mark)) goto drop_unlock; if (ieee80211_is_data_qos(fc)) priv->stations[sta_id].tid[tid].tfds_in_queue++; /* Set up driver data for this TFD */ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); txq->txb[q->write_ptr].skb[0] = skb; /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_cmd = txq->cmd[q->write_ptr]; out_meta = &txq->meta[q->write_ptr]; tx_cmd = &out_cmd->cmd.tx; memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); /* * Set up the Tx-command (not MAC!) header. 
 * Store the chosen Tx queue and TFD index within the sequence field;
 * after Tx, uCode's Tx response will return this value so driver can
 * locate the frame within the tx queue and do post-tx processing.
 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	/* firstlen is the dword-aligned size of cmd header + MAC header */
	firstlen = len = (len + 3) & ~3;

	/* len_org is reused as a flag: 1 if padding was needed, else 0 */
	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		/* Defer the write-pointer update until the last fragment */
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
							     le16_to_cpu(tx_cmd->len));

	/* hand the buffer back to the device after the CPU-side updates */
	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			/* Queue nearly full: throttle mac80211 submissions */
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}

/*
 * Allocate a coherent DMA buffer and record its CPU address, bus address
 * and size in @ptr.  Returns 0 or -ENOMEM.
 */
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

/*
 * Free a buffer obtained from iwlagn_alloc_dma_ptr().  Safe to call on a
 * descriptor that was never allocated (addr == NULL); clears *ptr afterwards.
 */
static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwlagn_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	/* keep-warm buffer and scheduler byte-count tables */
	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}

/**
 * iwlagn_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwlagn_hw_txq_ctx_free(priv);

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

	/* goto-based unwind: free in reverse order of allocation */
 error:
	iwlagn_hw_txq_ctx_free(priv);
	iwlagn_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/*
 * Re-initialise already-allocated Tx queues (same per-queue slot sizing as
 * iwlagn_txq_ctx_alloc(), but via iwl_tx_queue_reset() — no allocation).
 */
void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
	}
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		/* poll up to 1000us for the channel-idle status bit */
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

/*
 * Start a block-ack (aggregation) session for @sta/@tid: claim a free HW
 * queue, program it, and either confirm to mac80211 immediately (HW queue
 * empty) or defer until in-flight frames drain (IWL_EMPTYING_HW_QUEUE_ADDBA).
 * Returns 0 or a negative errno.
 */
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}

/*
 * Stop the block-ack session for @sta/@tid.  If the HW queue still holds
 * frames, only mark IWL_EMPTYING_HW_QUEUE_DELBA and let the reclaim path
 * (iwlagn_txq_check_empty) finish the teardown.  Returns 0 or -errno.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state ==
				IWL_EMPTYING_HW_QUEUE_ADDBA) {
		/* session never fully started; cancel it directly */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		return 0;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 *  mac80211 to clean up it own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

/*
 * Called from the reclaim path: if a deferred ADDBA/DELBA transition was
 * pending and the HW queue has now drained, complete it and notify mac80211.
 * Always returns 0.
 */
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
	return 0;
}

/*
 * Report a completed frame to mac80211 and, for client stations, drop the
 * pending-frames count (unblocking a power-saving station when it hits 0).
 */
static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

/*
 * Reclaim all TFDs of @txq_id up to (and including) @index: report each
 * skb's status, invalidate its byte-count entry and free the TFD.
 * Returns the number of reclaimed QoS-data frames.
 */
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwlagn_tx_status(priv, tx_info->skb[0]);

		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}

/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba))  {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	/* Publish aggregate results on the first frame's tx_info */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_ack_map = bitmap;
	info->status.ampdu_len = agg->frame_count;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
					   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */
	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
gpl-2.0
eric8810/openwrt_chaos_calmer_mt7621
tools/firmware-utils/src/encode_crc.c
770
3662
/* **************************************************************************

   This program creates a CRC checksum and encodes the file that is
   named in the command line.

   Compile with:  gcc encode_crc.c -Wall -o encode_crc

   Author:    Michael Margraf  (michael.margraf@freecom.com)
   Copyright: Freecom Technology GmbH, Berlin, 2004
              www.freecom.com

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

************************************************************************* */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

// *******************************************************************
// CCITT polynom  G(x)=x^16+x^12+x^5+1
#define POLYNOM   0x1021

// CRC algorithm with MSB first (CRC-16/CCITT-FALSE when seeded 0xFFFF).
//
// Fold one input byte "new" into the running CRC "crc" and return the
// updated 16-bit value.
//
// Fix vs. original: "char" may be signed, so a byte >= 0x80 was
// sign-extended before the << 8, and the subsequent left shifts then
// operated on negative ints — undefined behavior in ISO C (6.5.7).
// The arithmetic is now done on an unsigned accumulator; for every
// input the returned value is identical to what the old code produced
// on common two's-complement compilers.
int make_crc16(int crc, char new)
{
  int i;
  // mask to 16 bits and mix the byte (as unsigned) into the top byte
  unsigned int c = ((unsigned int)crc & 0xFFFFu) |
                   ((unsigned int)crc & ~0xFFFFu);
  c ^= (unsigned int)(unsigned char)new << 8;

  for(i=0; i<8; i++) {   // work on 8 bits in "new"
    c <<= 1;             // MSBs first
    if(c & 0x10000u)     // carry out of bit 15 -> reduce by the polynom
      c ^= POLYNOM;
  }
  return (int)(c & 0xFFFFu);
}

// *******************************************************************
// Reads the file "filename" into memory and returns pointer to the buffer.
// *******************************************************************
// Reads the file "filename" into a freshly malloc'd, NUL-terminated
// buffer and returns it (caller frees).  On success *size receives the
// file length in bytes.  Returns NULL on any error.
static char *readfile(char *filename, int *size)
{
  FILE *fp;
  char *buffer;
  struct stat info;

  if (stat(filename,&info)!=0)
    return NULL;

  // "rb": the payload is binary firmware data; text mode would corrupt
  // it on platforms that translate line endings.
  if ((fp=fopen(filename,"rb"))==NULL)
    return NULL;

  buffer = NULL;
  if ((buffer=(char *)malloc(info.st_size+1)) != NULL) {
    if (fread(buffer,1,info.st_size,fp) != (size_t)info.st_size) {
      free(buffer);
      buffer = NULL;
    } else {
      buffer[info.st_size] = '\0';
      if(size) *size = info.st_size;
    }
  }

  (void)fclose(fp);
  return buffer;
}

// *******************************************************************
// Usage: encode_crc <infile> <outfile> [device-flag]
// Writes: [device-flag?] + 16-bit CRC + CRC-encoded copy of <infile>.
int main(int argc, char** argv)
{
  if(argc < 3) {
    printf("ERROR: Argument missing!\n\n");
    return 1;
  }

  int count;   // size of file in bytes
  char *p, *master = readfile(argv[1], &count);
  if(!master) {
    printf("ERROR: File not found!\n");
    return 1;
  }

  int crc = 0xFFFF, z;
  p = master;
  for(z=0; z<count; z++)
    crc = make_crc16(crc, *(p++));   // calculate CRC over the input
  short crc16 = (short)crc;

  // XOR-chain encode the payload with the running CRC state.
  p = master;
  for(z=0; z<count; z++) {
    crc ^= (int)(*p);
    *(p++) = (char)crc;   // encode file
  }

  // write encoded file... ("wb": output is binary, see readfile)
  FILE *fp = fopen(argv[2], "wb");
  if(!fp) {
    printf("ERROR: File not writeable!\n");
    free(master);   // was leaked on this path in the original
    return 1;
  }

  if(argc > 3) {   // add flag for device recognition ?
    fwrite(argv[3], strlen(argv[3]), sizeof(char), fp);
  }
  else {
    // Device is an FSG, so byte swap (IXP4xx is big endian).
    // Swap on an unsigned value: shifting a negative short (after
    // integer promotion) is implementation-defined/UB in ISO C.
    unsigned short u = (unsigned short)crc16;
    crc16 = (short)(((u >> 8) & 0xFFu) | ((u << 8) & 0xFF00u));
  }

  fwrite(&crc16, 1, sizeof(short), fp);     // first write CRC
  fwrite(master, count, sizeof(char), fp);  // write content
  fclose(fp);

  free(master);
  return 0;
}
gpl-2.0
ziozzang/linux
arch/arm/kernel/dma-isa.c
1538
5160
/*
 *  linux/arch/arm/kernel/dma-isa.c
 *
 *  Copyright (C) 1999-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ISA DMA primitives
 *  Taken from various sources, including:
 *   linux/include/asm/dma.h: Defines for using and allocating dma channels.
 *     Written by Hennus Bergman, 1992.
 *     High DMA channel support & info by Hannu Savolainen and John Boyd,
 *     Nov. 1992.
 *   arch/arm/kernel/dma-ebsa285.c
 *   Copyright (C) 1998 Phil Blundell
 */
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/mach/dma.h>

/* Column indices into isa_dma_port[] below */
#define ISA_DMA_MASK	0
#define ISA_DMA_MODE	1
#define ISA_DMA_CLRFF	2
#define ISA_DMA_PGHI	3
#define ISA_DMA_PGLO	4
#define ISA_DMA_ADDR	5
#define ISA_DMA_COUNT	6

/* Per-channel I/O port numbers for the two 8237-style controllers */
static unsigned int isa_dma_port[8][7] = {
	/* MASK  MODE  CLRFF  PAGE_HI PAGE_LO ADDR COUNT */
	{ 0x0a, 0x0b, 0x0c, 0x487, 0x087, 0x00, 0x01 },
	{ 0x0a, 0x0b, 0x0c, 0x483, 0x083, 0x02, 0x03 },
	{ 0x0a, 0x0b, 0x0c, 0x481, 0x081, 0x04, 0x05 },
	{ 0x0a, 0x0b, 0x0c, 0x482, 0x082, 0x06, 0x07 },
	{ 0xd4, 0xd6, 0xd8, 0x000, 0x000, 0xc0, 0xc2 },
	{ 0xd4, 0xd6, 0xd8, 0x48b, 0x08b, 0xc4, 0xc6 },
	{ 0xd4, 0xd6, 0xd8, 0x489, 0x089, 0xc8, 0xca },
	{ 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
};

/*
 * Read back how much of the current transfer remains.  The count
 * register is read twice (low then high byte) and holds count-1.
 * Channels >= 4 are doubled — presumably because the high controller
 * counts in 16-bit words; TODO confirm against 8237 documentation.
 */
static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
{
	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
	int count;

	count = 1 + inb(io_port);
	count |= inb(io_port) << 8;

	return chan < 4 ? count : (count << 1);
}

/*
 * Program address/count/mode for @chan (only if dma->invalid is set)
 * and then unmask the channel.
 */
static void isa_enable_dma(unsigned int chan, dma_t *dma)
{
	if (dma->invalid) {
		unsigned long address, length;
		unsigned int mode;
		enum dma_data_direction direction;

		mode = (chan & 3) | dma->dma_mode;
		switch (dma->dma_mode & DMA_MODE_MASK) {
		case DMA_MODE_READ:
			direction = DMA_FROM_DEVICE;
			break;

		case DMA_MODE_WRITE:
			direction = DMA_TO_DEVICE;
			break;

		case DMA_MODE_CASCADE:
			direction = DMA_BIDIRECTIONAL;
			break;

		default:
			direction = DMA_NONE;
			break;
		}

		if (!dma->sg) {
			/*
			 * Cope with ISA-style drivers which expect cache
			 * coherence.
			 */
			dma->sg = &dma->buf;
			dma->sgcount = 1;
			dma->buf.length = dma->count;
			dma->buf.dma_address = dma_map_single(NULL,
				dma->addr, dma->count,
				direction);
		}

		address = dma->buf.dma_address;
		length  = dma->buf.length - 1;

		/* page registers carry bits 16..31 of the bus address */
		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);

		/* high channels are programmed in word units */
		if (chan >= 4) {
			address >>= 1;
			length >>= 1;
		}

		/* reset the controller's byte flip-flop before the
		 * low/high byte pairs below */
		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);

		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);

		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);

		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);

		dma->invalid = 0;
	}
	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
}

/* Mask (disable) the channel: bit 2 set = mask on */
static void isa_disable_dma(unsigned int chan, dma_t *dma)
{
	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
}

static struct dma_ops isa_dma_ops = {
	.type		= "ISA",
	.enable		= isa_enable_dma,
	.disable	= isa_disable_dma,
	.residue	= isa_get_dma_residue,
};

static struct resource dma_resources[] = { {
	.name	= "dma1",
	.start	= 0x0000,
	.end	= 0x000f
}, {
	.name	= "dma low page",
	.start	= 0x0080,
	.end	= 0x008f
}, {
	.name	= "dma2",
	.start	= 0x00c0,
	.end	= 0x00df
}, {
	.name	= "dma high page",
	.start	= 0x0480,
	.end	= 0x048f
} };

static dma_t isa_dma[8];

/*
 * ISA DMA always starts at channel 0
 */
void __init isa_init_dma(void)
{
	/*
	 * Try to autodetect presence of an ISA DMA controller.
	 * We do some minimal initialisation, and check that
	 * channel 0's DMA address registers are writeable.
	 */
	outb(0xff, 0x0d);
	outb(0xff, 0xda);

	/*
	 * Write high and low address, and then read them back
	 * in the same order.
	 */
	outb(0x55, 0x00);
	outb(0xaa, 0x00);

	if (inb(0) == 0x55 && inb(0) == 0xaa) {
		unsigned int chan, i;

		for (chan = 0; chan < 8; chan++) {
			isa_dma[chan].d_ops = &isa_dma_ops;
			isa_disable_dma(chan, NULL);
		}

		/* program a cascade/verify mode on every channel of
		 * both controllers */
		outb(0x40, 0x0b);
		outb(0x41, 0x0b);
		outb(0x42, 0x0b);
		outb(0x43, 0x0b);

		outb(0xc0, 0xd6);
		outb(0x41, 0xd6);
		outb(0x42, 0xd6);
		outb(0x43, 0xd6);

		/* unmask channel 4 (the cascade channel on DMA2) */
		outb(0, 0xd4);

		outb(0x10, 0x08);
		outb(0x10, 0xd0);

		/*
		 * Is this correct?  According to my documentation, it
		 * doesn't appear to be.  It should be:
		 *  outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
		 */
		outb(0x30, 0x40b);
		outb(0x31, 0x40b);
		outb(0x32, 0x40b);
		outb(0x33, 0x40b);
		outb(0x31, 0x4d6);
		outb(0x32, 0x4d6);
		outb(0x33, 0x4d6);

		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
			request_resource(&ioport_resource, dma_resources + i);

		for (chan = 0; chan < 8; chan++) {
			int ret = isa_dma_add(chan, &isa_dma[chan]);
			if (ret)
				pr_err("ISADMA%u: unable to register: %d\n",
				       chan, ret);
		}

		request_dma(DMA_ISA_CASCADE, "cascade");
	}
}
gpl-2.0
Jackeagle/kernel
drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
2562
12283
#include <core/client.h> #include <core/os.h> #include <core/class.h> #include <core/engctx.h> #include <core/handle.h> #include <core/enum.h> #include <subdev/timer.h> #include <subdev/fb.h> #include <engine/graph.h> #include <engine/fifo.h> #include "nv20.h" #include "regs.h" /******************************************************************************* * Graphics object classes ******************************************************************************/ static struct nouveau_oclass nv20_graph_sclass[] = { { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */ { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */ { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ {}, }; /******************************************************************************* * PGRAPH context ******************************************************************************/ static int nv20_graph_context_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv20_graph_chan *chan; int ret, i; ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); *pobject = nv_object(chan); if (ret) return ret; chan->chid = nouveau_fifo_chan(parent)->chid; nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24)); nv_wo32(chan, 0x033c, 0xffff0000); nv_wo32(chan, 0x03a0, 
0x0fff0000); nv_wo32(chan, 0x03a4, 0x0fff0000); nv_wo32(chan, 0x047c, 0x00000101); nv_wo32(chan, 0x0490, 0x00000111); nv_wo32(chan, 0x04a8, 0x44400000); for (i = 0x04d4; i <= 0x04e0; i += 4) nv_wo32(chan, i, 0x00030303); for (i = 0x04f4; i <= 0x0500; i += 4) nv_wo32(chan, i, 0x00080000); for (i = 0x050c; i <= 0x0518; i += 4) nv_wo32(chan, i, 0x01012000); for (i = 0x051c; i <= 0x0528; i += 4) nv_wo32(chan, i, 0x000105b8); for (i = 0x052c; i <= 0x0538; i += 4) nv_wo32(chan, i, 0x00080008); for (i = 0x055c; i <= 0x0598; i += 4) nv_wo32(chan, i, 0x07ff0000); nv_wo32(chan, 0x05a4, 0x4b7fffff); nv_wo32(chan, 0x05fc, 0x00000001); nv_wo32(chan, 0x0604, 0x00004000); nv_wo32(chan, 0x0610, 0x00000001); nv_wo32(chan, 0x0618, 0x00040000); nv_wo32(chan, 0x061c, 0x00010000); for (i = 0x1c1c; i <= 0x248c; i += 16) { nv_wo32(chan, (i + 0), 0x10700ff9); nv_wo32(chan, (i + 4), 0x0436086c); nv_wo32(chan, (i + 8), 0x000c001b); } nv_wo32(chan, 0x281c, 0x3f800000); nv_wo32(chan, 0x2830, 0x3f800000); nv_wo32(chan, 0x285c, 0x40000000); nv_wo32(chan, 0x2860, 0x3f800000); nv_wo32(chan, 0x2864, 0x3f000000); nv_wo32(chan, 0x286c, 0x40000000); nv_wo32(chan, 0x2870, 0x3f800000); nv_wo32(chan, 0x2878, 0xbf800000); nv_wo32(chan, 0x2880, 0xbf800000); nv_wo32(chan, 0x34a4, 0x000fe000); nv_wo32(chan, 0x3530, 0x000003f8); nv_wo32(chan, 0x3540, 0x002fe000); for (i = 0x355c; i <= 0x3578; i += 4) nv_wo32(chan, i, 0x001c527c); return 0; } int nv20_graph_context_init(struct nouveau_object *object) { struct nv20_graph_priv *priv = (void *)object->engine; struct nv20_graph_chan *chan = (void *)object; int ret; ret = nouveau_graph_context_init(&chan->base); if (ret) return ret; nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4); return 0; } int nv20_graph_context_fini(struct nouveau_object *object, bool suspend) { struct nv20_graph_priv *priv = (void *)object->engine; struct nv20_graph_chan *chan = (void *)object; int chid = -1; nv_mask(priv, 0x400720, 0x00000001, 0x00000000); if (nv_rd32(priv, 
0x400144) & 0x00010000) chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24; if (chan->chid == chid) { nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4); nv_wr32(priv, 0x400788, 0x00000002); nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); nv_wr32(priv, 0x400144, 0x10000000); nv_mask(priv, 0x400148, 0xff000000, 0x1f000000); } nv_mask(priv, 0x400720, 0x00000001, 0x00000001); nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000); return nouveau_graph_context_fini(&chan->base, suspend); } static struct nouveau_oclass nv20_graph_cclass = { .handle = NV_ENGCTX(GR, 0x20), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv20_graph_context_ctor, .dtor = _nouveau_graph_context_dtor, .init = nv20_graph_context_init, .fini = nv20_graph_context_fini, .rd32 = _nouveau_graph_context_rd32, .wr32 = _nouveau_graph_context_wr32, }, }; /******************************************************************************* * PGRAPH engine/subdev functions ******************************************************************************/ void nv20_graph_tile_prog(struct nouveau_engine *engine, int i) { struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i]; struct nouveau_fifo *pfifo = nouveau_fifo(engine); struct nv20_graph_priv *priv = (void *)engine; unsigned long flags; pfifo->pause(pfifo, &flags); nv04_graph_idle(priv); nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); if (nv_device(engine)->chipset != 0x34) { nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); } 
pfifo->start(pfifo, &flags); } void nv20_graph_intr(struct nouveau_subdev *subdev) { struct nouveau_engine *engine = nv_engine(subdev); struct nouveau_object *engctx; struct nouveau_handle *handle; struct nv20_graph_priv *priv = (void *)subdev; u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); u32 chid = (addr & 0x01f00000) >> 20; u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00001ffc); u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff; u32 show = stat; engctx = nouveau_engctx_get(engine, chid); if (stat & NV_PGRAPH_INTR_ERROR) { if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { handle = nouveau_handle_get_class(engctx, class); if (handle && !nv_call(handle->object, mthd, data)) show &= ~NV_PGRAPH_INTR_ERROR; nouveau_handle_put(handle); } } nv_wr32(priv, NV03_PGRAPH_INTR, stat); nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); if (show) { nv_error(priv, "%s", ""); nouveau_bitfield_print(nv10_graph_intr_name, show); pr_cont(" nsource:"); nouveau_bitfield_print(nv04_graph_nsource, nsource); pr_cont(" nstatus:"); nouveau_bitfield_print(nv10_graph_nstatus, nstatus); pr_cont("\n"); nv_error(priv, "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", chid, nouveau_client_name(engctx), subc, class, mthd, data); } nouveau_engctx_put(engctx); } static int nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv20_graph_priv *priv; int ret; ret = nouveau_graph_create(parent, engine, oclass, true, &priv); *pobject = nv_object(priv); if (ret) return ret; ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); if (ret) return ret; nv_subdev(priv)->unit = 0x00001000; nv_subdev(priv)->intr = 
nv20_graph_intr; nv_engine(priv)->cclass = &nv20_graph_cclass; nv_engine(priv)->sclass = nv20_graph_sclass; nv_engine(priv)->tile_prog = nv20_graph_tile_prog; return 0; } void nv20_graph_dtor(struct nouveau_object *object) { struct nv20_graph_priv *priv = (void *)object; nouveau_gpuobj_ref(NULL, &priv->ctxtab); nouveau_graph_destroy(&priv->base); } int nv20_graph_init(struct nouveau_object *object) { struct nouveau_engine *engine = nv_engine(object); struct nv20_graph_priv *priv = (void *)engine; struct nouveau_fb *pfb = nouveau_fb(object); u32 tmp, vramsz; int ret, i; ret = nouveau_graph_init(&priv->base); if (ret) return ret; nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4); if (nv_device(priv)->chipset == 0x20) { nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000); for (i = 0; i < 15; i++) nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); } else { nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000); for (i = 0; i < 32; i++) nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); } nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700); nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000); nv_wr32(priv, 0x40009C , 0x00000040); if (nv_device(priv)->chipset >= 0x25) { nv_wr32(priv, 0x400890, 0x00a8cfff); nv_wr32(priv, 0x400610, 0x304B1FB6); nv_wr32(priv, 0x400B80, 0x1cbd3883); nv_wr32(priv, 0x400B84, 0x44000000); nv_wr32(priv, 0x400098, 0x40000080); nv_wr32(priv, 0x400B88, 0x000000ff); } else { nv_wr32(priv, 0x400880, 0x0008c7df); nv_wr32(priv, 0x400094, 0x00000005); nv_wr32(priv, 0x400B80, 0x45eae20e); nv_wr32(priv, 0x400B84, 0x24000000); nv_wr32(priv, 0x400098, 0x00000040); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 
0x00E00038); nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038); nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); } /* Turn all the tiling regions off. */ for (i = 0; i < pfb->tile.regions; i++) engine->tile_prog(engine, i); nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324)); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324)); nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00; nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp); tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100; nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp); /* begin RAM config */ vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1; nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200)); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204)); nv_wr32(priv, 0x400820, 0); nv_wr32(priv, 0x400824, 0); nv_wr32(priv, 0x400864, vramsz - 1); nv_wr32(priv, 0x400868, vramsz - 1); /* interesting.. the below overwrites some of the tile setup above.. */ nv_wr32(priv, 0x400B20, 0x00000000); nv_wr32(priv, 0x400B04, 0xFFFFFFFF); nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); return 0; } struct nouveau_oclass nv20_graph_oclass = { .handle = NV_ENGINE(GR, 0x20), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv20_graph_ctor, .dtor = nv20_graph_dtor, .init = nv20_graph_init, .fini = _nouveau_graph_fini, }, };
gpl-2.0
Tasssadar/kernel_nexus
arch/arm/mm/nommu.c
2818
2262
/* * linux/arch/arm/mm/nommu.c * * ARM uCLinux supporting functions. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/io.h> #include <linux/memblock.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include "mm.h" void __init arm_mm_memblock_reserve(void) { /* * Register the exception vector page. * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } void __init sanity_check_meminfo(void) { } /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ void __init paging_init(struct machine_desc *mdesc) { bootmem_init(); } /* * We don't need to do anything here for nommu machines. */ void setup_mm_for_reboot(char mode) { } void flush_dcache_page(struct page *page) { __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } EXPORT_SYMBOL(flush_dcache_page); void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len) { memcpy(dst, src, len); if (vma->vm_flags & VM_EXEC) __cpuc_coherent_user_range(uaddr, uaddr + len); } void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) { if (pfn >= (0x100000000ULL >> PAGE_SHIFT)) return NULL; return (void __iomem *) (offset + (pfn << PAGE_SHIFT)); } EXPORT_SYMBOL(__arm_ioremap_pfn); void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { return __arm_ioremap_pfn(pfn, offset, size, mtype); } void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { return (void __iomem *)phys_addr; } EXPORT_SYMBOL(__arm_ioremap); void __iomem *__arm_ioremap_caller(unsigned long 
phys_addr, size_t size, unsigned int mtype, void *caller) { return __arm_ioremap(phys_addr, size, mtype); } void __iounmap(volatile void __iomem *addr) { } EXPORT_SYMBOL(__iounmap);
gpl-2.0
altcrauer/linux
arch/mips/mti-sead3/sead3-console.c
4098
1177
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> #include <linux/console.h> #include <linux/serial_reg.h> #include <linux/io.h> #define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */ #define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */ #define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4)) static char console_port = 1; static inline unsigned int serial_in(int offset, unsigned int base_addr) { return __raw_readl(PORT(base_addr, offset)) & 0xff; } static inline void serial_out(int offset, int value, unsigned int base_addr) { __raw_writel(value, PORT(base_addr, offset)); } void __init fw_init_early_console(char port) { console_port = port; } int prom_putchar(char c) { unsigned int base_addr; base_addr = console_port ? SEAD_UART1_REGS_BASE : SEAD_UART0_REGS_BASE; while ((serial_in(UART_LSR, base_addr) & UART_LSR_THRE) == 0) ; serial_out(UART_TX, c, base_addr); return 1; }
gpl-2.0
heptalium/rpi-sources-3.2
tools/perf/util/ctype.c
4354
1466
/* * Sane locale-independent, ASCII ctype. * * No surprises, and works with signed and unsigned chars. */ #include "cache.h" enum { S = GIT_SPACE, A = GIT_ALPHA, D = GIT_DIGIT, G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */ PS = GIT_SPACE | GIT_PRINT_EXTRA, }; unsigned char sane_ctype[256] = { /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ /* Nothing in the 128.. range */ }; const char *graph_line = "_____________________________________________________________________" "_____________________________________________________________________"; const char *graph_dotted_line = "---------------------------------------------------------------------" "---------------------------------------------------------------------" "---------------------------------------------------------------------";
gpl-2.0
armani-dev/kernel_test
fs/nilfs2/dat.c
4866
13261
/* * dat.c - NILFS disk address translation. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Koji Sato <koji@osrg.net>. */ #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/string.h> #include <linux/errno.h> #include "nilfs.h" #include "mdt.h" #include "alloc.h" #include "dat.h" #define NILFS_CNO_MIN ((__u64)1) #define NILFS_CNO_MAX (~(__u64)0) struct nilfs_dat_info { struct nilfs_mdt_info mi; struct nilfs_palloc_cache palloc_cache; struct nilfs_shadow_map shadow; }; static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat) { return (struct nilfs_dat_info *)NILFS_MDT(dat); } static int nilfs_dat_prepare_entry(struct inode *dat, struct nilfs_palloc_req *req, int create) { return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, create, &req->pr_entry_bh); } static void nilfs_dat_commit_entry(struct inode *dat, struct nilfs_palloc_req *req) { mark_buffer_dirty(req->pr_entry_bh); nilfs_mdt_mark_dirty(dat); brelse(req->pr_entry_bh); } static void nilfs_dat_abort_entry(struct inode *dat, struct nilfs_palloc_req *req) { brelse(req->pr_entry_bh); } int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) { int ret; ret = nilfs_palloc_prepare_alloc_entry(dat, req); if (ret < 0) return ret; ret = 
nilfs_dat_prepare_entry(dat, req, 1); if (ret < 0) nilfs_palloc_abort_alloc_entry(dat, req); return ret; } void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MAX); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr); nilfs_palloc_commit_alloc_entry(dat, req); nilfs_dat_commit_entry(dat, req); } void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req) { nilfs_dat_abort_entry(dat, req); nilfs_palloc_abort_alloc_entry(dat, req); } static void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MIN); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); nilfs_palloc_commit_free_entry(dat, req); } int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req) { int ret; ret = nilfs_dat_prepare_entry(dat, req, 0); WARN_ON(ret == -ENOENT); return ret; } void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, sector_t blocknr) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); } int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; __u64 start; sector_t blocknr; void *kaddr; int ret; ret = 
nilfs_dat_prepare_entry(dat, req, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); return ret; } kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (blocknr == 0) { ret = nilfs_palloc_prepare_free_entry(dat, req); if (ret < 0) { nilfs_dat_abort_entry(dat, req); return ret; } } return 0; } void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, int dead) { struct nilfs_dat_entry *entry; __u64 start, end; sector_t blocknr; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); end = start = le64_to_cpu(entry->de_start); if (!dead) { end = nilfs_mdt_cno(dat); WARN_ON(start > end); } entry->de_end = cpu_to_le64(end); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (blocknr == 0) nilfs_dat_commit_free(dat, req); else nilfs_dat_commit_entry(dat, req); } void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; __u64 start; sector_t blocknr; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (start == nilfs_mdt_cno(dat) && blocknr == 0) nilfs_palloc_abort_free_entry(dat, req); nilfs_dat_abort_entry(dat, req); } int nilfs_dat_prepare_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq) { int ret; ret = nilfs_dat_prepare_end(dat, oldreq); if (!ret) { ret = nilfs_dat_prepare_alloc(dat, newreq); if (ret < 0) nilfs_dat_abort_end(dat, oldreq); } return ret; } void nilfs_dat_commit_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq, int dead) { 
nilfs_dat_commit_end(dat, oldreq, dead); nilfs_dat_commit_alloc(dat, newreq); } void nilfs_dat_abort_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq) { nilfs_dat_abort_end(dat, oldreq); nilfs_dat_abort_alloc(dat, newreq); } /** * nilfs_dat_mark_dirty - * @dat: DAT file inode * @vblocknr: virtual block number * * Description: * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) { struct nilfs_palloc_req req; int ret; req.pr_entry_nr = vblocknr; ret = nilfs_dat_prepare_entry(dat, &req, 0); if (ret == 0) nilfs_dat_commit_entry(dat, &req); return ret; } /** * nilfs_dat_freev - free virtual block numbers * @dat: DAT file inode * @vblocknrs: array of virtual block numbers * @nitems: number of virtual block numbers * * Description: nilfs_dat_freev() frees the virtual block numbers specified by * @vblocknrs and @nitems. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The virtual block number have not been allocated. */ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) { return nilfs_palloc_freev(dat, vblocknrs, nitems); } /** * nilfs_dat_move - change a block number * @dat: DAT file inode * @vblocknr: virtual block number * @blocknr: block number * * Description: nilfs_dat_move() changes the block number associated with * @vblocknr to @blocknr. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) { struct buffer_head *entry_bh; struct nilfs_dat_entry *entry; void *kaddr; int ret; ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh); if (ret < 0) return ret; /* * The given disk block number (blocknr) is not yet written to * the device at this point. * * To prevent nilfs_dat_translate() from returning the * uncommitted block number, this makes a copy of the entry * buffer and redirects nilfs_dat_translate() to the copy. */ if (!buffer_nilfs_redirected(entry_bh)) { ret = nilfs_mdt_freeze_buffer(dat, entry_bh); if (ret) { brelse(entry_bh); return ret; } } kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__, (unsigned long long)vblocknr, (unsigned long long)le64_to_cpu(entry->de_start), (unsigned long long)le64_to_cpu(entry->de_end)); kunmap_atomic(kaddr); brelse(entry_bh); return -EINVAL; } WARN_ON(blocknr == 0); entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr); mark_buffer_dirty(entry_bh); nilfs_mdt_mark_dirty(dat); brelse(entry_bh); return 0; } /** * nilfs_dat_translate - translate a virtual block number to a block number * @dat: DAT file inode * @vblocknr: virtual block number * @blocknrp: pointer to a block number * * Description: nilfs_dat_translate() maps the virtual block number @vblocknr * to the corresponding block number. * * Return Value: On success, 0 is returned and the block number associated * with @vblocknr is stored in the place pointed by @blocknrp. On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A block number associated with @vblocknr does not exist. 
*/ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) { struct buffer_head *entry_bh, *bh; struct nilfs_dat_entry *entry; sector_t blocknr; void *kaddr; int ret; ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh); if (ret < 0) return ret; if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) { bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh); if (bh) { WARN_ON(!buffer_uptodate(bh)); brelse(entry_bh); entry_bh = bh; } } kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); blocknr = le64_to_cpu(entry->de_blocknr); if (blocknr == 0) { ret = -ENOENT; goto out; } *blocknrp = blocknr; out: kunmap_atomic(kaddr); brelse(entry_bh); return ret; } ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, size_t nvi) { struct buffer_head *entry_bh; struct nilfs_dat_entry *entry; struct nilfs_vinfo *vinfo = buf; __u64 first, last; void *kaddr; unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block; int i, j, n, ret; for (i = 0; i < nvi; i += n) { ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr, 0, &entry_bh); if (ret < 0) return ret; kaddr = kmap_atomic(entry_bh->b_page); /* last virtual block number in this block */ first = vinfo->vi_vblocknr; do_div(first, entries_per_block); first *= entries_per_block; last = first + entries_per_block - 1; for (j = i, n = 0; j < nvi && vinfo->vi_vblocknr >= first && vinfo->vi_vblocknr <= last; j++, n++, vinfo = (void *)vinfo + visz) { entry = nilfs_palloc_block_get_entry( dat, vinfo->vi_vblocknr, entry_bh, kaddr); vinfo->vi_start = le64_to_cpu(entry->de_start); vinfo->vi_end = le64_to_cpu(entry->de_end); vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); } kunmap_atomic(kaddr); brelse(entry_bh); } return nvi; } /** * nilfs_dat_read - read or get dat inode * @sb: super block instance * @entry_size: size of a dat entry * @raw_inode: on-disk dat inode * @inodep: buffer to store the inode */ int 
nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep) { static struct lock_class_key dat_lock_key; struct inode *dat; struct nilfs_dat_info *di; int err; dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO); if (unlikely(!dat)) return -ENOMEM; if (!(dat->i_state & I_NEW)) goto out; err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di)); if (err) goto failed; err = nilfs_palloc_init_blockgroup(dat, entry_size); if (err) goto failed; di = NILFS_DAT_I(dat); lockdep_set_class(&di->mi.mi_sem, &dat_lock_key); nilfs_palloc_setup_cache(dat, &di->palloc_cache); nilfs_mdt_setup_shadow_map(dat, &di->shadow); err = nilfs_read_inode_common(dat, raw_inode); if (err) goto failed; unlock_new_inode(dat); out: *inodep = dat; return 0; failed: iget_failed(dat); return err; }
gpl-2.0
garwedgess/LuPuS_honami_stock
drivers/media/video/mt9t001.c
4866
23073
/* * Driver for MT9T001 CMOS Image Sensor from Aptina (Micron) * * Copyright (C) 2010-2011, Laurent Pinchart <laurent.pinchart@ideasonboard.com> * * Based on the MT9M001 driver, * * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/i2c.h> #include <linux/module.h> #include <linux/log2.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/v4l2-mediabus.h> #include <media/mt9t001.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #define MT9T001_PIXEL_ARRAY_HEIGHT 1568 #define MT9T001_PIXEL_ARRAY_WIDTH 2112 #define MT9T001_CHIP_VERSION 0x00 #define MT9T001_CHIP_ID 0x1621 #define MT9T001_ROW_START 0x01 #define MT9T001_ROW_START_MIN 0 #define MT9T001_ROW_START_DEF 20 #define MT9T001_ROW_START_MAX 1534 #define MT9T001_COLUMN_START 0x02 #define MT9T001_COLUMN_START_MIN 0 #define MT9T001_COLUMN_START_DEF 32 #define MT9T001_COLUMN_START_MAX 2046 #define MT9T001_WINDOW_HEIGHT 0x03 #define MT9T001_WINDOW_HEIGHT_MIN 1 #define MT9T001_WINDOW_HEIGHT_DEF 1535 #define MT9T001_WINDOW_HEIGHT_MAX 1567 #define MT9T001_WINDOW_WIDTH 0x04 #define MT9T001_WINDOW_WIDTH_MIN 1 #define MT9T001_WINDOW_WIDTH_DEF 2047 #define MT9T001_WINDOW_WIDTH_MAX 2111 #define MT9T001_HORIZONTAL_BLANKING 0x05 #define MT9T001_HORIZONTAL_BLANKING_MIN 21 #define MT9T001_HORIZONTAL_BLANKING_MAX 1023 #define MT9T001_VERTICAL_BLANKING 0x06 #define MT9T001_VERTICAL_BLANKING_MIN 3 #define MT9T001_VERTICAL_BLANKING_MAX 1023 #define MT9T001_OUTPUT_CONTROL 0x07 #define MT9T001_OUTPUT_CONTROL_SYNC (1 << 0) #define MT9T001_OUTPUT_CONTROL_CHIP_ENABLE (1 << 1) #define MT9T001_OUTPUT_CONTROL_TEST_DATA (1 << 6) #define MT9T001_SHUTTER_WIDTH_HIGH 0x08 #define MT9T001_SHUTTER_WIDTH_LOW 0x09 #define MT9T001_SHUTTER_WIDTH_MIN 1 #define 
MT9T001_SHUTTER_WIDTH_DEF 1561 #define MT9T001_SHUTTER_WIDTH_MAX (1024 * 1024) #define MT9T001_PIXEL_CLOCK 0x0a #define MT9T001_PIXEL_CLOCK_INVERT (1 << 15) #define MT9T001_PIXEL_CLOCK_SHIFT_MASK (7 << 8) #define MT9T001_PIXEL_CLOCK_SHIFT_SHIFT 8 #define MT9T001_PIXEL_CLOCK_DIVIDE_MASK (0x7f << 0) #define MT9T001_FRAME_RESTART 0x0b #define MT9T001_SHUTTER_DELAY 0x0c #define MT9T001_SHUTTER_DELAY_MAX 2047 #define MT9T001_RESET 0x0d #define MT9T001_READ_MODE1 0x1e #define MT9T001_READ_MODE_SNAPSHOT (1 << 8) #define MT9T001_READ_MODE_STROBE_ENABLE (1 << 9) #define MT9T001_READ_MODE_STROBE_WIDTH (1 << 10) #define MT9T001_READ_MODE_STROBE_OVERRIDE (1 << 11) #define MT9T001_READ_MODE2 0x20 #define MT9T001_READ_MODE_BAD_FRAMES (1 << 0) #define MT9T001_READ_MODE_LINE_VALID_CONTINUOUS (1 << 9) #define MT9T001_READ_MODE_LINE_VALID_FRAME (1 << 10) #define MT9T001_READ_MODE3 0x21 #define MT9T001_READ_MODE_GLOBAL_RESET (1 << 0) #define MT9T001_READ_MODE_GHST_CTL (1 << 1) #define MT9T001_ROW_ADDRESS_MODE 0x22 #define MT9T001_ROW_SKIP_MASK (7 << 0) #define MT9T001_ROW_BIN_MASK (3 << 3) #define MT9T001_ROW_BIN_SHIFT 3 #define MT9T001_COLUMN_ADDRESS_MODE 0x23 #define MT9T001_COLUMN_SKIP_MASK (7 << 0) #define MT9T001_COLUMN_BIN_MASK (3 << 3) #define MT9T001_COLUMN_BIN_SHIFT 3 #define MT9T001_GREEN1_GAIN 0x2b #define MT9T001_BLUE_GAIN 0x2c #define MT9T001_RED_GAIN 0x2d #define MT9T001_GREEN2_GAIN 0x2e #define MT9T001_TEST_DATA 0x32 #define MT9T001_GLOBAL_GAIN 0x35 #define MT9T001_GLOBAL_GAIN_MIN 8 #define MT9T001_GLOBAL_GAIN_MAX 1024 #define MT9T001_BLACK_LEVEL 0x49 #define MT9T001_ROW_BLACK_DEFAULT_OFFSET 0x4b #define MT9T001_BLC_DELTA_THRESHOLDS 0x5d #define MT9T001_CAL_THRESHOLDS 0x5f #define MT9T001_GREEN1_OFFSET 0x60 #define MT9T001_GREEN2_OFFSET 0x61 #define MT9T001_BLACK_LEVEL_CALIBRATION 0x62 #define MT9T001_BLACK_LEVEL_OVERRIDE (1 << 0) #define MT9T001_BLACK_LEVEL_DISABLE_OFFSET (1 << 1) #define MT9T001_BLACK_LEVEL_RECALCULATE (1 << 12) #define 
MT9T001_BLACK_LEVEL_LOCK_RED_BLUE (1 << 13) #define MT9T001_BLACK_LEVEL_LOCK_GREEN (1 << 14) #define MT9T001_RED_OFFSET 0x63 #define MT9T001_BLUE_OFFSET 0x64 struct mt9t001 { struct v4l2_subdev subdev; struct media_pad pad; struct v4l2_mbus_framefmt format; struct v4l2_rect crop; struct v4l2_ctrl_handler ctrls; struct v4l2_ctrl *gains[4]; u16 output_control; u16 black_level; }; static inline struct mt9t001 *to_mt9t001(struct v4l2_subdev *sd) { return container_of(sd, struct mt9t001, subdev); } static int mt9t001_read(struct i2c_client *client, u8 reg) { return i2c_smbus_read_word_swapped(client, reg); } static int mt9t001_write(struct i2c_client *client, u8 reg, u16 data) { return i2c_smbus_write_word_swapped(client, reg, data); } static int mt9t001_set_output_control(struct mt9t001 *mt9t001, u16 clear, u16 set) { struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev); u16 value = (mt9t001->output_control & ~clear) | set; int ret; if (value == mt9t001->output_control) return 0; ret = mt9t001_write(client, MT9T001_OUTPUT_CONTROL, value); if (ret < 0) return ret; mt9t001->output_control = value; return 0; } /* ----------------------------------------------------------------------------- * V4L2 subdev video operations */ static struct v4l2_mbus_framefmt * __mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh, unsigned int pad, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_format(fh, pad); case V4L2_SUBDEV_FORMAT_ACTIVE: return &mt9t001->format; default: return NULL; } } static struct v4l2_rect * __mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh, unsigned int pad, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_crop(fh, pad); case V4L2_SUBDEV_FORMAT_ACTIVE: return &mt9t001->crop; default: return NULL; } } static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable) { const u16 
mode = MT9T001_OUTPUT_CONTROL_CHIP_ENABLE; struct i2c_client *client = v4l2_get_subdevdata(subdev); struct mt9t001 *mt9t001 = to_mt9t001(subdev); struct v4l2_mbus_framefmt *format = &mt9t001->format; struct v4l2_rect *crop = &mt9t001->crop; unsigned int hratio; unsigned int vratio; int ret; if (!enable) return mt9t001_set_output_control(mt9t001, mode, 0); /* Configure the window size and row/column bin */ hratio = DIV_ROUND_CLOSEST(crop->width, format->width); vratio = DIV_ROUND_CLOSEST(crop->height, format->height); ret = mt9t001_write(client, MT9T001_ROW_ADDRESS_MODE, hratio - 1); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_COLUMN_ADDRESS_MODE, vratio - 1); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_COLUMN_START, crop->left); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_ROW_START, crop->top); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_WINDOW_WIDTH, crop->width - 1); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_WINDOW_HEIGHT, crop->height - 1); if (ret < 0) return ret; /* Switch to master "normal" mode */ return mt9t001_set_output_control(mt9t001, 0, mode); } static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = V4L2_MBUS_FMT_SGRBG10_1X10; return 0; } static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = (MT9T001_WINDOW_WIDTH_DEF + 1) / fse->index; fse->max_width = fse->min_width; fse->min_height = (MT9T001_WINDOW_HEIGHT_DEF + 1) / fse->index; fse->max_height = fse->min_height; return 0; } static int mt9t001_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *format) { struct mt9t001 *mt9t001 = to_mt9t001(subdev); format->format = 
*__mt9t001_get_pad_format(mt9t001, fh, format->pad, format->which); return 0; } static int mt9t001_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *format) { struct mt9t001 *mt9t001 = to_mt9t001(subdev); struct v4l2_mbus_framefmt *__format; struct v4l2_rect *__crop; unsigned int width; unsigned int height; unsigned int hratio; unsigned int vratio; __crop = __mt9t001_get_pad_crop(mt9t001, fh, format->pad, format->which); /* Clamp the width and height to avoid dividing by zero. */ width = clamp_t(unsigned int, ALIGN(format->format.width, 2), max(__crop->width / 8, MT9T001_WINDOW_HEIGHT_MIN + 1), __crop->width); height = clamp_t(unsigned int, ALIGN(format->format.height, 2), max(__crop->height / 8, MT9T001_WINDOW_HEIGHT_MIN + 1), __crop->height); hratio = DIV_ROUND_CLOSEST(__crop->width, width); vratio = DIV_ROUND_CLOSEST(__crop->height, height); __format = __mt9t001_get_pad_format(mt9t001, fh, format->pad, format->which); __format->width = __crop->width / hratio; __format->height = __crop->height / vratio; format->format = *__format; return 0; } static int mt9t001_get_crop(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct mt9t001 *mt9t001 = to_mt9t001(subdev); crop->rect = *__mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which); return 0; } static int mt9t001_set_crop(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct mt9t001 *mt9t001 = to_mt9t001(subdev); struct v4l2_mbus_framefmt *__format; struct v4l2_rect *__crop; struct v4l2_rect rect; /* Clamp the crop rectangle boundaries and align them to a multiple of 2 * pixels. 
*/ rect.left = clamp(ALIGN(crop->rect.left, 2), MT9T001_COLUMN_START_MIN, MT9T001_COLUMN_START_MAX); rect.top = clamp(ALIGN(crop->rect.top, 2), MT9T001_ROW_START_MIN, MT9T001_ROW_START_MAX); rect.width = clamp(ALIGN(crop->rect.width, 2), MT9T001_WINDOW_WIDTH_MIN + 1, MT9T001_WINDOW_WIDTH_MAX + 1); rect.height = clamp(ALIGN(crop->rect.height, 2), MT9T001_WINDOW_HEIGHT_MIN + 1, MT9T001_WINDOW_HEIGHT_MAX + 1); rect.width = min(rect.width, MT9T001_PIXEL_ARRAY_WIDTH - rect.left); rect.height = min(rect.height, MT9T001_PIXEL_ARRAY_HEIGHT - rect.top); __crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which); if (rect.width != __crop->width || rect.height != __crop->height) { /* Reset the output image size if the crop rectangle size has * been modified. */ __format = __mt9t001_get_pad_format(mt9t001, fh, crop->pad, crop->which); __format->width = rect.width; __format->height = rect.height; } *__crop = rect; crop->rect = rect; return 0; } /* ----------------------------------------------------------------------------- * V4L2 subdev control operations */ #define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001) #define V4L2_CID_BLACK_LEVEL_AUTO (V4L2_CID_USER_BASE | 0x1002) #define V4L2_CID_BLACK_LEVEL_OFFSET (V4L2_CID_USER_BASE | 0x1003) #define V4L2_CID_BLACK_LEVEL_CALIBRATE (V4L2_CID_USER_BASE | 0x1004) #define V4L2_CID_GAIN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1001) #define V4L2_CID_GAIN_GREEN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1002) #define V4L2_CID_GAIN_GREEN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1003) #define V4L2_CID_GAIN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1004) static u16 mt9t001_gain_value(s32 *gain) { /* Gain is controlled by 2 analog stages and a digital stage. 
Valid * values for the 3 stages are * * Stage Min Max Step * ------------------------------------------ * First analog stage x1 x2 1 * Second analog stage x1 x4 0.125 * Digital stage x1 x16 0.125 * * To minimize noise, the gain stages should be used in the second * analog stage, first analog stage, digital stage order. Gain from a * previous stage should be pushed to its maximum value before the next * stage is used. */ if (*gain <= 32) return *gain; if (*gain <= 64) { *gain &= ~1; return (1 << 6) | (*gain >> 1); } *gain &= ~7; return ((*gain - 64) << 5) | (1 << 6) | 32; } static int mt9t001_ctrl_freeze(struct mt9t001 *mt9t001, bool freeze) { return mt9t001_set_output_control(mt9t001, freeze ? 0 : MT9T001_OUTPUT_CONTROL_SYNC, freeze ? MT9T001_OUTPUT_CONTROL_SYNC : 0); } static int mt9t001_s_ctrl(struct v4l2_ctrl *ctrl) { static const u8 gains[4] = { MT9T001_RED_GAIN, MT9T001_GREEN1_GAIN, MT9T001_GREEN2_GAIN, MT9T001_BLUE_GAIN }; struct mt9t001 *mt9t001 = container_of(ctrl->handler, struct mt9t001, ctrls); struct i2c_client *client = v4l2_get_subdevdata(&mt9t001->subdev); unsigned int count; unsigned int i; u16 value; int ret; switch (ctrl->id) { case V4L2_CID_GAIN_RED: case V4L2_CID_GAIN_GREEN_RED: case V4L2_CID_GAIN_GREEN_BLUE: case V4L2_CID_GAIN_BLUE: /* Disable control updates if more than one control has changed * in the cluster. */ for (i = 0, count = 0; i < 4; ++i) { struct v4l2_ctrl *gain = mt9t001->gains[i]; if (gain->val != gain->cur.val) count++; } if (count > 1) { ret = mt9t001_ctrl_freeze(mt9t001, true); if (ret < 0) return ret; } /* Update the gain controls. */ for (i = 0; i < 4; ++i) { struct v4l2_ctrl *gain = mt9t001->gains[i]; if (gain->val == gain->cur.val) continue; value = mt9t001_gain_value(&gain->val); ret = mt9t001_write(client, gains[i], value); if (ret < 0) { mt9t001_ctrl_freeze(mt9t001, false); return ret; } } /* Enable control updates. 
*/ if (count > 1) { ret = mt9t001_ctrl_freeze(mt9t001, false); if (ret < 0) return ret; } break; case V4L2_CID_EXPOSURE: ret = mt9t001_write(client, MT9T001_SHUTTER_WIDTH_LOW, ctrl->val & 0xffff); if (ret < 0) return ret; return mt9t001_write(client, MT9T001_SHUTTER_WIDTH_HIGH, ctrl->val >> 16); case V4L2_CID_TEST_PATTERN: ret = mt9t001_set_output_control(mt9t001, ctrl->val ? 0 : MT9T001_OUTPUT_CONTROL_TEST_DATA, ctrl->val ? MT9T001_OUTPUT_CONTROL_TEST_DATA : 0); if (ret < 0) return ret; return mt9t001_write(client, MT9T001_TEST_DATA, ctrl->val << 2); case V4L2_CID_BLACK_LEVEL_AUTO: value = ctrl->val ? 0 : MT9T001_BLACK_LEVEL_OVERRIDE; ret = mt9t001_write(client, MT9T001_BLACK_LEVEL_CALIBRATION, value); if (ret < 0) return ret; mt9t001->black_level = value; break; case V4L2_CID_BLACK_LEVEL_OFFSET: ret = mt9t001_write(client, MT9T001_GREEN1_OFFSET, ctrl->val); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_GREEN2_OFFSET, ctrl->val); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_RED_OFFSET, ctrl->val); if (ret < 0) return ret; return mt9t001_write(client, MT9T001_BLUE_OFFSET, ctrl->val); case V4L2_CID_BLACK_LEVEL_CALIBRATE: return mt9t001_write(client, MT9T001_BLACK_LEVEL_CALIBRATION, MT9T001_BLACK_LEVEL_RECALCULATE | mt9t001->black_level); } return 0; } static struct v4l2_ctrl_ops mt9t001_ctrl_ops = { .s_ctrl = mt9t001_s_ctrl, }; static const struct v4l2_ctrl_config mt9t001_ctrls[] = { { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_TEST_PATTERN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Test pattern", .min = 0, .max = 1023, .step = 1, .def = 0, .flags = 0, }, { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_BLACK_LEVEL_AUTO, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Black Level, Auto", .min = 0, .max = 1, .step = 1, .def = 1, .flags = 0, }, { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_BLACK_LEVEL_OFFSET, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Black Level, Offset", .min = -256, .max = 255, .step = 1, .def = 32, .flags = 0, }, { .ops = 
&mt9t001_ctrl_ops, .id = V4L2_CID_BLACK_LEVEL_CALIBRATE, .type = V4L2_CTRL_TYPE_BUTTON, .name = "Black Level, Calibrate", .min = 0, .max = 0, .step = 0, .def = 0, .flags = V4L2_CTRL_FLAG_WRITE_ONLY, }, }; static const struct v4l2_ctrl_config mt9t001_gains[] = { { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_GAIN_RED, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain, Red", .min = MT9T001_GLOBAL_GAIN_MIN, .max = MT9T001_GLOBAL_GAIN_MAX, .step = 1, .def = MT9T001_GLOBAL_GAIN_MIN, .flags = 0, }, { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_GAIN_GREEN_RED, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain, Green (R)", .min = MT9T001_GLOBAL_GAIN_MIN, .max = MT9T001_GLOBAL_GAIN_MAX, .step = 1, .def = MT9T001_GLOBAL_GAIN_MIN, .flags = 0, }, { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_GAIN_GREEN_BLUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain, Green (B)", .min = MT9T001_GLOBAL_GAIN_MIN, .max = MT9T001_GLOBAL_GAIN_MAX, .step = 1, .def = MT9T001_GLOBAL_GAIN_MIN, .flags = 0, }, { .ops = &mt9t001_ctrl_ops, .id = V4L2_CID_GAIN_BLUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain, Blue", .min = MT9T001_GLOBAL_GAIN_MIN, .max = MT9T001_GLOBAL_GAIN_MAX, .step = 1, .def = MT9T001_GLOBAL_GAIN_MIN, .flags = 0, }, }; /* ----------------------------------------------------------------------------- * V4L2 subdev internal operations */ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh) { struct v4l2_mbus_framefmt *format; struct v4l2_rect *crop; crop = v4l2_subdev_get_try_crop(fh, 0); crop->left = MT9T001_COLUMN_START_DEF; crop->top = MT9T001_ROW_START_DEF; crop->width = MT9T001_WINDOW_WIDTH_DEF + 1; crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1; format = v4l2_subdev_get_try_format(fh, 0); format->code = V4L2_MBUS_FMT_SGRBG10_1X10; format->width = MT9T001_WINDOW_WIDTH_DEF + 1; format->height = MT9T001_WINDOW_HEIGHT_DEF + 1; format->field = V4L2_FIELD_NONE; format->colorspace = V4L2_COLORSPACE_SRGB; return 0; } static struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = { 
.s_stream = mt9t001_s_stream, }; static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = { .enum_mbus_code = mt9t001_enum_mbus_code, .enum_frame_size = mt9t001_enum_frame_size, .get_fmt = mt9t001_get_format, .set_fmt = mt9t001_set_format, .get_crop = mt9t001_get_crop, .set_crop = mt9t001_set_crop, }; static struct v4l2_subdev_ops mt9t001_subdev_ops = { .video = &mt9t001_subdev_video_ops, .pad = &mt9t001_subdev_pad_ops, }; static struct v4l2_subdev_internal_ops mt9t001_subdev_internal_ops = { .open = mt9t001_open, }; static int mt9t001_video_probe(struct i2c_client *client) { struct mt9t001_platform_data *pdata = client->dev.platform_data; s32 data; int ret; dev_info(&client->dev, "Probing MT9T001 at address 0x%02x\n", client->addr); /* Reset the chip and stop data read out */ ret = mt9t001_write(client, MT9T001_RESET, 1); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_RESET, 0); if (ret < 0) return ret; ret = mt9t001_write(client, MT9T001_OUTPUT_CONTROL, 0); if (ret < 0) return ret; /* Configure the pixel clock polarity */ if (pdata && pdata->clk_pol) { ret = mt9t001_write(client, MT9T001_PIXEL_CLOCK, MT9T001_PIXEL_CLOCK_INVERT); if (ret < 0) return ret; } /* Read and check the sensor version */ data = mt9t001_read(client, MT9T001_CHIP_VERSION); if (data != MT9T001_CHIP_ID) { dev_err(&client->dev, "MT9T001 not detected, wrong version " "0x%04x\n", data); return -ENODEV; } dev_info(&client->dev, "MT9T001 detected at address 0x%02x\n", client->addr); return ret; } static int mt9t001_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9t001 *mt9t001; unsigned int i; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&client->adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); return -EIO; } ret = mt9t001_video_probe(client); if (ret < 0) return ret; mt9t001 = kzalloc(sizeof(*mt9t001), GFP_KERNEL); if (!mt9t001) return -ENOMEM; v4l2_ctrl_handler_init(&mt9t001->ctrls, 
ARRAY_SIZE(mt9t001_ctrls) + ARRAY_SIZE(mt9t001_gains) + 2); v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops, V4L2_CID_EXPOSURE, MT9T001_SHUTTER_WIDTH_MIN, MT9T001_SHUTTER_WIDTH_MAX, 1, MT9T001_SHUTTER_WIDTH_DEF); v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops, V4L2_CID_BLACK_LEVEL, 1, 1, 1, 1); for (i = 0; i < ARRAY_SIZE(mt9t001_ctrls); ++i) v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_ctrls[i], NULL); for (i = 0; i < ARRAY_SIZE(mt9t001_gains); ++i) mt9t001->gains[i] = v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_gains[i], NULL); v4l2_ctrl_cluster(ARRAY_SIZE(mt9t001_gains), mt9t001->gains); mt9t001->subdev.ctrl_handler = &mt9t001->ctrls; if (mt9t001->ctrls.error) { printk(KERN_INFO "%s: control initialization error %d\n", __func__, mt9t001->ctrls.error); ret = -EINVAL; goto done; } mt9t001->crop.left = MT9T001_COLUMN_START_DEF; mt9t001->crop.top = MT9T001_ROW_START_DEF; mt9t001->crop.width = MT9T001_WINDOW_WIDTH_DEF + 1; mt9t001->crop.height = MT9T001_WINDOW_HEIGHT_DEF + 1; mt9t001->format.code = V4L2_MBUS_FMT_SGRBG10_1X10; mt9t001->format.width = MT9T001_WINDOW_WIDTH_DEF + 1; mt9t001->format.height = MT9T001_WINDOW_HEIGHT_DEF + 1; mt9t001->format.field = V4L2_FIELD_NONE; mt9t001->format.colorspace = V4L2_COLORSPACE_SRGB; v4l2_i2c_subdev_init(&mt9t001->subdev, client, &mt9t001_subdev_ops); mt9t001->subdev.internal_ops = &mt9t001_subdev_internal_ops; mt9t001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; mt9t001->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&mt9t001->subdev.entity, 1, &mt9t001->pad, 0); done: if (ret < 0) { v4l2_ctrl_handler_free(&mt9t001->ctrls); media_entity_cleanup(&mt9t001->subdev.entity); kfree(mt9t001); } return ret; } static int mt9t001_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct mt9t001 *mt9t001 = to_mt9t001(subdev); v4l2_ctrl_handler_free(&mt9t001->ctrls); v4l2_device_unregister_subdev(subdev); media_entity_cleanup(&subdev->entity); kfree(mt9t001); return 0; } 
static const struct i2c_device_id mt9t001_id[] = { { "mt9t001", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mt9t001_id); static struct i2c_driver mt9t001_driver = { .driver = { .name = "mt9t001", }, .probe = mt9t001_probe, .remove = mt9t001_remove, .id_table = mt9t001_id, }; module_i2c_driver(mt9t001_driver); MODULE_DESCRIPTION("Aptina (Micron) MT9T001 Camera driver"); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_LICENSE("GPL");
gpl-2.0
CyanideL/android_kernel_samsung_jf
fs/nilfs2/sufile.c
4866
24904
/* * sufile.c - NILFS segment usage file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Koji Sato <koji@osrg.net>. * Revised by Ryusuke Konishi <ryusuke@osrg.net>. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/errno.h> #include <linux/nilfs2_fs.h> #include "mdt.h" #include "sufile.h" struct nilfs_sufile_info { struct nilfs_mdt_info mi; unsigned long ncleansegs;/* number of clean segments */ __u64 allocmin; /* lower limit of allocatable segment range */ __u64 allocmax; /* upper limit of allocatable segment range */ }; static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile) { return (struct nilfs_sufile_info *)NILFS_MDT(sufile); } static inline unsigned long nilfs_sufile_segment_usages_per_block(const struct inode *sufile) { return NILFS_MDT(sufile)->mi_entries_per_block; } static unsigned long nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); return (unsigned long)t; } static unsigned long nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + 
NILFS_MDT(sufile)->mi_first_entry_offset; return do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); } static unsigned long nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr, __u64 max) { return min_t(unsigned long, nilfs_sufile_segment_usages_per_block(sufile) - nilfs_sufile_get_offset(sufile, curr), max - curr + 1); } static struct nilfs_segment_usage * nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh) + nilfs_sufile_get_offset(sufile, segnum) * NILFS_MDT(sufile)->mi_entry_size; } static inline int nilfs_sufile_get_header_block(struct inode *sufile, struct buffer_head **bhp) { return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp); } static inline int nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum, int create, struct buffer_head **bhp) { return nilfs_mdt_get_block(sufile, nilfs_sufile_get_blkoff(sufile, segnum), create, NULL, bhp); } static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile, __u64 segnum) { return nilfs_mdt_delete_block(sufile, nilfs_sufile_get_blkoff(sufile, segnum)); } static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, u64 ncleanadd, u64 ndirtyadd) { struct nilfs_sufile_header *header; void *kaddr; kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, ncleanadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); } /** * nilfs_sufile_get_ncleansegs - return the number of clean segments * @sufile: inode of segment usage file */ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) { return NILFS_SUI(sufile)->ncleansegs; } /** * nilfs_sufile_updatev - modify multiple segment usages at a time * @sufile: inode of segment usage file * @segnumv: array of segment numbers * @nsegs: size of @segnumv array * @create: creation flag * @ndone: place to store 
number of modified segments on @segnumv * @dofunc: primitive operation for the update * * Description: nilfs_sufile_updatev() repeatedly calls @dofunc * against the given array of segments. The @dofunc is called with * buffers of a header block and the sufile block in which the target * segment usage entry is contained. If @ndone is given, the number * of successfully modified segments from the head is stored in the * place @ndone points to. * * Return Value: On success, zero is returned. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - Given segment usage is in hole block (may be returned if * @create is zero) * * %-EINVAL - Invalid segment usage number */ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, int create, size_t *ndone, void (*dofunc)(struct inode *, __u64, struct buffer_head *, struct buffer_head *)) { struct buffer_head *header_bh, *bh; unsigned long blkoff, prev_blkoff; __u64 *seg; size_t nerr = 0, n = 0; int ret = 0; if (unlikely(nsegs == 0)) goto out; down_write(&NILFS_MDT(sufile)->mi_sem); for (seg = segnumv; seg < segnumv + nsegs; seg++) { if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { printk(KERN_WARNING "%s: invalid segment number: %llu\n", __func__, (unsigned long long)*seg); nerr++; } } if (nerr > 0) { ret = -EINVAL; goto out_sem; } ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; seg = segnumv; blkoff = nilfs_sufile_get_blkoff(sufile, *seg); ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); if (ret < 0) goto out_header; for (;;) { dofunc(sufile, *seg, header_bh, bh); if (++seg >= segnumv + nsegs) break; prev_blkoff = blkoff; blkoff = nilfs_sufile_get_blkoff(sufile, *seg); if (blkoff == prev_blkoff) continue; /* get different block */ brelse(bh); ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); if (unlikely(ret < 0)) goto out_header; 
} brelse(bh); out_header: n = seg - segnumv; brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); out: if (ndone) *ndone = n; return ret; } int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, void (*dofunc)(struct inode *, __u64, struct buffer_head *, struct buffer_head *)) { struct buffer_head *header_bh, *bh; int ret; if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { printk(KERN_WARNING "%s: invalid segment number: %llu\n", __func__, (unsigned long long)segnum); return -EINVAL; } down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh); if (!ret) { dofunc(sufile, segnum, header_bh, bh); brelse(bh); } brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_set_alloc_range - limit range of segment to be allocated * @sufile: inode of segment usage file * @start: minimum segment number of allocatable region (inclusive) * @end: maximum segment number of allocatable region (inclusive) * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-ERANGE - invalid segment region */ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) { struct nilfs_sufile_info *sui = NILFS_SUI(sufile); __u64 nsegs; int ret = -ERANGE; down_write(&NILFS_MDT(sufile)->mi_sem); nsegs = nilfs_sufile_get_nsegments(sufile); if (start <= end && end < nsegs) { sui->allocmin = start; sui->allocmax = end; ret = 0; } up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_alloc - allocate a segment * @sufile: inode of segment usage file * @segnump: pointer to segment number * * Description: nilfs_sufile_alloc() allocates a clean segment. * * Return Value: On success, 0 is returned and the segment number of the * allocated segment is stored in the place pointed by @segnump. 
On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOSPC - No clean segment left. */ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) { struct buffer_head *header_bh, *su_bh; struct nilfs_sufile_header *header; struct nilfs_segment_usage *su; struct nilfs_sufile_info *sui = NILFS_SUI(sufile); size_t susz = NILFS_MDT(sufile)->mi_entry_size; __u64 segnum, maxsegnum, last_alloc; void *kaddr; unsigned long nsegments, ncleansegs, nsus, cnt; int ret, j; down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); ncleansegs = le64_to_cpu(header->sh_ncleansegs); last_alloc = le64_to_cpu(header->sh_last_alloc); kunmap_atomic(kaddr); nsegments = nilfs_sufile_get_nsegments(sufile); maxsegnum = sui->allocmax; segnum = last_alloc + 1; if (segnum < sui->allocmin || segnum > sui->allocmax) segnum = sui->allocmin; for (cnt = 0; cnt < nsegments; cnt += nsus) { if (segnum > maxsegnum) { if (cnt < sui->allocmax - sui->allocmin + 1) { /* * wrap around in the limited region. * if allocation started from * sui->allocmin, this never happens. 
*/ segnum = sui->allocmin; maxsegnum = last_alloc; } else if (segnum > sui->allocmin && sui->allocmax + 1 < nsegments) { segnum = sui->allocmax + 1; maxsegnum = nsegments - 1; } else if (sui->allocmin > 0) { segnum = 0; maxsegnum = sui->allocmin - 1; } else { break; /* never happens */ } } ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &su_bh); if (ret < 0) goto out_header; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); nsus = nilfs_sufile_segment_usages_in_block( sufile, segnum, maxsegnum); for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) { if (!nilfs_segment_usage_clean(su)) continue; /* found a clean segment */ nilfs_segment_usage_set_dirty(su); kunmap_atomic(kaddr); kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, -1); le64_add_cpu(&header->sh_ndirtysegs, 1); header->sh_last_alloc = cpu_to_le64(segnum); kunmap_atomic(kaddr); sui->ncleansegs--; mark_buffer_dirty(header_bh); mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); brelse(su_bh); *segnump = segnum; goto out_header; } kunmap_atomic(kaddr); brelse(su_bh); } /* no segments left */ ret = -ENOSPC; out_header: brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, struct buffer_head *header_bh, struct buffer_head *su_bh) { struct nilfs_segment_usage *su; void *kaddr; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (unlikely(!nilfs_segment_usage_clean(su))) { printk(KERN_WARNING "%s: segment %llu must be clean\n", __func__, (unsigned long long)segnum); kunmap_atomic(kaddr); return; } nilfs_segment_usage_set_dirty(su); kunmap_atomic(kaddr); nilfs_sufile_mod_counter(header_bh, -1, 1); NILFS_SUI(sufile)->ncleansegs--; mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); } void 
nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			     struct buffer_head *header_bh,
			     struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/*
	 * Already a bare dirty segment (only the DIRTY flag set, no live
	 * blocks): nothing to do, the entry is in the target state.
	 */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	/* Remember the previous state to adjust the in-core counters below */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	/*
	 * Counter deltas: one fewer clean segment if it was clean, one more
	 * dirty segment if it was not already dirty.
	 */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_do_free - make a segment usage entry clean (free the segment)
 * @sufile: inode of segment usage file
 * @segnum: segment number to free
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the segment usage entry
 *
 * Marks the on-disk usage entry of @segnum clean and updates the header
 * counters and the in-core clean-segment count accordingly.  A warning is
 * printed and nothing is changed if the segment is already clean.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	/* A segment being freed should be dirty and must not be in error */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	/* +1 clean segment; -1 dirty segment if it was counted as dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Return Value: 0 on success, or a negative error code from
 * nilfs_sufile_get_segment_usage_block() on failure.
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional; when zero, su_lastmod is left
 *           unchanged)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is read under its dedicated spinlock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_do_set_error - mark a segment usage entry as being in error
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the segment usage entry
 *
 * Sets the error flag on the usage entry of @segnum.  If the segment was
 * clean, the clean counters are decremented.  Does nothing if the entry is
 * already flagged as being in error.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
*
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	/* Walk the range one sufile block at a time (n entries per pass) */
	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		/*
		 * First pass: refuse the whole operation if any entry has a
		 * flag other than ERROR set, or if the segment is active.
		 */
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		/* Second pass: clear error entries, counting how many */
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	/* Account segments cleaned before an eventual failure as well */
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	/*
	 * When shrinking, enough clean segments must remain to cover the
	 * removed region plus the reserved segments of the new geometry.
	 */
	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		/* Grown segments start out clean */
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	/* Write the updated clean-segment count back to the header block */
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: Fills @buf with usage information for up to @nsi segments
 * starting at @segnum.
 *
 * Return Value: On success, the count of segment usage items stored in the
 * output buffer is returned. On error, one of the
 * following negative error codes is returned.
*
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* Clamp the request to the end of the segment array */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole: report the entries as all-zero usage */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/*
			 * The ACTIVE flag is derived from in-core state via
			 * nilfs_segment_is_active(), not from the on-disk
			 * entry, so mask it off before recomputing it.
			 */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Return Value: 0 on success (with the sufile inode stored in @inodep), or
 * a negative error code on failure (%-ENOMEM, or an error from reading the
 * inode or the header block).
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	/* Already initialized by a previous read: just return it */
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	/* Cache the on-disk clean-segment count in the in-core info */
	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	/* Default allocation range covers the whole segment array */
	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}
gpl-2.0