/* * In order to wait for pages to become available there must be * waitqueues associated with pages. By using a hash table of * waitqueues where the bucket discipline is to maintain all * waiters on the same queue and wake all when any of the pages * become available, and for the woken contexts to check to be * sure the appropriate page became available, this saves space * at a cost of "thundering herd" phenomena during rare hash * collisions. */ static wait_queue_head_t *page_waitqueue(struct page *page) { const struct zone *zone = page_zone(page); return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; }
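The comment above describes the space/latency tradeoff: all pages hashing to the same bucket share one waitqueue, wakers wake the whole bucket, and every woken waiter re-checks its own page. A minimal user-space sketch of that bucket discipline, assuming POSIX threads (the names wait_bucket, page_wait and page_wake are illustrative, not kernel API):

/* User-space sketch of the hashed-waitqueue idea: many objects share one
 * bucket, wakers broadcast, and each woken waiter re-checks its own
 * condition, so rare hash collisions only cost spurious wakeups. */
#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>

#define NBUCKETS 256

struct wait_bucket {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
};

static struct wait_bucket buckets[NBUCKETS];

static void buckets_init(void)
{
    for (int i = 0; i < NBUCKETS; i++) {
        pthread_mutex_init(&buckets[i].lock, NULL);
        pthread_cond_init(&buckets[i].cond, NULL);
    }
}

static struct wait_bucket *bucket_for(const void *obj)
{
    /* crude pointer hash; the kernel uses hash_ptr() into a per-zone table */
    uintptr_t h = (uintptr_t)obj;
    return &buckets[(h >> 4) % NBUCKETS];
}

/* Wait until ready(obj) becomes true; tolerate collision/spurious wakeups. */
static void page_wait(const void *obj, bool (*ready)(const void *))
{
    struct wait_bucket *b = bucket_for(obj);
    pthread_mutex_lock(&b->lock);
    while (!ready(obj))
        pthread_cond_wait(&b->cond, &b->lock);
    pthread_mutex_unlock(&b->lock);
}

/* Wake everyone on the bucket; unrelated waiters simply loop and sleep again. */
static void page_wake(const void *obj)
{
    struct wait_bucket *b = bucket_for(obj);
    pthread_mutex_lock(&b->lock);
    pthread_cond_broadcast(&b->cond);
    pthread_mutex_unlock(&b->lock);
}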
/* *---------------------------------------------------------------------- * * PrintSourceToObj -- * * Appends a quoted representation of a string to a Tcl_Obj. * *---------------------------------------------------------------------- */ static void PrintSourceToObj( Tcl_Obj *appendObj, const char *stringPtr, int maxChars) { register const char *p; register int i = 0; if (stringPtr == NULL) { Tcl_AppendToObj(appendObj, "\"\"", -1); return; } Tcl_AppendToObj(appendObj, "\"", -1); p = stringPtr; for (; (*p != '\0') && (i < maxChars); p++, i++) { switch (*p) { case '"': Tcl_AppendToObj(appendObj, "\\\"", -1); continue; case '\f': Tcl_AppendToObj(appendObj, "\\f", -1); continue; case '\n': Tcl_AppendToObj(appendObj, "\\n", -1); continue; case '\r': Tcl_AppendToObj(appendObj, "\\r", -1); continue; case '\t': Tcl_AppendToObj(appendObj, "\\t", -1); continue; case '\v': Tcl_AppendToObj(appendObj, "\\v", -1); continue; default: Tcl_AppendPrintfToObj(appendObj, "%c", *p); continue; } } Tcl_AppendToObj(appendObj, "\"", -1); }
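Since PrintSourceToObj is static, it is only reachable from the same translation unit. A hypothetical in-file helper showing the escaping it produces:

/* Hypothetical in-file usage of PrintSourceToObj: return a new Tcl object
 * holding the double-quoted, backslash-escaped rendering of "script".
 * The caller owns the reference count of the returned object. */
static Tcl_Obj *QuoteScript(const char *script)
{
    Tcl_Obj *dump = Tcl_NewObj();
    PrintSourceToObj(dump, script, 80);
    /* e.g. QuoteScript("puts \"hi\"\n") yields the string:
     *   "puts \"hi\"\n"
     * with the embedded quote and newline escaped as \" and \n. */
    return dump;
}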
/* * Get HT related information from beacon and save it in BssDesc * * (1) Parse HTCap, and HTInfo, and record whether it is 11n AP * (2) If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() * (3) Check whether peer is Realtek AP (for Realtek proprietary aggregation mode). * Input: * PADAPTER Adapter * * Output: * PRT_TCB BssDesc * */ VOID HTGetValueFromBeaconOrProbeRsp( PADAPTER Adapter, POCTET_STRING pSRCmmpdu, PRT_WLAN_BSS bssDesc ) { OCTET_STRING HTCapIE, HTInfoIE, OBSSIE, BssCoexistenceIE, mmpdu; mmpdu.Octet = pSRCmmpdu->Octet; mmpdu.Length = pSRCmmpdu->Length; HTInitializeBssDesc (&bssDesc->BssHT); HTCapIE = PacketGetElement(mmpdu, EID_HTCapability, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE); RT_DISP(FBEACON, BCN_SHOW, ("EID_HTCapability HTCapIE.Length=%d\r\n", HTCapIE.Length)) if(HTCapIE.Length == 0) { HTCapIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_CAP, OUI_SUBTYPE_DONT_CARE); if(HTCapIE.Length != 0) bssDesc->BssHT.bdHTSpecVer= HT_SPEC_VER_EWC; RT_DISP(FBEACON, BCN_SHOW, ("EID_Vendor HTCapIE.Length=%d\r\n", HTCapIE.Length)) } if(HTCapIE.Length != 0) HTParsingHTCapElement(Adapter, HTCapIE, bssDesc); HTInfoIE = PacketGetElement(mmpdu, EID_HTInfo, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE); if(HTInfoIE.Length == 0) { HTInfoIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_INFO, OUI_SUBTYPE_DONT_CARE); if(HTInfoIE.Length != 0) bssDesc->BssHT.bdHTSpecVer = HT_SPEC_VER_EWC; } if(HTInfoIE.Length != 0) HTParsingHTInfoElement(Adapter, HTInfoIE, bssDesc); if(HTCapIE.Length != 0) { bssDesc->BssHT.bdSupportHT = TRUE; if(bssDesc->BssQos.bdQoSMode == QOS_DISABLE) QosSetLegacyWMMParamWithHT(Adapter, bssDesc); } else { bssDesc->BssHT.bdSupportHT = FALSE; } OBSSIE = PacketGetElement(mmpdu, EID_OBSS, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE); if(OBSSIE.Length != 0) BSS_ParsingOBSSInfoElement(Adapter, OBSSIE, bssDesc); BssCoexistenceIE = PacketGetElement(mmpdu, EID_BSSCoexistence, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE ); if(BssCoexistenceIE.Length != 0) BSS_ParsingBSSCoexistElement(Adapter, BssCoexistenceIE, bssDesc); if(HTCapIE.Length!=0 && HTCapIE.Octet != NULL) { bssDesc->bdBandWidth = (CHANNEL_WIDTH)(GET_HT_CAPABILITY_ELE_CHL_WIDTH(HTCapIE.Octet)); bssDesc->BssHT.bd40Intolerant = GET_HT_CAPABILITY_ELE_FORTY_INTOLERANT(HTCapIE.Octet); } else bssDesc->bdBandWidth = CHANNEL_WIDTH_20; }
/************************************************************************************* Grid physics library, www.github.com/paboyle/Grid Source file: ./lib/Cshift.h Copyright (C) 2015 Author: Peter Boyle <paboyle@ph.ed.ac.uk> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ #ifndef _GRID_FFT_H_ #define _GRID_FFT_H_ #ifdef HAVE_FFTW #ifdef USE_MKL #include <fftw/fftw3.h> #else #include <fftw3.h> #endif #endif NAMESPACE_BEGIN(Grid); template<class scalar> struct FFTW { }; #ifdef HAVE_FFTW template<> struct FFTW<ComplexD> { public: typedef fftw_complex FFTW_scalar; typedef fftw_plan FFTW_plan; static FFTW_plan fftw_plan_many_dft(int rank, const int *n,int howmany, FFTW_scalar *in, const int *inembed, int istride, int idist, FFTW_scalar *out, const int *onembed, int ostride, int odist, int sign, unsigned flags) { return ::fftw_plan_many_dft(rank,n,howmany,in,inembed,istride,idist,out,onembed,ostride,odist,sign,flags); } static void fftw_flops(const FFTW_plan p,double *add, double *mul, double *fmas){ ::fftw_flops(p,add,mul,fmas); } inline static void fftw_execute_dft(const FFTW_plan p,FFTW_scalar *in,FFTW_scalar *out) { ::fftw_execute_dft(p,in,out); } inline static void fftw_destroy_plan(const FFTW_plan p) { ::fftw_destroy_plan(p); } }; template<> struct FFTW<ComplexF> { public: typedef fftwf_complex FFTW_scalar; typedef fftwf_plan FFTW_plan; static FFTW_plan fftw_plan_many_dft(int rank, const int *n,int howmany, FFTW_scalar *in, const int *inembed, int istride, int idist, FFTW_scalar *out, const int *onembed, int ostride, int odist, int sign, unsigned flags) { return ::fftwf_plan_many_dft(rank,n,howmany,in,inembed,istride,idist,out,onembed,ostride,odist,sign,flags); } static void fftw_flops(const FFTW_plan p,double *add, double *mul, double *fmas){ ::fftwf_flops(p,add,mul,fmas); } inline static void fftw_execute_dft(const FFTW_plan p,FFTW_scalar *in,FFTW_scalar *out) { ::fftwf_execute_dft(p,in,out); } inline static void fftw_destroy_plan(const FFTW_plan p) { ::fftwf_destroy_plan(p); } }; #endif #ifndef FFTW_FORWARD #define FFTW_FORWARD (-1) #define FFTW_BACKWARD (+1) #endif class FFT { private: GridCartesian *vgrid; GridCartesian *sgrid; int Nd; double flops; double flops_call; uint64_t usec; Coordinate dimensions; Coordinate processors; Coordinate processor_coor; public: static const int forward=FFTW_FORWARD; static const int backward=FFTW_BACKWARD; double Flops(void) {return flops;} double MFlops(void) {return flops/usec;} double USec(void) {return (double)usec;} FFT ( GridCartesian * grid ) : vgrid(grid), Nd(grid->_ndimension), dimensions(grid->_fdimensions), processors(grid->_processors), processor_coor(grid->_processor_coor) { flops=0; usec =0; Coordinate layout(Nd,1); sgrid = new 
GridCartesian(dimensions,layout,processors,*grid); }; ~FFT ( void) { delete sgrid; } template<class vobj> void FFT_dim_mask(Lattice<vobj> &result,const Lattice<vobj> &source,Coordinate mask,int sign){ conformable(result.Grid(),vgrid); conformable(source.Grid(),vgrid); Lattice<vobj> tmp(vgrid); tmp = source; for(int d=0;d<Nd;d++){ if( mask[d] ) { FFT_dim(result,tmp,d,sign); tmp=result; } } } template<class vobj> void FFT_all_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int sign){ Coordinate mask(Nd,1); FFT_dim_mask(result,source,mask,sign); } template<class vobj> void FFT_dim(Lattice<vobj> &result,const Lattice<vobj> &source,int dim, int sign){ #ifndef HAVE_FFTW assert(0); #else conformable(result.Grid(),vgrid); conformable(source.Grid(),vgrid); int L = vgrid->_ldimensions[dim]; int G = vgrid->_fdimensions[dim]; Coordinate layout(Nd,1); Coordinate pencil_gd(vgrid->_fdimensions); pencil_gd[dim] = G*processors[dim]; // Pencil global vol LxLxGxLxL per node GridCartesian pencil_g(pencil_gd,layout,processors,*vgrid); // Construct pencils typedef typename vobj::scalar_object sobj; typedef typename sobj::scalar_type scalar; Lattice<sobj> pgbuf(&pencil_g); autoView(pgbuf_v , pgbuf, CpuWrite); typedef typename FFTW<scalar>::FFTW_scalar FFTW_scalar; typedef typename FFTW<scalar>::FFTW_plan FFTW_plan; int Ncomp = sizeof(sobj)/sizeof(scalar); int Nlow = 1; for(int d=0;d<dim;d++){ Nlow*=vgrid->_ldimensions[d]; } int rank = 1; /* 1d transforms */ int n[] = {G}; /* 1d transforms of length G */ int howmany = Ncomp; int odist,idist,istride,ostride; idist = odist = 1; /* Distance between consecutive FT's */ istride = ostride = Ncomp*Nlow; /* distance between two elements in the same FT */ int *inembed = n, *onembed = n; scalar div; if ( sign == backward ) div = 1.0/G; else if ( sign == forward ) div = 1.0; else assert(0); FFTW_plan p; { FFTW_scalar *in = (FFTW_scalar *)&pgbuf_v[0]; FFTW_scalar *out= (FFTW_scalar *)&pgbuf_v[0]; p = FFTW<scalar>::fftw_plan_many_dft(rank,n,howmany, in,inembed, istride,idist, out,onembed, ostride, odist, sign,FFTW_ESTIMATE); } // Barrel shift and collect global pencil Coordinate lcoor(Nd), gcoor(Nd); result = source; int pc = processor_coor[dim]; for(int p=0;p<processors[dim];p++) { { autoView(r_v,result,CpuRead); autoView(p_v,pgbuf,CpuWrite); thread_for(idx, sgrid->lSites(),{ Coordinate cbuf(Nd); sobj s; sgrid->LocalIndexToLocalCoor(idx,cbuf); peekLocalSite(s,r_v,cbuf); cbuf[dim]+=((pc+p) % processors[dim])*L; pokeLocalSite(s,p_v,cbuf); }); } if (p != processors[dim] - 1) { result = Cshift(result,dim,L); } } // Loop over orthog coords int NN=pencil_g.lSites(); GridStopWatch timer; timer.Start(); thread_for( idx,NN,{ Coordinate cbuf(Nd); pencil_g.LocalIndexToLocalCoor(idx, cbuf); if ( cbuf[dim] == 0 ) { // restricts loop to plane at lcoor[dim]==0 FFTW_scalar *in = (FFTW_scalar *)&pgbuf_v[idx]; FFTW_scalar *out= (FFTW_scalar *)&pgbuf_v[idx]; FFTW<scalar>::fftw_execute_dft(p,in,out); } }); timer.Stop(); // performance counting double add,mul,fma; FFTW<scalar>::fftw_flops(p,&add,&mul,&fma); flops_call = add+mul+2.0*fma; usec += timer.useconds(); flops+= flops_call*NN; // writing out result { autoView(pgbuf_v,pgbuf,CpuRead); autoView(result_v,result,CpuWrite); thread_for(idx,sgrid->lSites(),{ Coordinate clbuf(Nd), cgbuf(Nd); sobj s; sgrid->LocalIndexToLocalCoor(idx,clbuf); cgbuf = clbuf; cgbuf[dim] = clbuf[dim]+L*pc; peekLocalSite(s,pgbuf_v,cgbuf); pokeLocalSite(s,result_v,clbuf); }); } result = result*div; // destroying plan FFTW<scalar>::fftw_destroy_plan(p); #endif } }; 
NAMESPACE_END(Grid); #endif
/** * Decode the content of a license file * @param license_file_name path of the license file * @param li points to a structure that receives the decrypted license content * @return 0 on success, or a non-zero MMT_LICENSE_* error code (e.g. MMT_LICENSE_KEY_DOES_NOT_EXIST, MMT_LICENSE_MODIFIED) on failure */ static inline int _decode_license_file( const char* license_file_name, license_content_t *li ){ int len = 0, i; int ch; char license_decrypt_key[1000]; char string[10]; int mac_len; FILE *file = fopen( license_file_name, "r" ); long int sum_license; long int val; if(file == NULL) return MMT_LICENSE_KEY_DOES_NOT_EXIST; /* guard against overflowing license_decrypt_key */ while( (ch=fgetc( file )) != EOF && len < (int)sizeof( license_decrypt_key ) - 1 ){ ch = ch + (8*4-3); license_decrypt_key[ len++ ] = ch; } license_decrypt_key[ len ] = '\0'; if( len < 11 ){ fclose( file ); return 2; } strncpy(string, &license_decrypt_key[0], 4); string[4] = '\0'; li->year = atoi( string ); strncpy(string, &license_decrypt_key[4], 2); string[2] = '\0'; li->month = atoi( string ); strncpy( string, &license_decrypt_key[6], 2); string[2] = '\0'; li->day = atoi( string ); strncpy( string, &license_decrypt_key[8], 3); string[3] = '\0'; li->mac_count = atoi( string ); mac_len = li->mac_count * 12; if( len-11 < mac_len ){ fclose( file ); return MMT_LICENSE_MODIFIED; } sum_license = 0; for( i=0; i<mac_len; i++ ){ li->mac_addresses[ i ] = license_decrypt_key[i+11]; sum_license += license_decrypt_key[i+11]; } sum_license += (li->year * li->month * li->day) + li->mac_count; val = atol( &license_decrypt_key[ 11 + mac_len ] ); if( val != sum_license ){ fclose( file ); return MMT_LICENSE_MODIFIED; } fclose( file ); return 0; }
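Read back from the decoder, the on-disk layout is: a 4-digit year, 2-digit month, 2-digit day, a 3-digit MAC count, 12 characters per MAC address, and a trailing decimal checksum, with every byte shifted by 29 (8*4-3) before being written. A hedged sketch of the matching writer follows; the function name _encode_license_file is an assumption, and only the license_content_t fields the decoder already reads are used:

/* Illustrative inverse of _decode_license_file: build the plaintext record,
 * then write each byte minus (8*4-3) so the decoder's "+ (8*4-3)" recovers it.
 * Assumed helper, not part of the MMT code base. */
#include <stdio.h>
#include <string.h>

static int _encode_license_file(const char *license_file_name,
                                const license_content_t *li)
{
    char buf[1024];
    long sum = 0;
    int  n, i, mac_len = li->mac_count * 12;

    if (mac_len + 32 > (int)sizeof(buf))
        return -1;                       /* record would not fit */

    /* header: YYYYMMDD + 3-digit MAC count, then the raw MAC characters */
    n = snprintf(buf, sizeof(buf), "%04d%02d%02d%03d",
                 li->year, li->month, li->day, li->mac_count);
    memcpy(buf + n, li->mac_addresses, mac_len);
    n += mac_len;

    /* checksum over the plaintext MAC bytes, as the decoder recomputes it */
    for (i = 0; i < mac_len; i++)
        sum += li->mac_addresses[i];
    sum += (long)li->year * li->month * li->day + li->mac_count;
    n += snprintf(buf + n, sizeof(buf) - n, "%ld", sum);

    FILE *f = fopen(license_file_name, "w");
    if (f == NULL)
        return -1;
    for (i = 0; i < n; i++)
        fputc(buf[i] - (8 * 4 - 3), f);  /* undo the decoder's shift */
    fclose(f);
    return 0;
}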
/* * Function: my_free * Purpose: emulate free plus check memory allocation for debug. */ void my_free(void* ptr, char*file, int line) { char* newptr; size_t ptrsize; freescalled++; if (!isValidPointer(ptr, file, line)) { printf("\nfree MEM overwrite: FILE %s LINE %d\n[mallocs %ld] [frees %ld] [reallocs %ld]\n", file, line, mallocscalled, freescalled, reallocscalled); exit(1); } newptr = ((char*)ptr) - sizeof(uint16_t) - sizeof(size_t); ptrsize = *((size_t*)&newptr[sizeof(uint16_t)]); memset(ptr, 0xFF, ptrsize); free(newptr); return; }
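my_free implies a small bookkeeping header in front of every user block: a 16-bit marker followed by the stored size, with the user pointer starting right after. A hedged sketch of the allocating side under that assumption; only the header layout is taken from my_free, while the marker value 0xA55A and the exact check done by isValidPointer are guesses:

/* Sketch of the matching allocator: [uint16_t marker][size_t size][user data].
 * my_free() walks back sizeof(uint16_t) + sizeof(size_t) bytes and reads the
 * size at offset sizeof(uint16_t), so the writer must use the same layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define MY_MALLOC_MARKER 0xA55A          /* assumed marker value */

extern long mallocscalled;               /* counter from the original allocator file */

void *my_malloc(size_t size, char *file, int line)
{
    char *raw = malloc(sizeof(uint16_t) + sizeof(size_t) + size);

    mallocscalled++;
    if (raw == NULL) {
        printf("\nmalloc failed: FILE %s LINE %d (%zu bytes)\n", file, line, size);
        exit(1);
    }
    *(uint16_t *)raw = MY_MALLOC_MARKER;                   /* marker checked by isValidPointer */
    memcpy(raw + sizeof(uint16_t), &size, sizeof(size_t)); /* size read back by my_free */
    return raw + sizeof(uint16_t) + sizeof(size_t);        /* user pointer past the header */
}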
/* * If no lower streams are plumbed, then this function deallocates all * internal storage in preparation for unload. * * Called by sppptun_mod.c:_fini(). Assumes that we're exclusive on * both perimeters. */ void sppptun_tcl_fini(void) { if (tcl_minor_arena != NULL) { vmem_destroy(tcl_minor_arena); tcl_minor_arena = NULL; } if (tcl_cache != NULL) { kmem_cache_destroy(tcl_cache); tcl_cache = NULL; } kmem_free(tcl_slots, tcl_nslots * sizeof (tuncl_t *)); tcl_slots = NULL; rw_destroy(&tcl_rwlock); ASSERT(tcl_slots == NULL); ASSERT(tcl_cache == NULL); ASSERT(tcl_minor_arena == NULL); }
/* xlFindTopProcedure - find the top procedure on the call stack */ xlValue xlFindTopProcedure(void) { xlValue *p; for (p = xlCSP; p > xlStkBase; p = xlCDUnmark(p)) if ((xlCDispatch *)p[-1] == &cd_code) return p[-2]; return xlNil; }
/* A transaction is considered in-doubt if it has been globally committed by DTMD but its local commit has not yet completed. * This can happen because we report the transaction commit to DTMD in SetTransactionStatus, which is called inside commit * after saving the transaction state in WAL but before releasing locks. So DTMD can include this transaction in a snapshot * before the local commit is completed and the transaction is marked as completed in the local CLOG. * * We use the xid_in_doubt hash table to mark transactions which are "precommitted". An entry is inserted into the hash table * before sending the status to DTMD and removed after receiving the response from DTMD and setting the transaction status in the local CLOG. * So information about the transaction should always be present either in xid_in_doubt or in the CLOG. */ static bool TransactionIdIsInDoubt(TransactionId xid) { bool inDoubt; if (!TransactionIdIsInSnapshot(xid, &DtmSnapshot)) { LWLockAcquire(dtm->hashLock, LW_SHARED); inDoubt = hash_search(xid_in_doubt, &xid, HASH_FIND, NULL) != NULL; LWLockRelease(dtm->hashLock); if (!inDoubt) { XLogRecPtr lsn; inDoubt = DtmGetTransactionStatus(xid, &lsn) != TRANSACTION_STATUS_IN_PROGRESS; } if (inDoubt) { XTM_INFO("Wait for transaction %d to complete\n", xid); XactLockTableWait(xid, NULL, NULL, XLTW_None); return true; } } return false; }
/** * \brief Exported module wrapper for the \f$4\times 7\f$ * implementation of the sparse triangular solve operation, * where the matrix is lower triangular. */ static void MatTransTrisolve_Lower( const oski_submatMBCSR_t* T, oski_value_t alpha, oski_vecview_t x ) { assert( T != NULL ); assert( T->r == 4 ); assert( T->c == 7 ); assert( x != INVALID_VEC ); if( x->rowinc == 1 ) { oski_index_t j; oski_value_t* xp; for( j = 0, xp = x->val; j < x->num_cols; j++, xp += x->colinc ) MBCSR_MatTransTrisolve_Lower_v1_aX_xs1( T->num_block_rows, T->offset, T->bptr, T->bind, T->bval, T->bdiag, alpha, xp ); } else { oski_index_t j; oski_value_t* xp; for( j = 0, xp = x->val; j < x->num_cols; j++, xp += x->colinc ) MBCSR_MatTransTrisolve_Lower_v1_aX_xsX( T->num_block_rows, T->offset, T->bptr, T->bind, T->bval, T->bdiag, alpha, xp, x->rowinc ); } }
/* save the window and its current geometry into the geometry hashtable */ void window_geom_save(const gchar *name, window_geometry_t *geom) { gchar *key; window_geometry_t *work; if (!window_geom_hash) { window_geom_hash = g_hash_table_new(g_str_hash, g_str_equal); } work = (window_geometry_t *)g_hash_table_lookup(window_geom_hash, name); if (work) { g_hash_table_remove(window_geom_hash, name); g_free(work->key); g_free(work); } work = (window_geometry_t *)g_malloc(sizeof(window_geometry_t)); *work = *geom; key = g_strdup(name); work->key = key; g_hash_table_insert(window_geom_hash, key, work); }
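The save routine implies an obvious lookup path. A hedged sketch of a counterpart (the name window_geom_load is hypothetical; only the GLib hash lookup and the struct copy mirror the code above):

/* Hypothetical companion to window_geom_save(): copy the remembered geometry
 * for "name" into *geom and report whether anything was found. */
gboolean window_geom_load(const gchar *name, window_geometry_t *geom)
{
    window_geometry_t *work;

    if (!window_geom_hash)      /* nothing has been saved yet */
        return FALSE;

    work = (window_geometry_t *)g_hash_table_lookup(window_geom_hash, name);
    if (!work)
        return FALSE;

    *geom = *work;              /* struct copy, same as the save path */
    return TRUE;
}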
/** * Start constructing the ICV for the associated data */ static __m128i icv_header(private_aesni_gcm_t *this, void *assoc, size_t alen) { u_int blocks, pblocks, rem, i; __m128i h1, h2, h3, h4, d1, d2, d3, d4; __m128i y, last, *ab; h1 = this->hhhh; h2 = this->hhh; h3 = this->hh; h4 = this->h; y = _mm_setzero_si128(); ab = assoc; blocks = alen / AES_BLOCK_SIZE; pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM); rem = alen % AES_BLOCK_SIZE; for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM) { d1 = _mm_loadu_si128(ab + i + 0); d2 = _mm_loadu_si128(ab + i + 1); d3 = _mm_loadu_si128(ab + i + 2); d4 = _mm_loadu_si128(ab + i + 3); y = _mm_xor_si128(y, d1); y = mult4xor(h1, h2, h3, h4, y, d2, d3, d4); } for (i = pblocks; i < blocks; i++) { y = ghash(this->h, y, _mm_loadu_si128(ab + i)); } if (rem) { last = _mm_setzero_si128(); memcpy(&last, ab + blocks, rem); y = ghash(this->h, y, last); } return y; }
/* * Enter an RCU read-side critical section. * * The first cmm_barrier() call ensures that the compiler does not reorder * the body of _rcu_read_lock() with a mutex. * * This function and its helper are both less than 10 lines long. The * intent is that this function meets the 10-line criterion in LGPL, * allowing this function to be invoked directly from non-LGPL code. */ static inline void _rcu_read_lock(void) { unsigned long tmp; if (caa_unlikely(!URCU_TLS(rcu_reader))) rcu_bp_register(); cmm_barrier(); tmp = URCU_TLS(rcu_reader)->ctr; _rcu_read_lock_update(tmp); }
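From the caller's side, the bp ("bulletproof") flavour needs no explicit thread registration: the first read-lock in a thread registers it lazily via rcu_bp_register(). A minimal reader/updater sketch against the public liburcu-bp API (the cfg structure and global_cfg pointer are illustrative, not part of liburcu):

/* Minimal liburcu-bp usage sketch: readers never block, the updater waits
 * for a grace period before freeing the old snapshot. */
#include <urcu-bp.h>     /* public header for the bulletproof flavour */
#include <stdlib.h>

struct cfg { int verbose; };            /* illustrative payload */
static struct cfg *global_cfg;          /* illustrative shared pointer */

static int read_verbose(void)
{
    int v = 0;

    rcu_read_lock();                    /* ends up in _rcu_read_lock() above */
    struct cfg *c = rcu_dereference(global_cfg);
    if (c)
        v = c->verbose;
    rcu_read_unlock();
    return v;
}

static void update_cfg(int verbose)
{
    struct cfg *newc = malloc(sizeof(*newc));
    struct cfg *oldc;

    if (!newc)
        return;
    newc->verbose = verbose;
    oldc = global_cfg;
    rcu_assign_pointer(global_cfg, newc);
    synchronize_rcu();                  /* wait until no reader can still see oldc */
    free(oldc);
}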
/* * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de> * Copyright (C) 2005-2009 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/export.h> #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/err.h> #include <linux/platform_device.h> #include "imx-ipu-v3.h" #include "ipu-prv.h" struct ipu_di { void __iomem *base; int id; u32 module; struct clk *clk_di; /* display input clock */ struct clk *clk_ipu; /* IPU bus clock */ struct clk *clk_di_pixel; /* resulting pixel clock */ bool inuse; struct ipu_soc *ipu; }; static DEFINE_MUTEX(di_mutex); struct di_sync_config { int run_count; int run_src; int offset_count; int offset_src; int repeat_count; int cnt_clr_src; int cnt_polarity_gen_en; int cnt_polarity_clr_src; int cnt_polarity_trigger_src; int cnt_up; int cnt_down; }; enum di_pins { DI_PIN11 = 0, DI_PIN12 = 1, DI_PIN13 = 2, DI_PIN14 = 3, DI_PIN15 = 4, DI_PIN16 = 5, DI_PIN17 = 6, DI_PIN_CS = 7, DI_PIN_SER_CLK = 0, DI_PIN_SER_RS = 1, }; enum di_sync_wave { DI_SYNC_NONE = 0, DI_SYNC_CLK = 1, DI_SYNC_INT_HSYNC = 2, DI_SYNC_HSYNC = 3, DI_SYNC_VSYNC = 4, DI_SYNC_DE = 6, }; #define SYNC_WAVE 0 #define DI_GENERAL 0x0000 #define DI_BS_CLKGEN0 0x0004 #define DI_BS_CLKGEN1 0x0008 #define DI_SW_GEN0(gen) (0x000c + 4 * ((gen) - 1)) #define DI_SW_GEN1(gen) (0x0030 + 4 * ((gen) - 1)) #define DI_STP_REP(gen) (0x0148 + 4 * (((gen) - 1)/2)) #define DI_SYNC_AS_GEN 0x0054 #define DI_DW_GEN(gen) (0x0058 + 4 * (gen)) #define DI_DW_SET(gen, set) (0x0088 + 4 * ((gen) + 0xc * (set))) #define DI_SER_CONF 0x015c #define DI_SSC 0x0160 #define DI_POL 0x0164 #define DI_AW0 0x0168 #define DI_AW1 0x016c #define DI_SCR_CONF 0x0170 #define DI_STAT 0x0174 #define DI_SW_GEN0_RUN_COUNT(x) ((x) << 19) #define DI_SW_GEN0_RUN_SRC(x) ((x) << 16) #define DI_SW_GEN0_OFFSET_COUNT(x) ((x) << 3) #define DI_SW_GEN0_OFFSET_SRC(x) ((x) << 0) #define DI_SW_GEN1_CNT_POL_GEN_EN(x) ((x) << 29) #define DI_SW_GEN1_CNT_CLR_SRC(x) ((x) << 25) #define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x) ((x) << 12) #define DI_SW_GEN1_CNT_POL_CLR_SRC(x) ((x) << 9) #define DI_SW_GEN1_CNT_DOWN(x) ((x) << 16) #define DI_SW_GEN1_CNT_UP(x) (x) #define DI_SW_GEN1_AUTO_RELOAD (0x10000000) #define DI_DW_GEN_ACCESS_SIZE_OFFSET 24 #define DI_DW_GEN_COMPONENT_SIZE_OFFSET 16 #define DI_GEN_POLARITY_1 (1 << 0) #define DI_GEN_POLARITY_2 (1 << 1) #define DI_GEN_POLARITY_3 (1 << 2) #define DI_GEN_POLARITY_4 (1 << 3) #define DI_GEN_POLARITY_5 (1 << 4) #define DI_GEN_POLARITY_6 (1 << 5) #define DI_GEN_POLARITY_7 (1 << 6) #define DI_GEN_POLARITY_8 (1 << 7) #define DI_GEN_POLARITY_DISP_CLK (1 << 17) #define DI_GEN_DI_CLK_EXT (1 << 20) #define DI_GEN_DI_VSYNC_EXT (1 << 21) #define DI_POL_DRDY_DATA_POLARITY (1 << 7) #define DI_POL_DRDY_POLARITY_15 (1 << 4) #define DI_VSYNC_SEL_OFFSET 13 static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset) { return readl(di->base + offset); } static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset) { writel(value, di->base + offset); } static void 
ipu_di_data_wave_config(struct ipu_di *di, int wave_gen, int access_size, int component_size) { u32 reg; reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) | (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET); ipu_di_write(di, reg, DI_DW_GEN(wave_gen)); } static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin, int set, int up, int down) { u32 reg; reg = ipu_di_read(di, DI_DW_GEN(wave_gen)); reg &= ~(0x3 << (di_pin * 2)); reg |= set << (di_pin * 2); ipu_di_write(di, reg, DI_DW_GEN(wave_gen)); ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set)); } static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config, int start, int count) { u32 reg; int i; for (i = 0; i < count; i++) { struct di_sync_config *c = &config[i]; int wave_gen = start + i + 1; if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) || (c->repeat_count >= 0x1000) || (c->cnt_up >= 0x400) || (c->cnt_down >= 0x400)) { dev_err(di->ipu->dev, "DI%d counters out of range.\n", di->id); return; } reg = DI_SW_GEN0_RUN_COUNT(c->run_count) | DI_SW_GEN0_RUN_SRC(c->run_src) | DI_SW_GEN0_OFFSET_COUNT(c->offset_count) | DI_SW_GEN0_OFFSET_SRC(c->offset_src); ipu_di_write(di, reg, DI_SW_GEN0(wave_gen)); reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) | DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) | DI_SW_GEN1_CNT_POL_TRIGGER_SRC( c->cnt_polarity_trigger_src) | DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) | DI_SW_GEN1_CNT_DOWN(c->cnt_down) | DI_SW_GEN1_CNT_UP(c->cnt_up); /* Enable auto reload */ if (c->repeat_count == 0) reg |= DI_SW_GEN1_AUTO_RELOAD; ipu_di_write(di, reg, DI_SW_GEN1(wave_gen)); reg = ipu_di_read(di, DI_STP_REP(wave_gen)); reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1))); reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1)); ipu_di_write(di, reg, DI_STP_REP(wave_gen)); } } static void ipu_di_sync_config_interlaced(struct ipu_di *di, struct ipu_di_signal_cfg *sig) { u32 h_total = sig->width + sig->h_sync_width + sig->h_start_width + sig->h_end_width; u32 v_total = sig->height + sig->v_sync_width + sig->v_start_width + sig->v_end_width; u32 reg; struct di_sync_config cfg[] = { { .run_count = h_total / 2 - 1, .run_src = DI_SYNC_CLK, }, { .run_count = h_total - 11, .run_src = DI_SYNC_CLK, .cnt_down = 4, }, { .run_count = v_total * 2 - 1, .run_src = DI_SYNC_INT_HSYNC, .offset_count = 1, .offset_src = DI_SYNC_INT_HSYNC, .cnt_down = 4, }, { .run_count = v_total / 2 - 1, .run_src = DI_SYNC_HSYNC, .offset_count = sig->v_start_width, .offset_src = DI_SYNC_HSYNC, .repeat_count = 2, .cnt_clr_src = DI_SYNC_VSYNC, }, { .run_src = DI_SYNC_HSYNC, .repeat_count = sig->height / 2, .cnt_clr_src = 4, }, { .run_count = v_total - 1, .run_src = DI_SYNC_HSYNC, }, { .run_count = v_total / 2 - 1, .run_src = DI_SYNC_HSYNC, .offset_count = 9, .offset_src = DI_SYNC_HSYNC, .repeat_count = 2, .cnt_clr_src = DI_SYNC_VSYNC, }, { .run_src = DI_SYNC_CLK, .offset_count = sig->h_start_width, .offset_src = DI_SYNC_CLK, .repeat_count = sig->width, .cnt_clr_src = 5, }, { .run_count = v_total - 1, .run_src = DI_SYNC_INT_HSYNC, .offset_count = v_total / 2, .offset_src = DI_SYNC_INT_HSYNC, .cnt_clr_src = DI_SYNC_HSYNC, .cnt_down = 4, } }; ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg)); /* set gentime select and tag sel */ reg = ipu_di_read(di, DI_SW_GEN1(9)); reg &= 0x1FFFFFFF; reg |= (3 - 1) << 29 | 0x00008000; ipu_di_write(di, reg, DI_SW_GEN1(9)); ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF); } static void ipu_di_sync_config_noninterlaced(struct ipu_di *di, struct ipu_di_signal_cfg *sig, int 
div) { u32 h_total = sig->width + sig->h_sync_width + sig->h_start_width + sig->h_end_width; u32 v_total = sig->height + sig->v_sync_width + sig->v_start_width + sig->v_end_width; struct di_sync_config cfg[] = { { /* 1: INT_HSYNC */ .run_count = h_total - 1, .run_src = DI_SYNC_CLK, } , { /* PIN2: HSYNC */ .run_count = h_total - 1, .run_src = DI_SYNC_CLK, .offset_count = div * sig->v_to_h_sync, .offset_src = DI_SYNC_CLK, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_CLK, .cnt_down = sig->h_sync_width * 2, } , { /* PIN3: VSYNC */ .run_count = v_total - 1, .run_src = DI_SYNC_INT_HSYNC, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC, .cnt_down = sig->v_sync_width * 2, } , { /* 4: Line Active */ .run_src = DI_SYNC_HSYNC, .offset_count = sig->v_sync_width + sig->v_start_width, .offset_src = DI_SYNC_HSYNC, .repeat_count = sig->height, .cnt_clr_src = DI_SYNC_VSYNC, } , { /* 5: Pixel Active, referenced by DC */ .run_src = DI_SYNC_CLK, .offset_count = sig->h_sync_width + sig->h_start_width, .offset_src = DI_SYNC_CLK, .repeat_count = sig->width, .cnt_clr_src = 5, /* Line Active */ } , { /* unused */ } , { /* unused */ } , { /* unused */ } , { /* unused */ }, }; /* can't use #7 and #8 for line active and pixel active counters */ struct di_sync_config cfg_vga[] = { { /* 1: INT_HSYNC */ .run_count = h_total - 1, .run_src = DI_SYNC_CLK, } , { /* 2: VSYNC */ .run_count = v_total - 1, .run_src = DI_SYNC_INT_HSYNC, } , { /* 3: Line Active */ .run_src = DI_SYNC_INT_HSYNC, .offset_count = sig->v_sync_width + sig->v_start_width, .offset_src = DI_SYNC_INT_HSYNC, .repeat_count = sig->height, .cnt_clr_src = 3 /* VSYNC */, } , { /* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */ .run_count = h_total - 1, .run_src = DI_SYNC_CLK, .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */ .offset_src = DI_SYNC_CLK, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_CLK, .cnt_down = sig->h_sync_width * 2, } , { /* 5: Pixel Active signal to DC */ .run_src = DI_SYNC_CLK, .offset_count = sig->h_sync_width + sig->h_start_width, .offset_src = DI_SYNC_CLK, .repeat_count = sig->width, .cnt_clr_src = 4, /* Line Active */ } , { /* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */ .run_count = v_total - 1, .run_src = DI_SYNC_INT_HSYNC, .offset_count = 1, /* magic value from Freescale TVE driver */ .offset_src = DI_SYNC_INT_HSYNC, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC, .cnt_down = sig->v_sync_width * 2, } , { /* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */ .run_count = h_total - 1, .run_src = DI_SYNC_CLK, .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */ .offset_src = DI_SYNC_CLK, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_CLK, .cnt_down = sig->h_sync_width * 2, } , { /* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */ .run_count = v_total - 1, .run_src = DI_SYNC_INT_HSYNC, .offset_count = 1, /* magic value from Freescale TVE driver */ .offset_src = DI_SYNC_INT_HSYNC, .cnt_polarity_gen_en = 1, .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC, .cnt_down = sig->v_sync_width * 2, } , { /* unused */ }, }; ipu_di_write(di, v_total - 1, DI_SCR_CONF); if (sig->hsync_pin == 2 && sig->vsync_pin == 3) ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg)); else ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga)); } static void ipu_di_config_clock(struct ipu_di *di, const struct ipu_di_signal_cfg *sig) { struct clk *clk; unsigned clkgen0; uint32_t val; if (sig->clkflags & 
IPU_DI_CLKMODE_EXT) { /* * CLKMODE_EXT means we must use the DI clock: this is * needed for things like LVDS which needs to feed the * DI and LDB with the same pixel clock. */ clk = di->clk_di; if (sig->clkflags & IPU_DI_CLKMODE_SYNC) { /* * CLKMODE_SYNC means that we want the DI to be * clocked at the same rate as the parent clock. * This is needed (eg) for LDB which needs to be * fed with the same pixel clock. We assume that * the LDB clock has already been set correctly. */ clkgen0 = 1 << 4; } else { /* * We can use the divider. We should really have * a flag here indicating whether the bridge can * cope with a fractional divider or not. For the * time being, let's go for simplicitly and * reliability. */ unsigned long in_rate; unsigned div; clk_set_rate(clk, sig->pixelclock); in_rate = clk_get_rate(clk); div = (in_rate + sig->pixelclock / 2) / sig->pixelclock; if (div == 0) div = 1; clkgen0 = div << 4; } } else { /* * For other interfaces, we can arbitarily select between * the DI specific clock and the internal IPU clock. See * DI_GENERAL bit 20. We select the IPU clock if it can * give us a clock rate within 1% of the requested frequency, * otherwise we use the DI clock. */ unsigned long rate, clkrate; unsigned div, error; clkrate = clk_get_rate(di->clk_ipu); div = (clkrate + sig->pixelclock / 2) / sig->pixelclock; rate = clkrate / div; error = rate / (sig->pixelclock / 1000); dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n", rate, div, (signed)(error - 1000) / 10, error % 10); /* Allow a 1% error */ if (error < 1010 && error >= 990) { clk = di->clk_ipu; clkgen0 = div << 4; } else { unsigned long in_rate; unsigned div; clk = di->clk_di; clk_set_rate(clk, sig->pixelclock); in_rate = clk_get_rate(clk); div = (in_rate + sig->pixelclock / 2) / sig->pixelclock; if (div == 0) div = 1; clkgen0 = div << 4; } } di->clk_di_pixel = clk; /* Set the divider */ ipu_di_write(di, clkgen0, DI_BS_CLKGEN0); /* * Set the high/low periods. Bits 24:16 give us the falling edge, * and bits 8:0 give the rising edge. LSB is fraction, and is * based on the divider above. We want a 50% duty cycle, so set * the falling edge to be half the divider. */ ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1); /* Finally select the input clock */ val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT; if (clk == di->clk_di) val |= DI_GEN_DI_CLK_EXT; ipu_di_write(di, val, DI_GENERAL); dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n", sig->pixelclock, clk_get_rate(di->clk_ipu), clk_get_rate(di->clk_di), clk == di->clk_di ? 
"DI" : "IPU", clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4)); } int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig) { u32 reg; u32 di_gen, vsync_cnt; u32 div; u32 h_total, v_total; dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n", di->id, sig->width, sig->height); if ((sig->v_sync_width == 0) || (sig->h_sync_width == 0)) return -EINVAL; h_total = sig->width + sig->h_sync_width + sig->h_start_width + sig->h_end_width; v_total = sig->height + sig->v_sync_width + sig->v_start_width + sig->v_end_width; dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n", clk_get_rate(di->clk_ipu), clk_get_rate(di->clk_di), sig->pixelclock); mutex_lock(&di_mutex); ipu_di_config_clock(di, sig); div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff; div = div / 16; /* Now divider is integer portion */ /* Setup pixel clock timing */ /* Down time is half of period */ ipu_di_write(di, (div << 16), DI_BS_CLKGEN1); ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1); ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2); di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT; di_gen |= DI_GEN_DI_VSYNC_EXT; if (sig->interlaced) { ipu_di_sync_config_interlaced(di, sig); /* set y_sel = 1 */ di_gen |= 0x10000000; di_gen |= DI_GEN_POLARITY_5; di_gen |= DI_GEN_POLARITY_8; vsync_cnt = 7; if (sig->Hsync_pol) di_gen |= DI_GEN_POLARITY_3; if (sig->Vsync_pol) di_gen |= DI_GEN_POLARITY_2; } else { ipu_di_sync_config_noninterlaced(di, sig, div); vsync_cnt = 3; if (di->id == 1) /* * TODO: change only for TVEv2, parallel display * uses pin 2 / 3 */ if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3)) vsync_cnt = 6; if (sig->Hsync_pol) { if (sig->hsync_pin == 2) di_gen |= DI_GEN_POLARITY_2; else if (sig->hsync_pin == 4) di_gen |= DI_GEN_POLARITY_4; else if (sig->hsync_pin == 7) di_gen |= DI_GEN_POLARITY_7; } if (sig->Vsync_pol) { if (sig->vsync_pin == 3) di_gen |= DI_GEN_POLARITY_3; else if (sig->vsync_pin == 6) di_gen |= DI_GEN_POLARITY_6; else if (sig->vsync_pin == 8) di_gen |= DI_GEN_POLARITY_8; } } if (!sig->clk_pol) di_gen |= DI_GEN_POLARITY_DISP_CLK; ipu_di_write(di, di_gen, DI_GENERAL); ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002, DI_SYNC_AS_GEN); reg = ipu_di_read(di, DI_POL); reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15); if (sig->enable_pol) reg |= DI_POL_DRDY_POLARITY_15; if (sig->data_pol) reg |= DI_POL_DRDY_DATA_POLARITY; ipu_di_write(di, reg, DI_POL); mutex_unlock(&di_mutex); return 0; } EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel); int ipu_di_enable(struct ipu_di *di) { int ret; WARN_ON(IS_ERR(di->clk_di_pixel)); ret = clk_prepare_enable(di->clk_di_pixel); if (ret) return ret; ipu_module_enable(di->ipu, di->module); return 0; } EXPORT_SYMBOL_GPL(ipu_di_enable); int ipu_di_disable(struct ipu_di *di) { WARN_ON(IS_ERR(di->clk_di_pixel)); ipu_module_disable(di->ipu, di->module); clk_disable_unprepare(di->clk_di_pixel); return 0; } EXPORT_SYMBOL_GPL(ipu_di_disable); int ipu_di_get_num(struct ipu_di *di) { return di->id; } EXPORT_SYMBOL_GPL(ipu_di_get_num); static DEFINE_MUTEX(ipu_di_lock); struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp) { struct ipu_di *di; if (disp > 1) return ERR_PTR(-EINVAL); di = ipu->di_priv[disp]; mutex_lock(&ipu_di_lock); if (di->inuse) { di = ERR_PTR(-EBUSY); goto out; } di->inuse = true; out: mutex_unlock(&ipu_di_lock); return di; } EXPORT_SYMBOL_GPL(ipu_di_get); void ipu_di_put(struct ipu_di *di) { mutex_lock(&ipu_di_lock); di->inuse = false; mutex_unlock(&ipu_di_lock); } 
EXPORT_SYMBOL_GPL(ipu_di_put); int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *clk_ipu) { struct ipu_di *di; if (id > 1) return -ENODEV; di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL); if (!di) return -ENOMEM; ipu->di_priv[id] = di; di->clk_di = devm_clk_get(dev, id ? "di1" : "di0"); if (IS_ERR(di->clk_di)) return PTR_ERR(di->clk_di); di->module = module; di->id = id; di->clk_ipu = clk_ipu; di->base = devm_ioremap(dev, base, PAGE_SIZE); if (!di->base) return -ENOMEM; ipu_di_write(di, 0x10, DI_BS_CLKGEN0); dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n", id, base, di->base); di->inuse = false; di->ipu = ipu; return 0; } void ipu_di_exit(struct ipu_soc *ipu, int id) { }
/*! * \brief Get a string description of an RDMnet connection failure event. * * An RDMnet connection failure event provides a high-level reason why an RDMnet connection failed. * * \param[in] event Event code. * \return String, or NULL if event is invalid. */ const char* rdmnet_connect_fail_event_to_string(rdmnet_connect_fail_event_t event) { if (event >= 0 && event < NUM_CONNECT_FAIL_EVENT_STRINGS) return kRdmnetConnectFailEventStrings[event]; return NULL; }
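Typical use is a straight lookup with a fallback for out-of-range codes; a short illustrative sketch (handle_connect_failed and the printf logging are assumptions, not RDMnet API):

/* Illustrative handling of a connect-failed notification. */
#include <stdio.h>

void handle_connect_failed(rdmnet_connect_fail_event_t event)
{
  const char *reason = rdmnet_connect_fail_event_to_string(event);

  /* The lookup returns NULL for out-of-range codes, so supply a fallback. */
  printf("RDMnet connection failed: %s\n", reason ? reason : "(unknown event)");
}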
/** * Initialize an object of type of_bsn_tlv_udf_data. * * @param obj Pointer to the object to initialize * @param version The wire version to use for the object * @param bytes How many bytes in the object * @param clean_wire Boolean: If true, clear the wire object control struct * * If bytes < 0, then the default fixed length is used for the object * * This is a "coerce" function that sets up the pointers for the * accessors properly. * * If anything other than 0 is passed in for the buffer size, the underlying * wire buffer will have 'grow' called. */ void of_bsn_tlv_udf_data_init(of_object_t *obj, of_version_t version, int bytes, int clean_wire) { LOCI_ASSERT(of_object_fixed_len[version][OF_BSN_TLV_UDF_DATA] >= 0); if (clean_wire) { MEMSET(obj, 0, sizeof(*obj)); } if (bytes < 0) { bytes = of_object_fixed_len[version][OF_BSN_TLV_UDF_DATA]; } obj->version = version; obj->length = bytes; obj->object_id = OF_BSN_TLV_UDF_DATA; if (obj->wbuf != NULL) { int tot_bytes; tot_bytes = bytes + obj->obj_offset; of_wire_buffer_grow(obj->wbuf, tot_bytes); } }
/**************************************************************************** * Print an alignment in CLUSTALW format. ****************************************************************************/ void print_clustalw( FILE* outfile, BOOLEAN_T show_residue_count, ALIGNMENT_T* an_alignment ) { int i = 0; int length = 0; int num_sequences = 0; int seq_index = 0; int num_blocks = 0; int block_index = 0; int remainder = 0; int name_length = 0; int num_spaces = 0; int max_seq_name = 0; SEQ_T* seq = NULL; char* name = NULL; char* consensus_string = NULL; char* seq_string = NULL; int* residue_counts = NULL; char seq_block[BLOCKSIZE + 1]; assert(an_alignment != NULL); num_sequences = get_num_aligned_sequences(an_alignment); residue_counts = (int *) mm_malloc(num_sequences * sizeof(int)); if (residue_counts == NULL) { die("Error allocating memory to track residue counts\n"); } memset(residue_counts, 0, num_sequences * sizeof(int)); length = get_alignment_length(an_alignment); num_blocks = length/BLOCKSIZE; remainder = (length % BLOCKSIZE); if (remainder) num_blocks++; fputs("CLUSTAL W\n", outfile); fputs("\n\n", outfile); for (seq_index = 0; seq_index < num_sequences; seq_index++) { seq = get_alignment_sequence(seq_index, an_alignment); name_length = strlen(get_seq_name(seq)); max_seq_name = max_seq_name > name_length ? max_seq_name : name_length; } max_seq_name += 12; for (block_index = 0; block_index < num_blocks; block_index++) { for (seq_index = 0; seq_index < num_sequences; seq_index++) { seq = get_alignment_sequence(seq_index, an_alignment); assert(seq != NULL); name = get_seq_name(seq); fputs(name, outfile); num_spaces = max_seq_name - strlen(name); for (i = 0; i < num_spaces; i++) { fputc(' ', outfile); } seq_string = get_raw_sequence(seq); seq_block[0] = '\0'; strncat(seq_block, (seq_string + (block_index * BLOCKSIZE)), BLOCKSIZE); residue_counts[seq_index] += count_residues(seq_block); fputs(seq_block, outfile); if (show_residue_count) { fprintf(outfile, " %d", residue_counts[seq_index]); } fputc('\n', outfile); } seq_block[0] = '\0'; consensus_string = get_consensus_string(an_alignment); strncat(seq_block, consensus_string + (block_index * BLOCKSIZE), BLOCKSIZE); for (i = 0; i < max_seq_name; i++) { fputc(' ', outfile); } fprintf(outfile, "%s\n\n", seq_block); } if (residue_counts) myfree(residue_counts); }
/* Get an inode object given its location and corresponding root. * Returns in *is_new if the inode was read from disk */ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *new) { struct inode *inode; inode = btrfs_iget_locked(s, location, root); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { int ret; ret = btrfs_read_locked_inode(inode); if (!is_bad_inode(inode)) { inode_tree_add(inode); unlock_new_inode(inode); if (new) *new = 1; } else { unlock_new_inode(inode); iput(inode); ASSERT(ret < 0); inode = ERR_PTR(ret < 0 ? ret : -ESTALE); } } return inode; }
/* * Flags current command inside current transaction with AES_NOCRYPT_FLAG. */ static void SetNotEncryptDecryptTTS(void) { LocalTransactionId lxid; lxid = MyProc->lxid; command_crypt_state_set(lxid, AES_NOCRYPT_FLAG, NULL); }
/* Handle a non-simple (simple meaning requires no iteration), constraint (IE *x = &y, x = *y, *x = y, and x = y with offsets involved). */ static void do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta, bitmap *expanded_delta) { if (c->lhs.type == DEREF) { if (c->rhs.type == ADDRESSOF) { gcc_unreachable (); } else { do_ds_constraint (c, delta, expanded_delta); } } else if (c->rhs.type == DEREF) { if (!(get_varinfo (c->lhs.var)->is_special_var)) do_sd_constraint (graph, c, delta, expanded_delta); } else { bitmap tmp; bool flag = false; gcc_checking_assert (c->rhs.type == SCALAR && c->lhs.type == SCALAR && c->rhs.offset != 0 && c->lhs.offset == 0); tmp = get_varinfo (c->lhs.var)->solution; flag = set_union_with_increment (tmp, delta, c->rhs.offset, expanded_delta); if (flag) bitmap_set_bit (changed, c->lhs.var); } }
/* * Make sure there is room for at least one more entry in a ResourceOwner's * files reference array. * * This is separate from actually inserting an entry because if we run out * of memory, it's critical to do so *before* acquiring the resource. */ void ResourceOwnerEnlargeFiles(ResourceOwner owner) { ResourceArrayEnlarge(&(owner->filearr)); }
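The enlarge-before-acquire ordering matters because the later "remember" step must not be able to fail once the kernel resource exists. A hedged sketch of the calling pattern (acquire_file_somehow() is a placeholder for whatever actually opens the descriptor; ResourceOwnerRememberFile and CurrentResourceOwner follow the PostgreSQL resowner convention but are shown from memory, not quoted from the source):

/* Hedged sketch of the caller-side pattern: reserve the array slot first,
 * so that remembering the file after it is opened cannot hit out-of-memory. */
static File
track_new_file(void)
{
    File file;

    /* May elog(ERROR) on OOM -- but nothing has been acquired yet. */
    ResourceOwnerEnlargeFiles(CurrentResourceOwner);

    file = acquire_file_somehow();      /* hypothetical acquisition step */

    /* With the slot pre-reserved, this cannot fail, so the new
     * descriptor can never be leaked on an error path. */
    ResourceOwnerRememberFile(CurrentResourceOwner, file);

    return file;
}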
//////////////////////////////////////////////////////////////////////////// // // GetMBDefaultComp // // Translates the wide character string to a multibyte string and returns // the number of bytes written. This also checks for the use of the default // character and tries to convert composite forms to precomposed forms, so // the translation is a lot slower. // // 05-31-91 JulieB Created. //////////////////////////////////////////////////////////////////////////// int GetMBDefaultComp( PCP_HASH pHashN, LPWSTR pWCStr, LPWSTR pEndWCStr, LPBYTE pMBStr, int cbMultiByte, WORD wDefault, LPBOOL pUsedDef, DWORD dwFlags) { int mbIncr; int mbCount = 0; LPBYTE pEndMBStr; BOOL fError; if (cbMultiByte == 0) { BYTE pTempStr[2]; SET_MSB(dwFlags); if (IS_SBCS_CP(pHashN)) { while (pWCStr < pEndWCStr) { mbCount += GetMBCompSB( pHashN, dwFlags, pWCStr, pTempStr, mbCount, wDefault, pUsedDef ); pWCStr++; } } else { while (pWCStr < pEndWCStr) { mbCount += GetMBCompMB( pHashN, dwFlags, pWCStr, pTempStr, mbCount, wDefault, pUsedDef, &fError, FALSE ); pWCStr++; } } } else { pEndMBStr = pMBStr + cbMultiByte; if (IS_SBCS_CP(pHashN)) { while ((pWCStr < pEndWCStr) && (pMBStr < pEndMBStr)) { mbIncr = GetMBCompSB( pHashN, dwFlags, pWCStr, pMBStr, mbCount, wDefault, pUsedDef ); pWCStr++; mbCount += mbIncr; pMBStr += mbIncr; } } else { while ((pWCStr < pEndWCStr) && (pMBStr < pEndMBStr)) { mbIncr = GetMBCompMB( pHashN, dwFlags, pWCStr, pMBStr, mbCount, wDefault, pUsedDef, &fError, ((pMBStr + 1) < pEndMBStr) ? FALSE : TRUE ); if (fError) { break; } pWCStr++; mbCount += mbIncr; pMBStr += mbIncr; } } if (pWCStr < pEndWCStr) { SetLastError(ERROR_INSUFFICIENT_BUFFER); return (0); } } return (mbCount); }
/*========================================================================= Program: Visualization Toolkit Module: vtkSplineRepresentation.h Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ // .NAME vtkSplineRepresentation - representation for a spline. // .SECTION Description // vtkSplineRepresentation is a vtkWidgetRepresentation for a spline. // This 3D widget defines a spline that can be interactively placed in a // scene. The spline has handles, the number of which can be changed, plus it // can be picked on the spline itself to translate or rotate it in the scene. // This is based on vtkSplineWidget. // .SECTION See Also // vtkSplineWidget, vtkSplineWidget2 #ifndef vtkSplineRepresentation_h #define vtkSplineRepresentation_h #include "vtkInteractionWidgetsModule.h" // For export macro #include "vtkCurveRepresentation.h" class vtkActor; class vtkCellPicker; class vtkDoubleArray; class vtkParametricFunctionSource; class vtkParametricSpline; class vtkPlaneSource; class vtkPoints; class vtkPolyData; class vtkProp; class vtkProperty; class vtkSphereSource; class vtkTransform; class VTKINTERACTIONWIDGETS_EXPORT vtkSplineRepresentation : public vtkCurveRepresentation { public: static vtkSplineRepresentation* New(); vtkTypeMacro(vtkSplineRepresentation, vtkCurveRepresentation); void PrintSelf(ostream& os, vtkIndent indent); // Description: // Grab the polydata (including points) that defines the spline. The // polydata consists of points and line segments numbering Resolution + 1 // and Resolution, respectively. Points are guaranteed to be up-to-date when // either the InteractionEvent or EndInteraction events are invoked. The // user provides the vtkPolyData and the points and polyline are added to it. void GetPolyData(vtkPolyData *pd); // Description: // Set the number of handles for this widget. virtual void SetNumberOfHandles(int npts); // Description: // Set/Get the number of line segments representing the spline for // this widget. void SetResolution(int resolution); vtkGetMacro(Resolution,int); // Description: // Set the parametric spline object. Through vtkParametricSpline's API, the // user can supply and configure one of two types of spline: // vtkCardinalSpline, vtkKochanekSpline. The widget controls the open // or closed configuration of the spline. // WARNING: The widget does not enforce internal consistency so that all // three are of the same type. virtual void SetParametricSpline(vtkParametricSpline*); vtkGetObjectMacro(ParametricSpline,vtkParametricSpline); // Description: // Get the position of the spline handles. vtkDoubleArray* GetHandlePositions(); // Description: // Get the approximate vs. the true arc length of the spline. Calculated as // the summed lengths of the individual straight line segments. Use // SetResolution to control the accuracy. double GetSummedLength(); // Description: // Convenience method to allocate and set the handles from a vtkPoints // instance. If the first and last points are the same, the spline sets // Closed to the on InteractionState and disregards the last point, otherwise Closed // remains unchanged. 
void InitializeHandles(vtkPoints* points); // Description: // These are methods that satisfy vtkWidgetRepresentation's API. Note that a // version of place widget is available where the center and handle position // are specified. virtual void BuildRepresentation(); //BTX protected: vtkSplineRepresentation(); ~vtkSplineRepresentation(); // The spline vtkParametricSpline *ParametricSpline; vtkParametricFunctionSource *ParametricFunctionSource; // The number of line segments used to represent the spline. int Resolution; // Specialized method to insert a handle on the poly line. virtual void InsertHandleOnLine(double* pos); private: vtkSplineRepresentation(const vtkSplineRepresentation&); // Not implemented. void operator=(const vtkSplineRepresentation&); // Not implemented. //ETX }; #endif
// SPDX-License-Identifier: GPL-2.0 /* * mtu3_qmu.c - Queue Management Unit driver for device controller * * Copyright (C) 2016 MediaTek Inc. * * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> */ /* * Queue Management Unit (QMU) is designed to unload SW effort * to serve DMA interrupts. * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD), * SW links data buffers and triggers QMU to send / receive data to * host / from device at a time. * And now only GPD is supported. * * For more detailed information, please refer to QMU Programming Guide */ #include <asm/cache.h> #include <cpu_func.h> #include <linux/iopoll.h> #include <linux/types.h> #include "mtu3.h" #define QMU_CHECKSUM_LEN 16 #define GPD_FLAGS_HWO BIT(0) #define GPD_FLAGS_BDP BIT(1) #define GPD_FLAGS_BPS BIT(2) #define GPD_FLAGS_IOC BIT(7) #define GPD_EXT_FLAG_ZLP BIT(5) #define DCACHELINE_SIZE CONFIG_SYS_CACHELINE_SIZE void mtu3_flush_cache(uintptr_t addr, u32 len) { WARN_ON(!(void *)addr || len == 0); flush_dcache_range(addr & ~(DCACHELINE_SIZE - 1), ALIGN(addr + len, DCACHELINE_SIZE)); } void mtu3_inval_cache(uintptr_t addr, u32 len) { WARN_ON(!(void *)addr || len == 0); invalidate_dcache_range(addr & ~(DCACHELINE_SIZE - 1), ALIGN(addr + len, DCACHELINE_SIZE)); } static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring, dma_addr_t dma_addr) { dma_addr_t dma_base = ring->dma; struct qmu_gpd *gpd_head = ring->start; u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head); if (offset >= MAX_GPD_NUM) return NULL; return gpd_head + offset; } static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) { dma_addr_t dma_base = ring->dma; struct qmu_gpd *gpd_head = ring->start; u32 offset; offset = gpd - gpd_head; if (offset >= MAX_GPD_NUM) return 0; return dma_base + (offset * sizeof(*gpd)); } static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) { ring->start = gpd; ring->enqueue = gpd; ring->dequeue = gpd; ring->end = gpd + MAX_GPD_NUM - 1; } static void reset_gpd_list(struct mtu3_ep *mep) { struct mtu3_gpd_ring *ring = &mep->gpd_ring; struct qmu_gpd *gpd = ring->start; if (gpd) { gpd->flag &= ~GPD_FLAGS_HWO; gpd_ring_init(ring, gpd); mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); } } int mtu3_gpd_ring_alloc(struct mtu3_ep *mep) { struct qmu_gpd *gpd; struct mtu3_gpd_ring *ring = &mep->gpd_ring; /* software own all gpds as default */ gpd = memalign(DCACHELINE_SIZE, QMU_GPD_RING_SIZE); if (!gpd) return -ENOMEM; memset(gpd, 0, QMU_GPD_RING_SIZE); ring->dma = (dma_addr_t)gpd; gpd_ring_init(ring, gpd); mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); return 0; } void mtu3_gpd_ring_free(struct mtu3_ep *mep) { struct mtu3_gpd_ring *ring = &mep->gpd_ring; kfree(ring->start); memset(ring, 0, sizeof(*ring)); } void mtu3_qmu_resume(struct mtu3_ep *mep) { struct mtu3 *mtu = mep->mtu; void __iomem *mbase = mtu->mac_base; int epnum = mep->epnum; u32 offset; offset = mep->is_in ? 
USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); mtu3_writel(mbase, offset, QMU_Q_RESUME); if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE)) mtu3_writel(mbase, offset, QMU_Q_RESUME); } static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring) { if (ring->enqueue < ring->end) ring->enqueue++; else ring->enqueue = ring->start; return ring->enqueue; } static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) { if (ring->dequeue < ring->end) ring->dequeue++; else ring->dequeue = ring->start; return ring->dequeue; } /* check if a ring is emtpy */ static int gpd_ring_empty(struct mtu3_gpd_ring *ring) { struct qmu_gpd *enq = ring->enqueue; struct qmu_gpd *next; if (ring->enqueue < ring->end) next = enq + 1; else next = ring->start; /* one gpd is reserved to simplify gpd preparation */ return next == ring->dequeue; } int mtu3_prepare_transfer(struct mtu3_ep *mep) { return gpd_ring_empty(&mep->gpd_ring); } static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) { struct qmu_gpd *enq; struct mtu3_gpd_ring *ring = &mep->gpd_ring; struct qmu_gpd *gpd = ring->enqueue; struct usb_request *req = &mreq->request; /* set all fields to zero as default value */ memset(gpd, 0, sizeof(*gpd)); gpd->buffer = cpu_to_le32((u32)req->dma); gpd->buf_len = cpu_to_le16(req->length); /* get the next GPD */ enq = advance_enq_gpd(ring); dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n", mep->epnum, gpd, enq); enq->flag &= ~GPD_FLAGS_HWO; gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd)); if (req->zero) gpd->ext_flag |= GPD_EXT_FLAG_ZLP; gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; mreq->gpd = gpd; if (req->length) mtu3_flush_cache((uintptr_t)req->buf, req->length); mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); return 0; } static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) { struct qmu_gpd *enq; struct mtu3_gpd_ring *ring = &mep->gpd_ring; struct qmu_gpd *gpd = ring->enqueue; struct usb_request *req = &mreq->request; /* set all fields to zero as default value */ memset(gpd, 0, sizeof(*gpd)); gpd->buffer = cpu_to_le32((u32)req->dma); gpd->data_buf_len = cpu_to_le16(req->length); /* get the next GPD */ enq = advance_enq_gpd(ring); dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n", mep->epnum, gpd, enq); enq->flag &= ~GPD_FLAGS_HWO; gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd)); gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; mreq->gpd = gpd; mtu3_inval_cache((uintptr_t)req->buf, req->length); mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); return 0; } void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) { if (mep->is_in) mtu3_prepare_tx_gpd(mep, mreq); else mtu3_prepare_rx_gpd(mep, mreq); } int mtu3_qmu_start(struct mtu3_ep *mep) { struct mtu3 *mtu = mep->mtu; void __iomem *mbase = mtu->mac_base; struct mtu3_gpd_ring *ring = &mep->gpd_ring; u8 epnum = mep->epnum; if (mep->is_in) { /* set QMU start address */ mtu3_writel(mbase, USB_QMU_TQSAR(epnum), ring->dma); mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN); /* send zero length packet according to ZLP flag in GPD */ mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum)); mtu3_writel(mbase, U3D_TQERRIESR0, QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum)); if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) { dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum); return 0; } mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START); } else { mtu3_writel(mbase, 
USB_QMU_RQSAR(epnum), ring->dma); mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN); /* don't expect ZLP */ mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum)); /* move to next GPD when receive ZLP */ mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum)); mtu3_writel(mbase, U3D_RQERRIESR0, QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum)); mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum)); if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) { dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum); return 0; } mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START); } return 0; } /* may called in atomic context */ void mtu3_qmu_stop(struct mtu3_ep *mep) { struct mtu3 *mtu = mep->mtu; void __iomem *mbase = mtu->mac_base; int epnum = mep->epnum; u32 value = 0; u32 qcsr; int ret; qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) { dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name); return; } mtu3_writel(mbase, qcsr, QMU_Q_STOP); ret = readl_poll_timeout(mbase + qcsr, value, !(value & QMU_Q_ACTIVE), 1000); if (ret) { dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name); return; } dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name); } void mtu3_qmu_flush(struct mtu3_ep *mep) { dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__, ((mep->is_in) ? "TX" : "RX")); /*Stop QMU */ mtu3_qmu_stop(mep); reset_gpd_list(mep); } /* * NOTE: request list maybe is already empty as following case: * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)--> * queue_tx --> process_tasklet(meanwhile, the second one is transferred, * tasklet process both of them)-->qmu_interrupt for second one. * To avoid upper case, put qmu_done_tx in ISR directly to process it. */ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum) { struct mtu3_ep *mep = mtu->in_eps + epnum; struct mtu3_gpd_ring *ring = &mep->gpd_ring; void __iomem *mbase = mtu->mac_base; struct qmu_gpd *gpd = ring->dequeue; struct qmu_gpd *gpd_current = NULL; struct usb_request *req = NULL; struct mtu3_request *mreq; dma_addr_t cur_gpd_dma; /*transfer phy address got from QMU register to virtual address */ cur_gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", __func__, epnum, gpd, gpd_current, ring->enqueue); while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { mreq = next_request(mep); if (!mreq || mreq->gpd != gpd) { dev_err(mtu->dev, "no correct TX req is found\n"); break; } req = &mreq->request; req->actual = le16_to_cpu(gpd->buf_len); mtu3_req_complete(mep, req, 0); gpd = advance_deq_gpd(ring); mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); } dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", __func__, epnum, ring->dequeue, ring->enqueue); } static void qmu_done_rx(struct mtu3 *mtu, u8 epnum) { struct mtu3_ep *mep = mtu->out_eps + epnum; struct mtu3_gpd_ring *ring = &mep->gpd_ring; void __iomem *mbase = mtu->mac_base; struct qmu_gpd *gpd = ring->dequeue; struct qmu_gpd *gpd_current = NULL; struct usb_request *req = NULL; struct mtu3_request *mreq; dma_addr_t cur_gpd_dma; cur_gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum)); gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", __func__, epnum, gpd, gpd_current, ring->enqueue); while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { mreq = 
next_request(mep); if (!mreq || mreq->gpd != gpd) { dev_err(mtu->dev, "no correct RX req is found\n"); break; } req = &mreq->request; req->actual = le16_to_cpu(gpd->buf_len); mtu3_req_complete(mep, req, 0); gpd = advance_deq_gpd(ring); mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); } dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", __func__, epnum, ring->dequeue, ring->enqueue); } static void qmu_done_isr(struct mtu3 *mtu, u32 done_status) { int i; for (i = 1; i < mtu->num_eps; i++) { if (done_status & QMU_RX_DONE_INT(i)) qmu_done_rx(mtu, i); if (done_status & QMU_TX_DONE_INT(i)) qmu_done_tx(mtu, i); } } static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status) { void __iomem *mbase = mtu->mac_base; u32 errval; int i; if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) { errval = mtu3_readl(mbase, U3D_RQERRIR0); for (i = 1; i < mtu->num_eps; i++) { if (errval & QMU_RX_CS_ERR(i)) dev_err(mtu->dev, "Rx %d CS error!\n", i); if (errval & QMU_RX_LEN_ERR(i)) dev_err(mtu->dev, "RX %d Length error\n", i); } mtu3_writel(mbase, U3D_RQERRIR0, errval); } if (qmu_status & RXQ_ZLPERR_INT) { errval = mtu3_readl(mbase, U3D_RQERRIR1); for (i = 1; i < mtu->num_eps; i++) { if (errval & QMU_RX_ZLP_ERR(i)) dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i); } mtu3_writel(mbase, U3D_RQERRIR1, errval); } if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) { errval = mtu3_readl(mbase, U3D_TQERRIR0); for (i = 1; i < mtu->num_eps; i++) { if (errval & QMU_TX_CS_ERR(i)) dev_err(mtu->dev, "Tx %d checksum error!\n", i); if (errval & QMU_TX_LEN_ERR(i)) dev_err(mtu->dev, "Tx %d zlp error!\n", i); } mtu3_writel(mbase, U3D_TQERRIR0, errval); } } irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu) { void __iomem *mbase = mtu->mac_base; u32 qmu_status; u32 qmu_done_status; /* U3D_QISAR1 is read update */ qmu_status = mtu3_readl(mbase, U3D_QISAR1); qmu_status &= mtu3_readl(mbase, U3D_QIER1); qmu_done_status = mtu3_readl(mbase, U3D_QISAR0); qmu_done_status &= mtu3_readl(mbase, U3D_QIER0); mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */ dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n", (qmu_done_status & 0xFFFF), qmu_done_status >> 16, qmu_status); if (qmu_done_status) qmu_done_isr(mtu, qmu_done_status); if (qmu_status) qmu_exception_isr(mtu, qmu_status); return IRQ_HANDLED; } void mtu3_qmu_init(struct mtu3 *mtu) { compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B"); } void mtu3_qmu_exit(struct mtu3 *mtu) { }
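/*
 * Hedged aside, not part of the driver above and guarded out so it does not
 * change this file: gpd_ring_empty() relies on the classic "keep one slot
 * unused" ring-buffer convention -- the ring is out of room as soon as the
 * slot after the enqueue pointer is the dequeue pointer.  The standalone
 * sketch below only illustrates that convention; the names (slot_ring,
 * ring_full, RING_SLOTS) are hypothetical and unrelated to MTU3.
 */
#if 0
#include <stdio.h>

#define RING_SLOTS 4                    /* one slot is always left unused */

struct slot_ring {
	int enq;                        /* next slot to fill */
	int deq;                        /* next slot to drain */
};

static int ring_full(const struct slot_ring *r)
{
	/* same test as gpd_ring_empty(): next(enq) == deq */
	return ((r->enq + 1) % RING_SLOTS) == r->deq;
}

static int ring_empty(const struct slot_ring *r)
{
	return r->enq == r->deq;
}

int main(void)
{
	struct slot_ring r = { 0, 0 };
	int queued = 0;

	while (!ring_full(&r)) {        /* only RING_SLOTS - 1 items fit */
		r.enq = (r.enq + 1) % RING_SLOTS;
		queued++;
	}
	printf("queued %d items, empty=%d\n", queued, ring_empty(&r));
	return 0;
}
#endif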
/* * Entry point for the program. Sets up the microcontroller for use. */ void user_init(void) { uart_div_modify(0, UART_CLK_FREQ / 115200); wifi_init(); ota_init(); dbg_init(); system_os_task(pb_disconnect_task, PB_DISCONNECT_PRI, pb_queue, PB_DISCONNECT_QUEUE_LEN); gpio_init(); PIN_FUNC_SELECT(PERIPHS_IO_MUX_GPIO5_U, FUNC_GPIO5); gpio_output_set(0, 0, 0, BIT5); PIN_PULLUP_EN(PERIPHS_IO_MUX_GPIO5_U); ETS_GPIO_INTR_DISABLE(); gpio_intr_handler_register(gpio_interrupt, NULL); gpio_pin_intr_state_set(GPIO_ID_PIN(5), GPIO_PIN_INTR_NEGEDGE); ETS_GPIO_INTR_ENABLE(); }
/* Do deinterleaving for 4 64 bit vectors, for LD4 insns. */ static void math_DEINTERLEAVE4_64( IRTemp* u0, IRTemp* u1, IRTemp* u2, IRTemp* u3, UInt laneSzBlg2, IRTemp i0, IRTemp i1, IRTemp i2, IRTemp i3 ) { if (laneSzBlg2 == 3) { assign(*u0, EX(i0)); assign(*u1, EX(i1)); assign(*u2, EX(i2)); assign(*u3, EX(i3)); return; } vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); IRTemp di0 = newTempV128(); IRTemp di1 = newTempV128(); IRTemp di2 = newTempV128(); IRTemp di3 = newTempV128(); assign(di0, binop(doubler, EX(i0), EX(i0))); assign(di1, binop(doubler, EX(i1), EX(i1))); assign(di2, binop(doubler, EX(i2), EX(i2))); assign(di3, binop(doubler, EX(i3), EX(i3))); IRTemp du0 = newTempV128(); IRTemp du1 = newTempV128(); IRTemp du2 = newTempV128(); IRTemp du3 = newTempV128(); math_DEINTERLEAVE4_128(&du0, &du1, &du2, &du3, laneSzBlg2 + 1, di0, di1, di2, di3); assign(*u0, binop(halver, EX(du0), EX(du0))); assign(*u1, binop(halver, EX(du1), EX(du1))); assign(*u2, binop(halver, EX(du2), EX(du2))); assign(*u3, binop(halver, EX(du3), EX(du3))); }
// This file is a part of Julia. License is MIT: https://julialang.org/license #include "rle.h" #ifdef __cplusplus extern "C" { #endif /* iteration */ rle_iter_state rle_iter_init(uint64_t key0) { rle_iter_state state = {-1, 0, key0}; return state; } int rle_iter_increment(rle_iter_state *state, size_t len, uint64_t *rletable, size_t npairs) { state->i += 1; size_t i = state->i, j = state->j; if (i >= len) return 0; if (rletable) { while (j < npairs && i >= rletable[j+1]) { state->key = rletable[j]; j += 2; } state->j = j; } return 1; } /* indexing */ void rle_index_to_reference(rle_reference *rr, size_t i, uint64_t *rletable, size_t npairs, uint64_t key0) { if (!rletable) { rr->key = key0; rr->index = i; return; } // Determine the active key uint64_t key = key0; size_t jj = 0; while (jj < npairs && i >= rletable[jj+1]) { key = rletable[jj]; jj += 2; } // Subtract the number of preceding items with different keys uint64_t ckey = key0; size_t j, start = 0, index = i; for (j = 0; j < jj; j+=2) { if (key != ckey) index -= rletable[j+1] - start; ckey = rletable[j]; start = rletable[j+1]; } // Return the result rr->key = key; rr->index = index; return; } size_t rle_reference_to_index(rle_reference *rr, uint64_t *rletable, size_t npairs, uint64_t key0) { uint64_t key = rr->key; size_t index = rr->index, i = index; if (!rletable) { assert(key == key0); return i; } uint64_t ckey = key0; size_t j, start = 0, n; for (j = 0; j < npairs; j+=2) { n = rletable[j+1] - start; if (key != ckey) i += n; else { if (index < n) break; index -= n; } ckey = rletable[j]; start = rletable[j+1]; } return i; } #ifdef __cplusplus } #endif
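/*
 * Hedged usage sketch for the helpers above, guarded out so it does not
 * change this file.  It assumes rle.h declares rle_iter_state with fields
 * i/j/key and rle_reference with fields key/index, exactly as the code above
 * accesses them, and that `rletable` holds flattened (key, start-index)
 * pairs with `npairs` counting the individual uint64_t entries.  The table
 * values and keys below are made up for illustration.
 */
#if 0
#include <stdio.h>

int main(void)
{
    /* indices 0-2 use key0 = 11, indices 3-5 use key 22, indices 6-8 use 33 */
    uint64_t table[] = { 22, 3, 33, 6 };
    size_t npairs = sizeof(table) / sizeof(table[0]);
    size_t len = 9;
    rle_iter_state state = rle_iter_init(11);
    rle_reference rr;

    while (rle_iter_increment(&state, len, table, npairs))
        printf("global %ld -> key %llu\n", (long)state.i,
               (unsigned long long)state.key);

    /* map global index 7 to (key, key-local index) and back again */
    rle_index_to_reference(&rr, 7, table, npairs, 11);
    printf("index 7 -> key %llu, local index %lu\n",
           (unsigned long long)rr.key, (unsigned long)rr.index);
    printf("back to global index: %lu\n",
           (unsigned long)rle_reference_to_index(&rr, table, npairs, 11));
    return 0;
}
#endif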
/* Traverse the tree of user operator nodes. */ void gfc_traverse_user_op (gfc_namespace *ns, void (*func) (gfc_user_op *)) { traverse_uop (ns->uop_root, func); }
/* Test of remainder*() function family. Copyright (C) 2012-2018 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. */ static DOUBLE my_ldexp (DOUBLE x, int d) { for (; d > 0; d--) x *= L_(2.0); for (; d < 0; d++) x *= L_(0.5); return x; } static void test_function (void) { int i; int j; const DOUBLE TWO_MANT_DIG = /* Assume MANT_DIG <= 5 * 31. Use the identity n = floor(n/5) + floor((n+1)/5) + ... + floor((n+4)/5). */ (DOUBLE) (1U << ((MANT_DIG - 1) / 5)) * (DOUBLE) (1U << ((MANT_DIG - 1 + 1) / 5)) * (DOUBLE) (1U << ((MANT_DIG - 1 + 2) / 5)) * (DOUBLE) (1U << ((MANT_DIG - 1 + 3) / 5)) * (DOUBLE) (1U << ((MANT_DIG - 1 + 4) / 5)); /* Randomized tests. */ for (i = 0; i < SIZEOF (RANDOM) / 5; i++) for (j = 0; j < SIZEOF (RANDOM) / 5; j++) { DOUBLE x = L_(16.0) * RANDOM[i]; /* 0.0 <= x <= 16.0 */ DOUBLE y = RANDOM[j]; /* 0.0 <= y < 1.0 */ if (y > L_(0.0)) { DOUBLE z = REMAINDER (x, y); ASSERT (z >= - L_(0.5) * y); ASSERT (z <= L_(0.5) * y); z -= x - (int) ((L_(2.0) * x + y) / (L_(2.0) * y)) * y; ASSERT (/* The common case. */ (z > - L_(2.0) * L_(16.0) / TWO_MANT_DIG && z < L_(2.0) * L_(16.0) / TWO_MANT_DIG) || /* rounding error: 2x+y / 2y computed too large */ (z > y - L_(2.0) * L_(16.0) / TWO_MANT_DIG && z < y + L_(2.0) * L_(16.0) / TWO_MANT_DIG) || /* rounding error: 2x+y / 2y computed too small */ (z > - y - L_(2.0) * L_(16.0) / TWO_MANT_DIG && z < - y + L_(2.0) * L_(16.0) / TWO_MANT_DIG)); } } for (i = 0; i < SIZEOF (RANDOM) / 5; i++) for (j = 0; j < SIZEOF (RANDOM) / 5; j++) { DOUBLE x = L_(1.0e9) * RANDOM[i]; /* 0.0 <= x <= 10^9 */ DOUBLE y = RANDOM[j]; /* 0.0 <= y < 1.0 */ if (y > L_(0.0)) { DOUBLE z = REMAINDER (x, y); DOUBLE r; ASSERT (z >= - L_(0.5) * y); ASSERT (z <= L_(0.5) * y); { /* Determine the quotient 2x+y / 2y in two steps, because it may be > 2^31. */ int q1 = (int) (x / y / L_(65536.0)); int q2 = (int) ((L_(2.0) * (x - q1 * L_(65536.0) * y) + y) / (L_(2.0) * y)); DOUBLE q = (DOUBLE) q1 * L_(65536.0) + (DOUBLE) q2; r = x - q * y; } /* The absolute error of z can be up to 1e9/2^MANT_DIG. The absolute error of r can also be up to 1e9/2^MANT_DIG. Therefore the error of z - r can be twice as large. */ z -= r; ASSERT (/* The common case. */ (z > - L_(2.0) * L_(1.0e9) / TWO_MANT_DIG && z < L_(2.0) * L_(1.0e9) / TWO_MANT_DIG) || /* rounding error: 2x+y / 2y computed too large */ (z > y - L_(2.0) * L_(1.0e9) / TWO_MANT_DIG && z < y + L_(2.0) * L_(1.0e9) / TWO_MANT_DIG) || /* rounding error: 2x+y / 2y computed too small */ (z > - y - L_(2.0) * L_(1.0e9) / TWO_MANT_DIG && z < - y + L_(2.0) * L_(1.0e9) / TWO_MANT_DIG)); } } { int large_exp = (MAX_EXP - 1 < 1000 ? 
MAX_EXP - 1 : 1000); DOUBLE large = my_ldexp (L_(1.0), large_exp); /* = 2^large_exp */ for (i = 0; i < SIZEOF (RANDOM) / 10; i++) for (j = 0; j < SIZEOF (RANDOM) / 10; j++) { DOUBLE x = large * RANDOM[i]; /* 0.0 <= x <= 2^large_exp */ DOUBLE y = RANDOM[j]; /* 0.0 <= y < 1.0 */ if (y > L_(0.0)) { DOUBLE z = REMAINDER (x, y); /* Regardless how large the rounding errors are, the result must be >= -y/2, <= y/2. */ ASSERT (z >= - L_(0.5) * y); ASSERT (z <= L_(0.5) * y); } } } } volatile DOUBLE x; volatile DOUBLE y; DOUBLE z;
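/* A quick standalone check of the exponent-splitting identity used to build
   TWO_MANT_DIG above, n = floor(n/5) + floor((n+1)/5) + ... + floor((n+4)/5),
   which guarantees that none of the five partial shifts exceeds 31 bits.
   Guarded out so it does not affect the test; the MANT_DIG value printed
   here (53, IEEE double) is only an example.  */
#if 0
#include <assert.h>
#include <stdio.h>

int
main (void)
{
  int n;

  for (n = 0; n <= 5 * 31; n++)
    assert (n == n / 5 + (n + 1) / 5 + (n + 2) / 5 + (n + 3) / 5 + (n + 4) / 5);

  n = 53 - 1; /* MANT_DIG - 1 for IEEE double */
  printf ("%d = %d + %d + %d + %d + %d\n", n,
          n / 5, (n + 1) / 5, (n + 2) / 5, (n + 3) / 5, (n + 4) / 5);
  return 0;
}
#endif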
/* Return non-zero if OBJFILE has full symbols. */ int objfile_has_full_symbols (struct objfile *objfile) { return objfile->symtabs != NULL; }
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2015, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/include/lprocfs_status.h * * Top level header file for LProc SNMP * * Author: Hariharan Thantry thantry@users.sourceforge.net */ #ifndef _LPROCFS_SNMP_H #define _LPROCFS_SNMP_H #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/types.h> #include "../../include/linux/libcfs/libcfs.h" #include "lustre_cfg.h" #include "lustre/lustre_idl.h" struct lprocfs_vars { const char *name; const struct file_operations *fops; void *data; /** * sysfs file mode. */ umode_t proc_mode; }; struct lprocfs_static_vars { struct lprocfs_vars *obd_vars; struct attribute_group *sysfs_vars; }; /* if we find more consumers this could be generalized */ #define OBD_HIST_MAX 32 struct obd_histogram { spinlock_t oh_lock; unsigned long oh_buckets[OBD_HIST_MAX]; }; enum { BRW_R_PAGES = 0, BRW_W_PAGES, BRW_R_RPC_HIST, BRW_W_RPC_HIST, BRW_R_IO_TIME, BRW_W_IO_TIME, BRW_R_DISCONT_PAGES, BRW_W_DISCONT_PAGES, BRW_R_DISCONT_BLOCKS, BRW_W_DISCONT_BLOCKS, BRW_R_DISK_IOSIZE, BRW_W_DISK_IOSIZE, BRW_R_DIO_FRAGS, BRW_W_DIO_FRAGS, BRW_LAST, }; struct brw_stats { struct obd_histogram hist[BRW_LAST]; }; enum { RENAME_SAMEDIR_SIZE = 0, RENAME_CROSSDIR_SRC_SIZE, RENAME_CROSSDIR_TGT_SIZE, RENAME_LAST, }; struct rename_stats { struct obd_histogram hist[RENAME_LAST]; }; /* An lprocfs counter can be configured using the enum bit masks below. * * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already * protects this counter from concurrent updates. If not specified, * lprocfs an internal per-counter lock variable. External locks are * not used to protect counter increments, but are used to protect * counter readout and resets. * * LPROCFS_CNTR_AVGMINMAX indicates a multi-valued counter samples, * (i.e. counter can be incremented by more than "1"). When specified, * the counter maintains min, max and sum in addition to a simple * invocation count. This allows averages to be be computed. * If not specified, the counter is an increment-by-1 counter. * min, max, sum, etc. are not maintained. * * LPROCFS_CNTR_STDDEV indicates that the counter should track sum of * squares (for multi-valued counter samples only). This allows * external computation of standard deviation, but involves a 64-bit * multiply per counter increment. 
*/ enum { LPROCFS_CNTR_EXTERNALLOCK = 0x0001, LPROCFS_CNTR_AVGMINMAX = 0x0002, LPROCFS_CNTR_STDDEV = 0x0004, /* counter data type */ LPROCFS_TYPE_REGS = 0x0100, LPROCFS_TYPE_BYTES = 0x0200, LPROCFS_TYPE_PAGES = 0x0400, LPROCFS_TYPE_CYCLE = 0x0800, }; #define LC_MIN_INIT ((~(__u64)0) >> 1) struct lprocfs_counter_header { unsigned int lc_config; const char *lc_name; /* must be static */ const char *lc_units; /* must be static */ }; struct lprocfs_counter { __s64 lc_count; __s64 lc_min; __s64 lc_max; __s64 lc_sumsquare; /* * Every counter has lc_array_sum[0], while lc_array_sum[1] is only * for irq context counter, i.e. stats with * LPROCFS_STATS_FLAG_IRQ_SAFE flag, its counter need * lc_array_sum[1] */ __s64 lc_array_sum[1]; }; #define lc_sum lc_array_sum[0] #define lc_sum_irq lc_array_sum[1] struct lprocfs_percpu { #ifndef __GNUC__ __s64 pad; #endif struct lprocfs_counter lp_cntr[0]; }; enum lprocfs_stats_lock_ops { LPROCFS_GET_NUM_CPU = 0x0001, /* number allocated per-CPU stats */ LPROCFS_GET_SMP_ID = 0x0002, /* current stat to be updated */ }; enum lprocfs_stats_flags { LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu * area and need locking */ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ }; enum lprocfs_fields_flags { LPROCFS_FIELDS_FLAGS_CONFIG = 0x0001, LPROCFS_FIELDS_FLAGS_SUM = 0x0002, LPROCFS_FIELDS_FLAGS_MIN = 0x0003, LPROCFS_FIELDS_FLAGS_MAX = 0x0004, LPROCFS_FIELDS_FLAGS_AVG = 0x0005, LPROCFS_FIELDS_FLAGS_SUMSQUARE = 0x0006, LPROCFS_FIELDS_FLAGS_COUNT = 0x0007, }; struct lprocfs_stats { /* # of counters */ unsigned short ls_num; /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */ unsigned short ls_biggest_alloc_num; enum lprocfs_stats_flags ls_flags; /* Lock used when there are no percpu stats areas; For percpu stats, * it is used to protect ls_biggest_alloc_num change */ spinlock_t ls_lock; /* has ls_num of counter headers */ struct lprocfs_counter_header *ls_cnt_header; struct lprocfs_percpu *ls_percpu[0]; }; #define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC) /* Pack all opcodes down into a single monotonically increasing index */ static inline int opcode_offset(__u32 opc) { if (opc < OST_LAST_OPC) { /* OST opcode */ return (opc - OST_FIRST_OPC); } else if (opc < MDS_LAST_OPC) { /* MDS opcode */ return (opc - MDS_FIRST_OPC + OPC_RANGE(OST)); } else if (opc < LDLM_LAST_OPC) { /* LDLM Opcode */ return (opc - LDLM_FIRST_OPC + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < MGS_LAST_OPC) { /* MGS Opcode */ return (opc - MGS_FIRST_OPC + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < OBD_LAST_OPC) { /* OBD Ping */ return (opc - OBD_FIRST_OPC + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < LLOG_LAST_OPC) { /* LLOG Opcode */ return (opc - LLOG_FIRST_OPC + OPC_RANGE(OBD) + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < QUOTA_LAST_OPC) { /* LQUOTA Opcode */ return (opc - QUOTA_FIRST_OPC + OPC_RANGE(LLOG) + OPC_RANGE(OBD) + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < SEQ_LAST_OPC) { /* SEQ opcode */ return (opc - SEQ_FIRST_OPC + OPC_RANGE(QUOTA) + OPC_RANGE(LLOG) + OPC_RANGE(OBD) + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < SEC_LAST_OPC) { /* SEC opcode */ return (opc - SEC_FIRST_OPC + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + OPC_RANGE(LLOG) + OPC_RANGE(OBD) + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + 
OPC_RANGE(MDS) + OPC_RANGE(OST)); } else if (opc < FLD_LAST_OPC) { /* FLD opcode */ return (opc - FLD_FIRST_OPC + OPC_RANGE(SEC) + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + OPC_RANGE(LLOG) + OPC_RANGE(OBD) + OPC_RANGE(MGS) + OPC_RANGE(LDLM) + OPC_RANGE(MDS) + OPC_RANGE(OST)); } else { /* Unknown Opcode */ return -1; } } #define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \ OPC_RANGE(MDS) + \ OPC_RANGE(LDLM) + \ OPC_RANGE(MGS) + \ OPC_RANGE(OBD) + \ OPC_RANGE(LLOG) + \ OPC_RANGE(SEC) + \ OPC_RANGE(SEQ) + \ OPC_RANGE(SEC) + \ OPC_RANGE(FLD)) #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ OPC_RANGE(EXTRA)) enum { PTLRPC_REQWAIT_CNTR = 0, PTLRPC_REQQDEPTH_CNTR, PTLRPC_REQACTIVE_CNTR, PTLRPC_TIMEOUT, PTLRPC_REQBUF_AVAIL_CNTR, PTLRPC_LAST_CNTR }; #define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR enum { LDLM_GLIMPSE_ENQUEUE = 0, LDLM_PLAIN_ENQUEUE, LDLM_EXTENT_ENQUEUE, LDLM_FLOCK_ENQUEUE, LDLM_IBITS_ENQUEUE, MDS_REINT_SETATTR, MDS_REINT_CREATE, MDS_REINT_LINK, MDS_REINT_UNLINK, MDS_REINT_RENAME, MDS_REINT_OPEN, MDS_REINT_SETXATTR, BRW_READ_BYTES, BRW_WRITE_BYTES, EXTRA_LAST_OPC }; #define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE /* class_obd.c */ extern struct dentry *debugfs_lustre_root; extern struct kobject *lustre_kobj; struct obd_device; struct obd_histogram; /* Days / hours / mins / seconds format */ struct dhms { int d, h, m, s; }; static inline void s2dhms(struct dhms *ts, time64_t secs64) { unsigned int secs; ts->d = div_u64_rem(secs64, 86400, &secs); ts->h = secs / 3600; secs = secs % 3600; ts->m = secs / 60; ts->s = secs % 60; } #define DHMS_FMT "%dd%dh%02dm%02ds" #define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s #define JOBSTATS_JOBID_VAR_MAX_LEN 20 #define JOBSTATS_DISABLE "disable" #define JOBSTATS_PROCNAME_UID "procname_uid" #define JOBSTATS_NODELOCAL "nodelocal" /* obd_config.c */ void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)); int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count, int *val, int mult); int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, int mult); int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid); int lprocfs_stats_lock(struct lprocfs_stats *stats, enum lprocfs_stats_lock_ops opc, unsigned long *flags); void lprocfs_stats_unlock(struct lprocfs_stats *stats, enum lprocfs_stats_lock_ops opc, unsigned long *flags); static inline unsigned int lprocfs_stats_counter_size(struct lprocfs_stats *stats) { unsigned int percpusize; percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]); /* irq safe stats need lc_array_sum[1] */ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) percpusize += stats->ls_num * sizeof(__s64); if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0) percpusize = L1_CACHE_ALIGN(percpusize); return percpusize; } static inline struct lprocfs_counter * lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid, int index) { struct lprocfs_counter *cntr; cntr = &stats->ls_percpu[cpuid]->lp_cntr[index]; if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) cntr = (void *)cntr + index * sizeof(__s64); return cntr; } /* Two optimized LPROCFS counter increment functions are provided: * lprocfs_counter_incr(cntr, value) - optimized for by-one counters * lprocfs_counter_add(cntr) - use for multi-valued counters * Counter data layout allows config flag, counter lock and the * count itself to reside within a single cache line. 
*/ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount); void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount); #define lprocfs_counter_incr(stats, idx) \ lprocfs_counter_add(stats, idx, 1) #define lprocfs_counter_decr(stats, idx) \ lprocfs_counter_sub(stats, idx, 1) __s64 lprocfs_read_helper(struct lprocfs_counter *lc, struct lprocfs_counter_header *header, enum lprocfs_stats_flags flags, enum lprocfs_fields_flags field); __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx, enum lprocfs_fields_flags field); extern struct lprocfs_stats * lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags); void lprocfs_clear_stats(struct lprocfs_stats *stats); void lprocfs_free_stats(struct lprocfs_stats **stats); void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned int conf, const char *name, const char *units); struct obd_export; int lprocfs_exp_cleanup(struct obd_export *exp); struct dentry *ldebugfs_add_simple(struct dentry *root, char *name, void *data, const struct file_operations *fops); int ldebugfs_register_stats(struct dentry *parent, const char *name, struct lprocfs_stats *stats); /* lprocfs_status.c */ int ldebugfs_add_vars(struct dentry *parent, struct lprocfs_vars *var, void *data); struct dentry *ldebugfs_register(const char *name, struct dentry *parent, struct lprocfs_vars *list, void *data); void ldebugfs_remove(struct dentry **entryp); int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, struct attribute_group *attrs); int lprocfs_obd_cleanup(struct obd_device *obd); int ldebugfs_seq_create(struct dentry *parent, const char *name, umode_t mode, const struct file_operations *seq_fops, void *data); int ldebugfs_obd_seq_create(struct obd_device *dev, const char *name, umode_t mode, const struct file_operations *seq_fops, void *data); /* Generic callbacks */ int lprocfs_rd_uint(struct seq_file *m, void *data); int lprocfs_wr_uint(struct file *file, const char __user *buffer, unsigned long count, void *data); int lprocfs_rd_server_uuid(struct seq_file *m, void *data); int lprocfs_rd_conn_uuid(struct seq_file *m, void *data); int lprocfs_rd_import(struct seq_file *m, void *data); int lprocfs_rd_state(struct seq_file *m, void *data); int lprocfs_rd_connect_flags(struct seq_file *m, void *data); struct adaptive_timeout; int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at); int lprocfs_rd_timeouts(struct seq_file *m, void *data); int lprocfs_wr_ping(struct file *file, const char __user *buffer, size_t count, loff_t *off); int lprocfs_wr_import(struct file *file, const char __user *buffer, size_t count, loff_t *off); int lprocfs_rd_pinger_recov(struct seq_file *m, void *n); int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, size_t count, loff_t *off); /* Statfs helpers */ int lprocfs_write_helper(const char __user *buffer, unsigned long count, int *val); int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, __u64 *val); int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count, __u64 *val, int mult); char *lprocfs_find_named_value(const char *buffer, const char *name, size_t *count); void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value); void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value); void lprocfs_oh_clear(struct obd_histogram *oh); unsigned long lprocfs_oh_sum(struct obd_histogram *oh); void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, 
struct lprocfs_counter *cnt); int lprocfs_single_release(struct inode *inode, struct file *file); int lprocfs_seq_release(struct inode *inode, struct file *file); /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only * proc entries; otherwise, you will define name##_seq_write function also for * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */ #define __LPROC_SEQ_FOPS(name, custom_seq_write) \ static int name##_single_open(struct inode *inode, struct file *file) \ { \ return single_open(file, name##_seq_show, inode->i_private); \ } \ static const struct file_operations name##_fops = { \ .owner = THIS_MODULE, \ .open = name##_single_open, \ .read = seq_read, \ .write = custom_seq_write, \ .llseek = seq_lseek, \ .release = lprocfs_single_release, \ } #define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL) #define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write) #define LPROC_SEQ_FOPS_RO_TYPE(name, type) \ static int name##_##type##_seq_show(struct seq_file *m, void *v)\ { \ return lprocfs_rd_##type(m, m->private); \ } \ LPROC_SEQ_FOPS_RO(name##_##type) #define LPROC_SEQ_FOPS_RW_TYPE(name, type) \ static int name##_##type##_seq_show(struct seq_file *m, void *v)\ { \ return lprocfs_rd_##type(m, m->private); \ } \ static ssize_t name##_##type##_seq_write(struct file *file, \ const char __user *buffer, size_t count, \ loff_t *off) \ { \ struct seq_file *seq = file->private_data; \ return lprocfs_wr_##type(file, buffer, \ count, seq->private); \ } \ LPROC_SEQ_FOPS(name##_##type) #define LPROC_SEQ_FOPS_WR_ONLY(name, type) \ static ssize_t name##_##type##_write(struct file *file, \ const char __user *buffer, size_t count, \ loff_t *off) \ { \ return lprocfs_wr_##type(file, buffer, count, off); \ } \ static int name##_##type##_open(struct inode *inode, struct file *file) \ { \ return single_open(file, NULL, inode->i_private); \ } \ static const struct file_operations name##_##type##_fops = { \ .open = name##_##type##_open, \ .write = name##_##type##_write, \ .release = lprocfs_single_release, \ } struct lustre_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len); }; #define LUSTRE_ATTR(name, mode, show, store) \ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store) #define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL) #define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store) extern const struct sysfs_ops lustre_sysfs_ops; struct root_squash_info; int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count, struct root_squash_info *squash, char *name); int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count, struct root_squash_info *squash, char *name); /* all quota proc functions */ int lprocfs_quota_rd_bunit(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_bunit(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_btune(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_btune(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_iunit(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_iunit(struct file *file, const char 
*buffer, unsigned long count, void *data); int lprocfs_quota_rd_itune(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_itune(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_type(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_switch_seconds(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_switch_qs(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_boundary_factor(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_least_bunit(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_least_iunit(struct file *file, const char *buffer, unsigned long count, void *data); int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off, int count, int *eof, void *data); int lprocfs_quota_wr_qs_factor(struct file *file, const char *buffer, unsigned long count, void *data); #endif /* LPROCFS_SNMP_H */
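/*
 * Hedged usage sketch of the pattern described in the comment above
 * __LPROC_SEQ_FOPS, guarded out so it does not affect this header.  The
 * entry name "foo" and the obd_name field printed here are illustrative
 * only; this is not a complete, buildable unit on its own.
 */
#if 0
static int foo_seq_show(struct seq_file *m, void *v)
{
	struct obd_device *obd = m->private;	/* the data pointer passed below */

	seq_printf(m, "%s\n", obd->obd_name);
	return 0;
}
LPROC_SEQ_FOPS_RO(foo);				/* generates a read-only foo_fops */

static int foo_register(struct obd_device *obd)
{
	/* final step from the comment above: create the debugfs entry */
	return ldebugfs_obd_seq_create(obd, "foo", 0444, &foo_fops, obd);
}
#endif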
// Numbering order: buttons, then axes, then hats const char* PC_GetJoystickKeyName(int portnum, int keynum) { int keycode = (portnum*JOY_MAX_INPUTS) + keynum; int firstAxis = joysticks[portnum].NumButtons; int firstHat = firstAxis + (2*joysticks[portnum].NumAxes); int firstUnknown = firstHat + (4*joysticks[portnum].NumHats); if (keynum < firstAxis+1) return JoystickButtonNames[keycode]; else if (keynum < firstHat+1) return JoystickAxisNames[keycode-firstAxis]; else if (keynum < firstUnknown+1) return JoystickHatNames[keycode-firstHat]; else return JoystickUnknownNames[keycode-firstUnknown]; }
/* * Copyright (C) 2022 Gunar Schorcht * * This file is subject to the terms and conditions of the GNU Lesser * General Public License v2.1. See the file LICENSE in the top level * directory for more details. */ /** * @ingroup cpu_esp32 * @ingroup drivers_periph_gpio * @{ * * @file * @brief Architecture-specific GPIO definitions for ESP32-S2 variant (family) * * @author Gunar Schorcht <gunar@schorcht.net> * @} */ #include "gpio_arch.h" #include "soc/io_mux_reg.h" #if !IS_USED(MODULE_ESP_IDF_GPIO_HAL) /* GPIO to IOMUX register mapping (see Technical Reference, Section 5.14.2 Register Summary) https://www.espressif.com/sites/default/files/documentation/esp32-s2_technical_reference_manual_en.pdf */ const uint32_t _gpio_to_iomux_reg[GPIO_PIN_NUMOF] = { PERIPHS_IO_MUX_GPIO0_U, /* GPIO0 */ PERIPHS_IO_MUX_GPIO1_U, /* GPIO1 */ PERIPHS_IO_MUX_GPIO2_U, /* GPIO2 */ PERIPHS_IO_MUX_GPIO3_U, /* GPIO3 */ PERIPHS_IO_MUX_GPIO4_U, /* GPIO4 */ PERIPHS_IO_MUX_GPIO5_U, /* GPIO5 */ PERIPHS_IO_MUX_GPIO6_U, /* GPIO6 */ PERIPHS_IO_MUX_GPIO7_U, /* GPIO7 */ PERIPHS_IO_MUX_GPIO8_U, /* GPIO8 */ PERIPHS_IO_MUX_GPIO9_U, /* GPIO9 */ PERIPHS_IO_MUX_GPIO10_U, /* GPIO10 */ PERIPHS_IO_MUX_GPIO11_U, /* GPIO11 */ PERIPHS_IO_MUX_GPIO12_U, /* GPIO12 */ PERIPHS_IO_MUX_GPIO13_U, /* GPIO13 */ PERIPHS_IO_MUX_GPIO14_U, /* GPIO14 */ PERIPHS_IO_MUX_XTAL_32K_P_U, /* GPIO15 used for 32k XTAL */ PERIPHS_IO_MUX_XTAL_32K_N_U, /* GPIO16 used for 32k XTAL */ PERIPHS_IO_MUX_DAC_1_U, /* GPIO17 */ PERIPHS_IO_MUX_DAC_2_U, /* GPIO18 */ PERIPHS_IO_MUX_GPIO19_U, /* GPIO19 */ PERIPHS_IO_MUX_GPIO20_U, /* GPIO20 */ PERIPHS_IO_MUX_GPIO21_U, /* GPIO21 */ 0, /* GPIO22 is not available */ 0, /* GPIO23 is not available */ 0, /* GPIO24 is not available */ 0, /* GPIO25 is not available */ PERIPHS_IO_MUX_SPICS1_U, /* GPIO26 used as SPI CS1 */ PERIPHS_IO_MUX_SPIHD_U, /* GPIO27 used as SPI HS for Flash */ PERIPHS_IO_MUX_SPIWP_U, /* GPIO28 used as SPI WP for Flash */ PERIPHS_IO_MUX_SPICS0_U, /* GPIO29 used as SPI CS0 for Flash */ PERIPHS_IO_MUX_SPICLK_U, /* GPIO30 used as SPI CLK for Flash */ PERIPHS_IO_MUX_SPID_U, /* GPIO31 used as SPI D for Flash */ PERIPHS_IO_MUX_SPIQ_U, /* GPIO32 used as SPI Q for Flash */ PERIPHS_IO_MUX_GPIO33_U, /* GPIO33 */ PERIPHS_IO_MUX_GPIO34_U, /* GPIO34 */ PERIPHS_IO_MUX_GPIO35_U, /* GPIO35 */ PERIPHS_IO_MUX_GPIO36_U, /* GPIO36 */ PERIPHS_IO_MUX_GPIO37_U, /* GPIO37 */ PERIPHS_IO_MUX_GPIO38_U, /* GPIO38 */ PERIPHS_IO_MUX_MTCK_U, /* GPIO39 used as JTAG*/ PERIPHS_IO_MUX_MTDO_U, /* GPIO40 used as JTAG*/ PERIPHS_IO_MUX_MTDI_U, /* GPIO41 used as JTAG*/ PERIPHS_IO_MUX_MTMS_U, /* GPIO42 used as JTAG*/ PERIPHS_IO_MUX_U0TXD_U, /* GPIO43 used as UART0 TxD */ PERIPHS_IO_MUX_U0RXD_U, /* GPIO44 used as UART0 RxD */ PERIPHS_IO_MUX_GPIO45_U, /* GPIO45 */ PERIPHS_IO_MUX_GPIO46_U, /* GPIO46 */ }; #endif /* !IS_USED(MODULE_ESP_IDF_GPIO_HAL) */ /* Table of the usage type of each GPIO pin */ gpio_pin_usage_t _gpio_pin_usage[GPIO_PIN_NUMOF] = { _GPIO, /* GPIO0 */ _GPIO, /* GPIO1 */ _GPIO, /* GPIO2 */ _GPIO, /* GPIO3 */ _GPIO, /* GPIO4 */ _GPIO, /* GPIO5 */ _GPIO, /* GPIO6 */ _GPIO, /* GPIO7 */ _GPIO, /* GPIO8 */ _GPIO, /* GPIO9 */ _GPIO, /* GPIO10 */ _GPIO, /* GPIO11 */ _GPIO, /* GPIO12 */ _GPIO, /* GPIO13 */ _GPIO, /* GPIO14 */ #if MODULE_ESP_RTC_TIMER_32K _NOT_EXIST, /* GPIO15 is used for external 32K crystal */ _NOT_EXIST, /* GPIO16 is used for external 32K crystal */ #else _GPIO, /* GPIO15 */ _GPIO, /* GPIO16 */ #endif _GPIO, /* GPIO17 */ _GPIO, /* GPIO18 */ _GPIO, /* GPIO19 could be used for ESP USB/builtin USB2JTAG bridge */ _GPIO, /* GPIO20 could be 
used for ESP USB/builtin USB2JTAG bridge */ _GPIO, /* GPIO21 */ _NOT_EXIST, /* GPIO22 does not exist */ _NOT_EXIST, /* GPIO23 does not exist */ _NOT_EXIST, /* GPIO24 does not exist */ _NOT_EXIST, /* GPIO25 does not exist */ _NOT_EXIST, /* GPIO26 is used as direct I/O SPI CS1 for Flash/PSRAM */ #if defined(FLASH_MODE_QIO) || defined(FLASH_MODE_QOUT) _SPIF, /* GPIO27 is used as direct I/O SPI HD for Flash/PSRAM */ _SPIF, /* GPIO28 is used as direct I/O SPI WP for Flash/PSRAM */ #else _GPIO, /* GPIO27 */ _GPIO, /* GPIO28 */ #endif _SPIF, /* GPIO29 is used as direct I/O SPI CS0 for Flash/PSRAM */ _SPIF, /* GPIO30 is used as direct I/O SPI CLK for Flash/PSRAM */ _SPIF, /* GPIO31 is used as direct I/O SPI Q for Flash/PSRAM */ _SPIF, /* GPIO32 is used as direct I/O SPI D for Flash/PSRAM */ _GPIO, /* GPIO33 */ _GPIO, /* GPIO34 */ _GPIO, /* GPIO35 */ _GPIO, /* GPIO36 */ _GPIO, /* GPIO37 */ _GPIO, /* GPIO38 */ _GPIO, /* GPIO39 */ _GPIO, /* GPIO40 */ _GPIO, /* GPIO41 */ _GPIO, /* GPIO42 */ _UART, /* GPIO43 is used as direct I/O UART0 TxD */ _UART, /* GPIO44 is used as direct I/O UART0 RxD */ _GPIO, /* GPIO45 */ _GPIO, /* GPIO46 */ };
/* This file has been autogenerated by Ivory * Compiler version 0.1.0.3 */ #include "control_output_types.h" void control_output_get_le(const uint8_t *n_var0, uint32_t n_var1, struct control_output *n_var2) { ibool_get_le(n_var0, n_var1, &n_var2->armed); ivory_serialize_unpack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 1U), &n_var2->throttle); ivory_serialize_unpack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 5U), &n_var2->roll); ivory_serialize_unpack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 9U), &n_var2->pitch); ivory_serialize_unpack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 13U), &n_var2->yaw); } void control_output_get_be(const uint8_t *n_var0, uint32_t n_var1, struct control_output *n_var2) { ibool_get_be(n_var0, n_var1, &n_var2->armed); ivory_serialize_unpack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 1U), &n_var2->throttle); ivory_serialize_unpack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 5U), &n_var2->roll); ivory_serialize_unpack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 9U), &n_var2->pitch); ivory_serialize_unpack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 13U), &n_var2->yaw); } void control_output_set_le(uint8_t *n_var0, uint32_t n_var1, const struct control_output *n_var2) { ibool_set_le(n_var0, n_var1, &n_var2->armed); ivory_serialize_pack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 1U), &n_var2->throttle); ivory_serialize_pack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 5U), &n_var2->roll); ivory_serialize_pack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 9U), &n_var2->pitch); ivory_serialize_pack_float_le(n_var0, (uint32_t) (n_var1 + (uint32_t) 13U), &n_var2->yaw); } void control_output_set_be(uint8_t *n_var0, uint32_t n_var1, const struct control_output *n_var2) { ibool_set_be(n_var0, n_var1, &n_var2->armed); ivory_serialize_pack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 1U), &n_var2->throttle); ivory_serialize_pack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 5U), &n_var2->roll); ivory_serialize_pack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 9U), &n_var2->pitch); ivory_serialize_pack_float_be(n_var0, (uint32_t) (n_var1 + (uint32_t) 13U), &n_var2->yaw); }
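/*
 * Hedged round-trip sketch for the generated accessors above, guarded out so
 * it does not change this generated file.  It assumes control_output_types.h
 * defines struct control_output with an integer-like `armed` field plus
 * float throttle/roll/pitch/yaw fields, matching the 1 + 4*4 = 17 byte
 * layout the pack/unpack code above uses.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t buf[17];
    struct control_output out = { 0 };
    struct control_output in;

    out.armed = 1;
    out.throttle = 0.5f;
    out.roll = -0.1f;
    out.pitch = 0.2f;
    out.yaw = 0.0f;

    memset(buf, 0, sizeof(buf));
    control_output_set_le(buf, 0, &out);   /* pack at offset 0 */
    control_output_get_le(buf, 0, &in);    /* unpack into a fresh struct */

    printf("armed=%d throttle=%f\n", (int)in.armed, (double)in.throttle);
    return 0;
}
#endif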
/* *--------------------------------------------------------------------------- * * TclpInitPlatform -- * * Initialize all the platform-dependent things like signals and * floating-point error handling. * * Called at process initialization time. * * Results: * None. * * Side effects: * None. * *--------------------------------------------------------------------------- */ void TclpInitPlatform() { tclPlatform = TCL_PLATFORM_UNIX; if (TclOSseek(0, (Tcl_SeekOffset) 0, SEEK_CUR) == -1 && errno == EBADF) { open("/dev/null", O_RDONLY); } if (TclOSseek(1, (Tcl_SeekOffset) 0, SEEK_CUR) == -1 && errno == EBADF) { open("/dev/null", O_WRONLY); } if (TclOSseek(2, (Tcl_SeekOffset) 0, SEEK_CUR) == -1 && errno == EBADF) { open("/dev/null", O_WRONLY); } #ifdef SIGPIPE (void) signal(SIGPIPE, SIG_IGN); #endif #if defined(__FreeBSD__) && defined(__GNUC__) fpsetround(FP_RN); fpsetmask(0L); #endif #if defined(__bsdi__) && (_BSDI_VERSION > 199501) (void) dlopen (NULL, RTLD_NOW); #endif #ifdef GET_DARWIN_RELEASE { struct utsname name; if (!uname(&name)) { tclMacOSXDarwinRelease = strtol(name.release, NULL, 10); } } #endif }
/* * Consume CFDATA as much as we have already gotten and * compute the sum of CFDATA. */ static int64_t cab_minimum_consume_cfdata(struct archive_read *a, int64_t consumed_bytes) { struct cab *cab = (struct cab *)(a->format->data); struct cfdata *cfdata; int64_t cbytes, rbytes; int err; cfdata = cab->entry_cfdata; rbytes = consumed_bytes; if (cab->entry_cffolder->comptype == COMPTYPE_NONE) { if (consumed_bytes < cfdata->unconsumed) cbytes = consumed_bytes; else cbytes = cfdata->unconsumed; rbytes -= cbytes; cfdata->read_offset += (uint16_t)cbytes; cfdata->uncompressed_bytes_remaining -= (uint16_t)cbytes; cfdata->unconsumed -= cbytes; } else { cbytes = cfdata->uncompressed_avail - cfdata->read_offset; if (cbytes > 0) { if (consumed_bytes < cbytes) cbytes = consumed_bytes; rbytes -= cbytes; cfdata->read_offset += (uint16_t)cbytes; cfdata->uncompressed_bytes_remaining -= (uint16_t)cbytes; } if (cfdata->unconsumed) { cbytes = cfdata->unconsumed; cfdata->unconsumed = 0; } else cbytes = 0; } if (cbytes) { cab_checksum_update(a, (size_t)cbytes); __archive_read_consume(a, cbytes); cab->cab_offset += cbytes; cfdata->compressed_bytes_remaining -= (uint16_t)cbytes; if (cfdata->compressed_bytes_remaining == 0) { err = cab_checksum_finish(a); if (err < 0) return (err); } } return (rbytes); }
/** \fn void propagate_agents() * \brief Check agent positions to see if any need to be moved to another node. */ void propagate_agents() { }
/** * fc_seq_start_next() - Lock the exchange and get a new sequence * for a given sequence/exchange pair * @sp: The sequence/exchange to get a new exchange for */ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); spin_lock_bh(&ep->ex_lock); sp = fc_seq_start_next_locked(sp); spin_unlock_bh(&ep->ex_lock); return sp; }
// SPDX-License-Identifier: GPL-2.0+ #include <linux/extcon.h> #include <linux/iio/consumer.h> #include <linux/iio/iio.h> #include <linux/input-event-codes.h> #include <linux/mfd/wm8994/registers.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/regulator/consumer.h> #include <sound/jack.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "i2s.h" #include "../codecs/wm8994.h" #define ARIES_MCLK1_FREQ 24000000 struct aries_wm8994_variant { unsigned int modem_dai_fmt; bool has_fm_radio; }; struct aries_wm8994_data { struct extcon_dev *usb_extcon; struct regulator *reg_main_micbias; struct regulator *reg_headset_micbias; struct gpio_desc *gpio_headset_detect; struct gpio_desc *gpio_headset_key; struct gpio_desc *gpio_earpath_sel; struct iio_channel *adc; const struct aries_wm8994_variant *variant; }; /* USB dock */ static struct snd_soc_jack aries_dock; static struct snd_soc_jack_pin dock_pins[] = { { .pin = "LINE", .mask = SND_JACK_LINEOUT, }, }; static int aries_extcon_notifier(struct notifier_block *this, unsigned long connected, void *_cmd) { if (connected) snd_soc_jack_report(&aries_dock, SND_JACK_LINEOUT, SND_JACK_LINEOUT); else snd_soc_jack_report(&aries_dock, 0, SND_JACK_LINEOUT); return NOTIFY_DONE; } static struct notifier_block aries_extcon_notifier_block = { .notifier_call = aries_extcon_notifier, }; /* Headset jack */ static struct snd_soc_jack aries_headset; static struct snd_soc_jack_pin jack_pins[] = { { .pin = "HP", .mask = SND_JACK_HEADPHONE, }, { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, }; static struct snd_soc_jack_zone headset_zones[] = { { .min_mv = 0, .max_mv = 241, .jack_type = SND_JACK_HEADPHONE, }, { .min_mv = 242, .max_mv = 2980, .jack_type = SND_JACK_HEADSET, }, { .min_mv = 2981, .max_mv = UINT_MAX, .jack_type = SND_JACK_HEADPHONE, }, }; static irqreturn_t headset_det_irq_thread(int irq, void *data) { struct aries_wm8994_data *priv = (struct aries_wm8994_data *) data; int ret = 0; int time_left_ms = 300; int adc; while (time_left_ms > 0) { if (!gpiod_get_value(priv->gpio_headset_detect)) { snd_soc_jack_report(&aries_headset, 0, SND_JACK_HEADSET); gpiod_set_value(priv->gpio_earpath_sel, 0); return IRQ_HANDLED; } msleep(20); time_left_ms -= 20; } /* Temporarily enable micbias and earpath selector */ ret = regulator_enable(priv->reg_headset_micbias); if (ret) pr_err("%s failed to enable micbias: %d", __func__, ret); gpiod_set_value(priv->gpio_earpath_sel, 1); ret = iio_read_channel_processed(priv->adc, &adc); if (ret < 0) { /* failed to read ADC, so assume headphone */ pr_err("%s failed to read ADC, assuming headphones", __func__); snd_soc_jack_report(&aries_headset, SND_JACK_HEADPHONE, SND_JACK_HEADSET); } else { snd_soc_jack_report(&aries_headset, snd_soc_jack_get_type(&aries_headset, adc), SND_JACK_HEADSET); } ret = regulator_disable(priv->reg_headset_micbias); if (ret) pr_err("%s failed disable micbias: %d", __func__, ret); /* Disable earpath selector when no mic connected */ if (!(aries_headset.status & SND_JACK_MICROPHONE)) gpiod_set_value(priv->gpio_earpath_sel, 0); return IRQ_HANDLED; } static int headset_button_check(void *data) { struct aries_wm8994_data *priv = (struct aries_wm8994_data *) data; /* Filter out keypresses when 4 pole jack not detected */ if (gpiod_get_value_cansleep(priv->gpio_headset_key) && aries_headset.status & SND_JACK_MICROPHONE) return SND_JACK_BTN_0; return 0; } static struct snd_soc_jack_gpio headset_button_gpio[] = { { .name = "Media 
Button", .report = SND_JACK_BTN_0, .debounce_time = 30, .jack_status_check = headset_button_check, }, }; static int aries_spk_cfg(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_card *card = w->dapm->card; struct snd_soc_pcm_runtime *rtd; struct snd_soc_component *component; int ret = 0; rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]); component = asoc_rtd_to_codec(rtd, 0)->component; /** * We have an odd setup - the SPKMODE pin is pulled up so * we only have access to the left side SPK configs, * but SPKOUTR isn't bridged so when playing back in * stereo, we only get the left hand channel. The only * option we're left with is to force the AIF into mono * mode. */ switch (event) { case SND_SOC_DAPM_POST_PMU: ret = snd_soc_component_update_bits(component, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MONO, WM8994_AIF1DAC1_MONO); break; case SND_SOC_DAPM_PRE_PMD: ret = snd_soc_component_update_bits(component, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MONO, 0); break; } return ret; } static int aries_main_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_card *card = w->dapm->card; struct aries_wm8994_data *priv = snd_soc_card_get_drvdata(card); int ret = 0; switch (event) { case SND_SOC_DAPM_PRE_PMU: ret = regulator_enable(priv->reg_main_micbias); break; case SND_SOC_DAPM_POST_PMD: ret = regulator_disable(priv->reg_main_micbias); break; } return ret; } static int aries_headset_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_card *card = w->dapm->card; struct aries_wm8994_data *priv = snd_soc_card_get_drvdata(card); int ret = 0; switch (event) { case SND_SOC_DAPM_PRE_PMU: ret = regulator_enable(priv->reg_headset_micbias); break; case SND_SOC_DAPM_POST_PMD: ret = regulator_disable(priv->reg_headset_micbias); break; } return ret; } static const struct snd_kcontrol_new aries_controls[] = { SOC_DAPM_PIN_SWITCH("Modem In"), SOC_DAPM_PIN_SWITCH("Modem Out"), }; static const struct snd_soc_dapm_widget aries_dapm_widgets[] = { SND_SOC_DAPM_HP("HP", NULL), SND_SOC_DAPM_SPK("SPK", aries_spk_cfg), SND_SOC_DAPM_SPK("RCV", NULL), SND_SOC_DAPM_LINE("LINE", NULL), SND_SOC_DAPM_MIC("Main Mic", aries_main_bias), SND_SOC_DAPM_MIC("Headset Mic", aries_headset_bias), SND_SOC_DAPM_MIC("Bluetooth Mic", NULL), SND_SOC_DAPM_SPK("Bluetooth SPK", NULL), SND_SOC_DAPM_LINE("Modem In", NULL), SND_SOC_DAPM_LINE("Modem Out", NULL), /* This must be last as it is conditionally not used */ SND_SOC_DAPM_LINE("FM In", NULL), }; static int aries_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0); unsigned int pll_out; int ret; /* AIF1CLK should be >=3MHz for optimal performance */ if (params_width(params) == 24) pll_out = params_rate(params) * 384; else if (params_rate(params) == 8000 || params_rate(params) == 11025) pll_out = params_rate(params) * 512; else pll_out = params_rate(params) * 256; ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1, ARIES_MCLK1_FREQ, pll_out); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1, pll_out, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static int aries_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0); int 
ret; /* Switch sysclk to MCLK1 */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_MCLK1, ARIES_MCLK1_FREQ, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* Stop PLL */ ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1, ARIES_MCLK1_FREQ, 0); if (ret < 0) return ret; return 0; } /* * Main DAI operations */ static struct snd_soc_ops aries_ops = { .hw_params = aries_hw_params, .hw_free = aries_hw_free, }; static int aries_baseband_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0); unsigned int pll_out; int ret; pll_out = 8000 * 512; /* Set the codec FLL */ ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, WM8994_FLL_SRC_MCLK1, ARIES_MCLK1_FREQ, pll_out); if (ret < 0) return ret; /* Set the codec system clock */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2, pll_out, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static int aries_late_probe(struct snd_soc_card *card) { struct aries_wm8994_data *priv = snd_soc_card_get_drvdata(card); int ret, irq; ret = snd_soc_card_jack_new(card, "Dock", SND_JACK_LINEOUT, &aries_dock, dock_pins, ARRAY_SIZE(dock_pins)); if (ret) return ret; ret = devm_extcon_register_notifier(card->dev, priv->usb_extcon, EXTCON_JACK_LINE_OUT, &aries_extcon_notifier_block); if (ret) return ret; if (extcon_get_state(priv->usb_extcon, EXTCON_JACK_LINE_OUT) > 0) snd_soc_jack_report(&aries_dock, SND_JACK_LINEOUT, SND_JACK_LINEOUT); else snd_soc_jack_report(&aries_dock, 0, SND_JACK_LINEOUT); ret = snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, &aries_headset, jack_pins, ARRAY_SIZE(jack_pins)); if (ret) return ret; ret = snd_soc_jack_add_zones(&aries_headset, ARRAY_SIZE(headset_zones), headset_zones); if (ret) return ret; irq = gpiod_to_irq(priv->gpio_headset_detect); if (irq < 0) { dev_err(card->dev, "Failed to map headset detect gpio to irq"); return -EINVAL; } ret = devm_request_threaded_irq(card->dev, irq, NULL, headset_det_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "headset_detect", priv); if (ret) { dev_err(card->dev, "Failed to request headset detect irq"); return ret; } headset_button_gpio[0].data = priv; headset_button_gpio[0].desc = priv->gpio_headset_key; snd_jack_set_key(aries_headset.jack, SND_JACK_BTN_0, KEY_MEDIA); return snd_soc_jack_add_gpios(&aries_headset, ARRAY_SIZE(headset_button_gpio), headset_button_gpio); } static const struct snd_soc_pcm_stream baseband_params = { .formats = SNDRV_PCM_FMTBIT_S16_LE, .rate_min = 8000, .rate_max = 8000, .channels_min = 1, .channels_max = 1, }; static const struct snd_soc_pcm_stream bluetooth_params = { .formats = SNDRV_PCM_FMTBIT_S16_LE, .rate_min = 8000, .rate_max = 8000, .channels_min = 1, .channels_max = 2, }; static const struct snd_soc_dapm_widget aries_modem_widgets[] = { SND_SOC_DAPM_INPUT("Modem RX"), SND_SOC_DAPM_OUTPUT("Modem TX"), }; static const struct snd_soc_dapm_route aries_modem_routes[] = { { "Modem Capture", NULL, "Modem RX" }, { "Modem TX", NULL, "Modem Playback" }, }; static const struct snd_soc_component_driver aries_component = { .name = "aries-audio", .dapm_widgets = aries_modem_widgets, .num_dapm_widgets = ARRAY_SIZE(aries_modem_widgets), .dapm_routes = aries_modem_routes, .num_dapm_routes = ARRAY_SIZE(aries_modem_routes), .idle_bias_on = 1, .use_pmdown_time = 1, .endianness = 1, .non_legacy_dai_naming = 1, }; static struct snd_soc_dai_driver aries_ext_dai[] = { { .name = "Voice call", .playback = { .stream_name = "Modem Playback", .channels_min = 1, 
.channels_max = 1, .rate_min = 8000, .rate_max = 8000, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "Modem Capture", .channels_min = 1, .channels_max = 1, .rate_min = 8000, .rate_max = 8000, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, }; SND_SOC_DAILINK_DEFS(aif1, DAILINK_COMP_ARRAY(COMP_CPU(SAMSUNG_I2S_DAI)), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8994-aif1")), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(baseband, DAILINK_COMP_ARRAY(COMP_CPU("Voice call")), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8994-aif2"))); SND_SOC_DAILINK_DEFS(bluetooth, DAILINK_COMP_ARRAY(COMP_CPU("bt-sco-pcm")), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8994-aif3"))); static struct snd_soc_dai_link aries_dai[] = { { .name = "WM8994 AIF1", .stream_name = "HiFi", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .ops = &aries_ops, SND_SOC_DAILINK_REG(aif1), }, { .name = "WM8994 AIF2", .stream_name = "Baseband", .init = &aries_baseband_init, .params = &baseband_params, .ignore_suspend = 1, SND_SOC_DAILINK_REG(baseband), }, { .name = "WM8994 AIF3", .stream_name = "Bluetooth", .params = &bluetooth_params, .ignore_suspend = 1, SND_SOC_DAILINK_REG(bluetooth), }, }; static struct snd_soc_card aries_card = { .name = "ARIES", .owner = THIS_MODULE, .dai_link = aries_dai, .num_links = ARRAY_SIZE(aries_dai), .controls = aries_controls, .num_controls = ARRAY_SIZE(aries_controls), .dapm_widgets = aries_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(aries_dapm_widgets), .late_probe = aries_late_probe, }; static const struct aries_wm8994_variant fascinate4g_variant = { .modem_dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF, .has_fm_radio = false, }; static const struct aries_wm8994_variant aries_variant = { .modem_dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF, .has_fm_radio = true, }; static const struct of_device_id samsung_wm8994_of_match[] = { { .compatible = "samsung,fascinate4g-wm8994", .data = &fascinate4g_variant, }, { .compatible = "samsung,aries-wm8994", .data = &aries_variant, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match); static int aries_audio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *cpu, *codec, *extcon_np; struct device *dev = &pdev->dev; struct snd_soc_card *card = &aries_card; struct aries_wm8994_data *priv; struct snd_soc_dai_link *dai_link; const struct of_device_id *match; int ret, i; if (!np) return -EINVAL; card->dev = dev; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; snd_soc_card_set_drvdata(card, priv); match = of_match_node(samsung_wm8994_of_match, np); priv->variant = match->data; /* Remove FM widget if not present */ if (!priv->variant->has_fm_radio) card->num_dapm_widgets--; priv->reg_main_micbias = devm_regulator_get(dev, "main-micbias"); if (IS_ERR(priv->reg_main_micbias)) { dev_err(dev, "Failed to get main micbias regulator\n"); return PTR_ERR(priv->reg_main_micbias); } priv->reg_headset_micbias = devm_regulator_get(dev, "headset-micbias"); if (IS_ERR(priv->reg_headset_micbias)) { dev_err(dev, "Failed to get headset micbias regulator\n"); return PTR_ERR(priv->reg_headset_micbias); } priv->gpio_earpath_sel = devm_gpiod_get(dev, "earpath-sel", GPIOD_OUT_LOW); if (IS_ERR(priv->gpio_earpath_sel)) { dev_err(dev, "Failed to get earpath selector gpio"); return PTR_ERR(priv->gpio_earpath_sel); } extcon_np = 
of_parse_phandle(np, "extcon", 0); priv->usb_extcon = extcon_find_edev_by_node(extcon_np); if (IS_ERR(priv->usb_extcon)) { if (PTR_ERR(priv->usb_extcon) != -EPROBE_DEFER) dev_err(dev, "Failed to get extcon device"); return PTR_ERR(priv->usb_extcon); } of_node_put(extcon_np); priv->adc = devm_iio_channel_get(dev, "headset-detect"); if (IS_ERR(priv->adc)) { if (PTR_ERR(priv->adc) != -EPROBE_DEFER) dev_err(dev, "Failed to get ADC channel"); return PTR_ERR(priv->adc); } if (priv->adc->channel->type != IIO_VOLTAGE) return -EINVAL; priv->gpio_headset_key = devm_gpiod_get(dev, "headset-key", GPIOD_IN); if (IS_ERR(priv->gpio_headset_key)) { dev_err(dev, "Failed to get headset key gpio"); return PTR_ERR(priv->gpio_headset_key); } priv->gpio_headset_detect = devm_gpiod_get(dev, "headset-detect", GPIOD_IN); if (IS_ERR(priv->gpio_headset_detect)) { dev_err(dev, "Failed to get headset detect gpio"); return PTR_ERR(priv->gpio_headset_detect); } /* Update card-name if provided through DT, else use default name */ snd_soc_of_parse_card_name(card, "model"); ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing"); if (ret < 0) { dev_err(dev, "Audio routing invalid/unspecified\n"); return ret; } aries_dai[1].dai_fmt = priv->variant->modem_dai_fmt; cpu = of_get_child_by_name(dev->of_node, "cpu"); if (!cpu) return -EINVAL; codec = of_get_child_by_name(dev->of_node, "codec"); if (!codec) return -EINVAL; for_each_card_prelinks(card, i, dai_link) { dai_link->codecs->of_node = of_parse_phandle(codec, "sound-dai", 0); if (!dai_link->codecs->of_node) { ret = -EINVAL; goto out; } } /* Set CPU and platform of_node for main DAI */ aries_dai[0].cpus->of_node = of_parse_phandle(cpu, "sound-dai", 0); if (!aries_dai[0].cpus->of_node) { ret = -EINVAL; goto out; } aries_dai[0].platforms->of_node = aries_dai[0].cpus->of_node; /* Set CPU of_node for BT DAI */ aries_dai[2].cpus->of_node = of_parse_phandle(cpu, "sound-dai", 1); if (!aries_dai[2].cpus->of_node) { ret = -EINVAL; goto out; } ret = devm_snd_soc_register_component(dev, &aries_component, aries_ext_dai, ARRAY_SIZE(aries_ext_dai)); if (ret < 0) { dev_err(dev, "Failed to register component: %d\n", ret); goto out; } ret = devm_snd_soc_register_card(dev, card); if (ret) dev_err(dev, "snd_soc_register_card() failed:%d\n", ret); out: of_node_put(cpu); of_node_put(codec); return ret; } static struct platform_driver aries_audio_driver = { .driver = { .name = "aries-audio-wm8994", .of_match_table = of_match_ptr(samsung_wm8994_of_match), .pm = &snd_soc_pm_ops, }, .probe = aries_audio_probe, }; module_platform_driver(aries_audio_driver); MODULE_DESCRIPTION("ALSA SoC ARIES WM8994"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:aries-audio-wm8994");
// SPDX-License-Identifier: GPL-2.0 #include <linux/delay.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tty.h> #include <uapi/linux/serial.h> struct ledtrig_tty_data { struct led_classdev *led_cdev; struct delayed_work dwork; struct mutex mutex; const char *ttyname; struct tty_struct *tty; int rx, tx; }; static void ledtrig_tty_restart(struct ledtrig_tty_data *trigger_data) { schedule_delayed_work(&trigger_data->dwork, 0); } static ssize_t ttyname_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev); ssize_t len = 0; mutex_lock(&trigger_data->mutex); if (trigger_data->ttyname) len = sprintf(buf, "%s\n", trigger_data->ttyname); mutex_unlock(&trigger_data->mutex); return len; } static ssize_t ttyname_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev); char *ttyname; ssize_t ret = size; bool running; if (size > 0 && buf[size - 1] == '\n') size -= 1; if (size) { ttyname = kmemdup_nul(buf, size, GFP_KERNEL); if (!ttyname) return -ENOMEM; } else { ttyname = NULL; } mutex_lock(&trigger_data->mutex); running = trigger_data->ttyname != NULL; kfree(trigger_data->ttyname); tty_kref_put(trigger_data->tty); trigger_data->tty = NULL; trigger_data->ttyname = ttyname; mutex_unlock(&trigger_data->mutex); if (ttyname && !running) ledtrig_tty_restart(trigger_data); return ret; } static DEVICE_ATTR_RW(ttyname); static void ledtrig_tty_work(struct work_struct *work) { struct ledtrig_tty_data *trigger_data = container_of(work, struct ledtrig_tty_data, dwork.work); struct serial_icounter_struct icount; int ret; mutex_lock(&trigger_data->mutex); if (!trigger_data->ttyname) { /* exit without rescheduling */ mutex_unlock(&trigger_data->mutex); return; } /* try to get the tty corresponding to $ttyname */ if (!trigger_data->tty) { dev_t devno; struct tty_struct *tty; int ret; ret = tty_dev_name_to_number(trigger_data->ttyname, &devno); if (ret < 0) /* * A device with this name might appear later, so keep * retrying. */ goto out; tty = tty_kopen_shared(devno); if (IS_ERR(tty) || !tty) /* What to do? 
retry or abort */ goto out; trigger_data->tty = tty; } ret = tty_get_icount(trigger_data->tty, &icount); if (ret) { dev_info(trigger_data->tty->dev, "Failed to get icount, stopped polling\n"); mutex_unlock(&trigger_data->mutex); return; } if (icount.rx != trigger_data->rx || icount.tx != trigger_data->tx) { led_set_brightness_sync(trigger_data->led_cdev, LED_ON); trigger_data->rx = icount.rx; trigger_data->tx = icount.tx; } else { led_set_brightness_sync(trigger_data->led_cdev, LED_OFF); } out: mutex_unlock(&trigger_data->mutex); schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100)); } static struct attribute *ledtrig_tty_attrs[] = { &dev_attr_ttyname.attr, NULL }; ATTRIBUTE_GROUPS(ledtrig_tty); static int ledtrig_tty_activate(struct led_classdev *led_cdev) { struct ledtrig_tty_data *trigger_data; trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); if (!trigger_data) return -ENOMEM; led_set_trigger_data(led_cdev, trigger_data); INIT_DELAYED_WORK(&trigger_data->dwork, ledtrig_tty_work); trigger_data->led_cdev = led_cdev; mutex_init(&trigger_data->mutex); return 0; } static void ledtrig_tty_deactivate(struct led_classdev *led_cdev) { struct ledtrig_tty_data *trigger_data = led_get_trigger_data(led_cdev); cancel_delayed_work_sync(&trigger_data->dwork); kfree(trigger_data); } static struct led_trigger ledtrig_tty = { .name = "tty", .activate = ledtrig_tty_activate, .deactivate = ledtrig_tty_deactivate, .groups = ledtrig_tty_groups, }; module_led_trigger(ledtrig_tty); MODULE_AUTHOR("Uwe Kleine-König <u.kleine-koenig@pengutronix.de>"); MODULE_DESCRIPTION("UART LED trigger"); MODULE_LICENSE("GPL v2");
// Updates the book file.
int atualizar_livro(livro *pLivro, int posicao_titulo)
{
	FILE *arquivo = NULL;

	if ((arquivo = fopen("livros.bin", "rb+")) == NULL) {
		printf("\nFailed to open the book file (update)\n\n");
		system("PAUSE");
		return -1;
	}

	fseek(arquivo, posicao_titulo * (long)sizeof(livro), SEEK_SET);
	fwrite(pLivro, sizeof(livro), 1, arquivo);
	fclose(arquivo);
	return 0;
}
/******************************************************************************************************/ /*! * \fn void update_blktime_rhs(block_timestepper *ts) * * \brief Updates the right-hand side for a BLOCK timestepping scheme * * \param ts block Timestepping struct * * \return ts.rhstime RHS to solve with * */ void update_blktime_rhs(block_timestepper *ts) { SHORT status; if(ts->time_scheme==0) { dvector btmp = dvec_create(ts->rhs->row); dvector Mu = dvec_create(ts->rhs->row); dvector Lu = dvec_create(ts->rhs->row); dvec_axpyz(1.0,ts->rhs_prev,ts->rhs,&btmp); dvec_ax(0.5*ts->dt,&btmp); bdcsr_mxv(ts->M,ts->sol_prev->val,Mu.val); dvec_axpy(1.0,&Mu,&btmp); ts->L(ts->Ldata,ts->sol_prev->val,Lu.val); dvec_axpyz(-0.5*ts->dt,&Lu,&btmp,ts->rhs_time); dvec_free(&btmp); dvec_free(&Mu); dvec_free(&Lu); } else if(ts->time_scheme==1) { dvector btmp = dvec_create(ts->sol->row); bdcsr_mxv(ts->M,ts->sol_prev->val,btmp.val); dvec_axpyz(ts->dt,ts->rhs,&btmp,ts->rhs_time); dvec_free(&btmp); } else if(ts->time_scheme==2) { dvector btmp1 = dvec_create(ts->sol->row); dvector btmp2 = dvec_create(ts->sol->row); REAL* solprevptr; bdcsr_mxv(ts->M,ts->sol_prev->val,btmp1.val); dvec_ax(4.0/3.0,&btmp1); solprevptr = ts->sol_prev->val + ts->sol->row; bdcsr_mxv(ts->M,solprevptr,btmp2.val); dvec_ax(-1.0/3.0,&btmp2); dvec_axpy(1.0,&btmp1,&btmp2); dvec_axpyz((2.0/3.0)*ts->dt,ts->rhs,&btmp2,ts->rhs_time); dvec_free(&btmp1); dvec_free(&btmp2); } else { status = ERROR_TS_TYPE; check_error(status, __FUNCTION__); } return; }
/* Covariant shift of the src half wilson fermion field in the * * direction dir by one unit. The result is stored in dest. */ static void u_shift_hw_fermion(half_wilson_vector *src, half_wilson_vector *dest, int dir, msg_tag **mtag, half_wilson_vector *tmpvec) { #ifdef FN_DEBUG site *s ; int i ; #endif #ifdef FFSTIME double time0, time1; time1 = -dclock(); #endif memcpy( (char *)tmpvec, (char *)src, sites_on_node*sizeof(half_wilson_vector) ); if(*mtag == NULL) *mtag = start_gather_field(tmpvec, sizeof(half_wilson_vector), dir, EVENANDODD, gen_pt[dir]); else restart_gather_field(tmpvec, sizeof(half_wilson_vector), dir, EVENANDODD, gen_pt[dir], *mtag); wait_gather(*mtag); #ifdef FFSTIME time0 = -dclock(); time1 -= time0; #endif if(GOES_FORWARDS(dir)) { #ifdef FN_DEBUG FORALLSITES(i,s) mult_su3_mat_hwvec( forwardlink[dir]+i, (half_wilson_vector *)gen_pt[dir][i], dest + i ); #else mult_su3_fieldlink_lathwvec(forwardlink[dir], (half_wilson_vector **)gen_pt[dir], dest); #endif } else { #ifdef FN_DEBUG FORALLSITES(i,s) mult_adj_su3_mat_hwvec( backwardlink[OPP_DIR(dir)] + i, (half_wilson_vector *)gen_pt[dir][i], dest + i ); #else mult_adj_su3_fieldlink_lathwvec( backwardlink[OPP_DIR(dir)], (half_wilson_vector **)gen_pt[dir], dest); #endif } #ifdef FFSTIME time0 += dclock(); node0_printf("FFSHIFT time0 = %e\nFFSHIFT time1 = %e\n",time0,time1); #endif }
/** * Print parts of admin menu * * Create election when election is in the INIT state * Open voting when election is in the NEW state. * Close voting when election is in the OPEN state. * * @param s Election state * @param cred Auth credential id */ static void cgc_print_admin_menu(e_states s, auth_t cred) { if ((INIT == s) && (NO_AUTH == cred)) { SEND(STDOUT, MENU_CREATE_ELECTION, sizeof(MENU_CREATE_ELECTION)); } if ((NEW == s) && (E_MGR == cred)) { SEND(STDOUT, MENU_OPEN_VOTING, sizeof(MENU_OPEN_VOTING)); } if ((OPEN == s) && (E_MGR == cred)) { SEND(STDOUT, MENU_CLOSE_VOTING, sizeof(MENU_CLOSE_VOTING)); } if (((OPEN == s) || (NEW == s)) && (E_MGR == cred)) { SEND(STDOUT, MENU_ELECTION_STATUS, sizeof(MENU_ELECTION_STATUS)); } }
#include<stdio.h> int map[1010][1010]; int main() { int n,x1,y1,x2,y2,x,y,flag=0; scanf("%d",&n); scanf("%d%d",&x1,&y1); scanf("%d%d",&x2,&y2); scanf("%d%d",&x,&y); if(x2<x1&&y2>y1&&x<x1&&y>y1||x2<x1&&y2<y1&&x<x1&&y<y1||x2>x1&&y2>y1&&x>x1&&y>y1||x2>x1&&y2<y1&&x>x1&&y<y1) flag=1; if(flag) printf("YES\n"); else printf("NO\n"); return 0; }
/** * @brief Read/write the specified amount of data from the SPI driver. * * Note: This function is asynchronous. * * @param dev Pointer to the device structure for the driver instance * @param config Pointer to a valid spi_config structure instance. * @param tx_bufs Buffer array where data to be sent originates from, * or NULL if none. * @param rx_bufs Buffer array where data to be read will be written to, * or NULL if none. * @param async A pointer to a valid and ready to be signaled * struct k_poll_signal. (Note: if NULL this function will not * notify the end of the transaction, and whether it went * successfully or not). * * @retval 0 If successful, negative errno code otherwise. In case of slave * transaction: if successful it will return the amount of frames * received, negative errno code otherwise. */ static inline int spi_transceive_async(struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { const struct spi_driver_api *api = (const struct spi_driver_api *)dev->driver_api; return api->transceive_async(dev, config, tx_bufs, rx_bufs, async); }
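/*
 * A minimal usage sketch for the asynchronous transceive API documented
 * above, not part of the driver itself. The device binding name ("SPI_1"),
 * the 1 MHz clock, and the 4-byte buffers are placeholder assumptions, and
 * the usual Zephyr kernel and SPI headers are assumed to be included. The
 * caller hands a k_poll_signal to the driver and then blocks on k_poll()
 * until the transfer completes.
 */
static int spi_async_usage_sketch(void)
{
	struct device *dev = device_get_binding("SPI_1");	/* assumed label */
	static uint8_t tx_data[4] = { 0xde, 0xad, 0xbe, 0xef };
	static uint8_t rx_data[4];

	const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
	const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };

	struct spi_config cfg = {
		.frequency = 1000000U,
		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
	};

	struct k_poll_signal sig;
	struct k_poll_event evt = K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
							   K_POLL_MODE_NOTIFY_ONLY,
							   &sig);

	k_poll_signal_init(&sig);

	if (!dev || spi_transceive_async(dev, &cfg, &tx, &rx, &sig) < 0)
		return -1;

	/* Block until the driver signals the end of the transaction. */
	k_poll(&evt, 1, K_FOREVER);
	return 0;
}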
#ifndef STATS_H
#define STATS_H

#include <stdio.h>

/* Header for writing statistics */

/* Structs and functions to open file descriptors for logging output */
struct OutputFD
{
    FILE *FdEnergy;     /*!< file handle for energy.txt log-file. */
    FILE *FdCPU;        /*!< file handle for cpu.txt log-file. */
    FILE *FdSfr;        /*!< file handle for sfr.txt log-file. */
    FILE *FdBlackHoles; /*!< file handle for blackholes.txt log-file. */
    FILE *FdBlackholeDetails;   /*!< file handle for BlackholeDetails binary file. */
    FILE *FdHelium;     /*!< file handle for the Helium reionization log file helium.txt */
};

void set_stats_params(ParameterSet * ps);

void open_outputfiles(int RestartSnapNum, struct OutputFD * fds, const char * OutputDir, int BlackHoleOn, int StarformationOn);

void close_outputfiles(struct OutputFD *fds);

/* Write out a CPU log file */
void write_cpu_log(int NumCurrentTiStep, const double atime, FILE * FdCPU, double ElapsedTime);

/* Write out overall statistics of the energy of the simulation */
void energy_statistics(FILE * FdEnergy, const double Time, struct part_manager_type * PartManager);

#endif
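/*
 * A minimal usage sketch for the logging helpers declared above. It relies
 * only on the signatures in this header; the restart snapshot number, the
 * output directory, the feature flags, and the timestep/timing values are
 * placeholder assumptions, and the ParameterSet / part_manager_type objects
 * are assumed to be provided by the rest of the simulation code.
 */
void stats_usage_sketch(ParameterSet * ps, struct part_manager_type * PartManager)
{
    struct OutputFD fds;
    const char * OutputDir = "./output";    /* placeholder path */
    const double atime = 0.1;               /* placeholder scale factor */
    const double elapsed = 42.0;            /* placeholder wall-clock seconds */

    set_stats_params(ps);
    open_outputfiles(-1 /* no restart */, &fds, OutputDir,
                     1 /* BlackHoleOn */, 1 /* StarformationOn */);

    /* Per-timestep logging: CPU budget and global energy statistics. */
    write_cpu_log(0 /* NumCurrentTiStep */, atime, fds.FdCPU, elapsed);
    energy_statistics(fds.FdEnergy, atime, PartManager);

    close_outputfiles(&fds);
}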
/* * Connect hook. Start incoming USB transfers. * Netgraph context. */ static int ng_ubt_connect(hook_p hook) { struct ubt_softc *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); UBT_NG_LOCK(sc); ubt_task_schedule(sc, UBT_FLAG_T_START_ALL); UBT_NG_UNLOCK(sc); return (0); }
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mm/proc-syms.c * * Copyright (C) 2000-2002 Russell King */ #include <linux/module.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/proc-fns.h> #include <asm/tlbflush.h> #include <asm/page.h> #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); #ifdef CONFIG_MMU EXPORT_SYMBOL(cpu_set_pte_ext); #endif #else EXPORT_SYMBOL(processor); #endif #ifndef MULTI_CACHE EXPORT_SYMBOL(__cpuc_flush_kern_all); EXPORT_SYMBOL(__cpuc_flush_user_all); EXPORT_SYMBOL(__cpuc_flush_user_range); EXPORT_SYMBOL(__cpuc_coherent_kern_range); EXPORT_SYMBOL(__cpuc_flush_dcache_area); #else EXPORT_SYMBOL(cpu_cache); #endif #ifdef CONFIG_MMU #ifndef MULTI_USER EXPORT_SYMBOL(__cpu_clear_user_highpage); EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); #endif #endif /* * No module should need to touch the TLB (and currently * no modules do. We export this for "loadkernel" support * (booting a new kernel from within a running kernel.) */ #ifdef MULTI_TLB EXPORT_SYMBOL(cpu_tlb); #endif
// ---------------------------------
// release list built by above function
// MUST be called prior to application exit to properly release devices
// if not called (or app crashes) devices can be recovered by plugging into a different location in the USB chain

void HIDReleaseDeviceList (void)
{
    while (NULL != gpDeviceList)
        gpDeviceList = hid_DisposeDevice (gpDeviceList);
    gNumDevices = 0;
}
/* * Print the command line options and exit. */ static void help_arg(void) { usage(); putchar('\n'); printf("%s\n", _("Operations in command line mode:")); printf("%s\n", _(" -Q, --query Print items in a given query range")); printf("%s\n", _(" -G, --grep Grep items from the data files")); printf("%s\n", _(" -P, --purge Read items and write them back")); printf("%s\n", _("Query short forms:\n" "-a, -d <date>|<number>, -n, -r[<number>], -s[<date>], -t<number>")); putchar('\n'); printf("%s\n", _("Note that filter, format and day-range options affect input or output:")); printf("%s\n", _(" --filter-* Filter items loaded by -Q, -G, -P and -x")); printf("%s\n", _(" --format-* Rewrite output from -Q, -G and --dump-imported")); printf("%s\n", _(" --from <date> Limit day range of -Q.")); printf("%s\n", _(" --to <date> Limit day range of -Q.")); printf("%s\n", _(" --days <number> Limit day range of -Q.")); putchar('\n'); printf("%s\n", _(" --limit, -l <number> Limit number of query results")); printf("%s\n", _(" --search, -S <regexp> Match regular expression in queries")); printf("%s\n", _("Consult the man page for details.")); putchar('\n'); printf("%s\n", _("Miscellaneous:")); printf("%s\n", _(" -c, --calendar <file> The calendar data file to use")); printf("%s\n", _(" -C, --confdir <dir> The configuration directory to use")); printf("%s\n", _(" --daemon Run notification daemon in the background")); printf("%s\n", _(" -D, --datadir <dir> The data directory to use")); printf("%s\n", _(" -g, --gc Run the garbage collector")); printf("%s\n", _(" -h, --help Show this help text")); printf("%s\n", _(" -i, --import <file> Import iCal data from file")); printf("%s\n", _(" -q, --quiet Suppress import/export result message")); printf("%s\n", _(" --read-only Do not save configuration or data files")); printf("%s\n", _(" --status Display status of running instances")); printf("%s\n", _(" -v, --version Show version information")); printf("%s\n", _(" -x, --export[<format>] Export to stdout in ical (default) or pcal format")); putchar('\n'); printf("%s\n", _("For more information, type '?' from within calcurse, or read the manpage.")); printf("%s\n", _("Submit feature requests and suggestions to <misc@calcurse.org>.")); printf("%s\n", _("Submit bug reports to <bugs@calcurse.org>.")); }
/* Repaint the portion of the Smalltalk screen bounded by the affected
   rectangle. Used to synchronize the screen after a Bitblt to the
   Smalltalk Display object. */

int showDisplayBitsLeftTopRightBottom(int aForm, int l, int t, int r, int b) {
	if (deferDisplayUpdates) {
		return null;
	}
	displayBitsOfLeftTopRightBottom(aForm, l, t, r, b);
	return null;
}
/* Convert the system provided siginfo into compatible siginfo. */ static void compat_siginfo_from_siginfo (compat_siginfo_t *to, const siginfo_t *from) { ptrace_siginfo_t from_ptrace; memcpy (&from_ptrace, from, sizeof (from_ptrace)); memset (to, 0, sizeof (*to)); to->si_signo = from_ptrace.si_signo; to->si_errno = from_ptrace.si_errno; to->si_code = from_ptrace.si_code; if (to->si_code == SI_TIMER) { to->cpt_si_timerid = from_ptrace.cpt_si_timerid; to->cpt_si_overrun = from_ptrace.cpt_si_overrun; to->cpt_si_ptr = from_ptrace.cpt_si_ptr; } else if (to->si_code == SI_USER) { to->cpt_si_pid = from_ptrace.cpt_si_pid; to->cpt_si_uid = from_ptrace.cpt_si_uid; } else if (to->si_code < 0) { to->cpt_si_pid = from_ptrace.cpt_si_pid; to->cpt_si_uid = from_ptrace.cpt_si_uid; to->cpt_si_ptr = from_ptrace.cpt_si_ptr; } else { switch (to->si_signo) { case SIGCHLD: to->cpt_si_pid = from_ptrace.cpt_si_pid; to->cpt_si_uid = from_ptrace.cpt_si_uid; to->cpt_si_status = from_ptrace.cpt_si_status; to->cpt_si_utime = from_ptrace.cpt_si_utime; to->cpt_si_stime = from_ptrace.cpt_si_stime; break; case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: to->cpt_si_addr = from_ptrace.cpt_si_addr; break; case SIGPOLL: to->cpt_si_band = from_ptrace.cpt_si_band; to->cpt_si_fd = from_ptrace.cpt_si_fd; break; default: to->cpt_si_pid = from_ptrace.cpt_si_pid; to->cpt_si_uid = from_ptrace.cpt_si_uid; to->cpt_si_ptr = from_ptrace.cpt_si_ptr; break; } } }
#include <stdio.h> int main() { int t, n, st, ed, sum; scanf("%d", &t); while(t--) { sum = 0; scanf("%d %d", &n, &st); while(sum += (getchar() == ' '), sum < n - 1); scanf("%d", &ed); if(st < ed) puts("YES"); else puts("NO"); } return 0; }
/************************************************************************
 *                                                                      *
 *      Write error message from Textsw into a file.                    *
 *                                                                      *
 ************************************************************************/
static void
write_err_message(char *dirpath, char *filename)
{
   char fullname[128];

   if (*filename == '\0')
   {
      msgerr_print("write_err_message:Must specify a filename to be saved");
      return;
   }
   (void)sprintf(fullname, "%s/%s", dirpath, filename);
   if (msgerr_write(fullname) == NOT_OK)
      msgerr_print("write_err_message:Couldn't save error message into %s",
                   fullname);
}
// if returned true, the time interval given with "tick" has been crossed bool validate(const float dt) { m_sumTick += dt; if (m_sumTick >= m_tick) { m_sumTick -= m_tick; return true; } return false; }
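/*
 * A self-contained sketch of the fixed-interval pattern implemented by
 * validate() above: accumulate elapsed frame time and report once the
 * interval ("tick") has been crossed, carrying the overshoot into the next
 * interval so the average rate stays correct. The driver below and its
 * 60 Hz tick are illustrative assumptions, not part of the original class;
 * <stdbool.h> is assumed for the bool type.
 */
static bool tick_validate(float *sumTick, const float tick, const float dt)
{
	*sumTick += dt;
	if (*sumTick >= tick) {
		*sumTick -= tick;	/* keep the remainder for the next interval */
		return true;
	}
	return false;
}

static void run_frames(const float *frameDt, int nFrames)
{
	float acc = 0.0f;
	const float tick = 1.0f / 60.0f;	/* assumed 60 Hz fixed-rate work */
	int i;

	for (i = 0; i < nFrames; i++) {
		if (tick_validate(&acc, tick, frameDt[i])) {
			/* fixed-rate work (e.g. physics or AI update) goes here */
		}
	}
}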
/* * Replace a line in the specified buffer. The line number is * in Vim format (1-based). The replacement line is given as * a Python string object. The object is checked for validity * and correct format. Errors are returned as a value of FAIL. * The return value is OK on success. * If OK is returned and len_change is not NULL, *len_change * is set to the change in the buffer length. */ static int SetBufferLine(buf_T *buf, PyInt n, PyObject *line, PyInt *len_change) { bufref_T save_curbuf = {NULL, 0, 0}; win_T *save_curwin = NULL; tabpage_T *save_curtab = NULL; if (line == Py_None || line == NULL) { PyErr_Clear(); switch_to_win_for_buf(buf, &save_curwin, &save_curtab, &save_curbuf); VimTryStart(); if (u_savedel((linenr_T)n, 1L) == FAIL) RAISE_UNDO_FAIL; else if (ml_delete((linenr_T)n) == FAIL) RAISE_DELETE_LINE_FAIL; else { if (buf == curbuf && (save_curwin != NULL || save_curbuf.br_buf == NULL)) py_fix_cursor((linenr_T)n, (linenr_T)n + 1, (linenr_T)-1); if (save_curbuf.br_buf == NULL) deleted_lines_mark((linenr_T)n, 1L); } restore_win_for_buf(save_curwin, save_curtab, &save_curbuf); if (VimTryEnd()) return FAIL; if (len_change) *len_change = -1; return OK; } else if (PyBytes_Check(line) || PyUnicode_Check(line)) { char *save = StringToLine(line); if (save == NULL) return FAIL; VimTryStart(); PyErr_Clear(); switch_to_win_for_buf(buf, &save_curwin, &save_curtab, &save_curbuf); if (u_savesub((linenr_T)n) == FAIL) { RAISE_UNDO_FAIL; vim_free(save); } else if (ml_replace((linenr_T)n, (char_u *)save, FALSE) == FAIL) { RAISE_REPLACE_LINE_FAIL; vim_free(save); } else changed_bytes((linenr_T)n, 0); restore_win_for_buf(save_curwin, save_curtab, &save_curbuf); if (buf == curbuf) check_cursor_col(); if (VimTryEnd()) return FAIL; if (len_change) *len_change = 0; return OK; } else { PyErr_BadArgument(); return FAIL; } }
/**************************************************************************/ /** * @brief Tell the FRAM chip what address we wish to act upon * @param addr - address in memory to act upon *****************************************************************************/ static void WriteAddress(uint32_t addr) { if (_addressSize > 3) SPItransfer((uint8_t)(addr >> 24)); if (_addressSize > 2) SPItransfer((uint8_t)(addr >> 16)); SPItransfer((uint8_t)(addr >> 8)); SPItransfer((uint8_t)(addr & 0xFF)); }
/* Create and initialize a Tx queue */ static int mvpp2_txq_init(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { u32 val; unsigned int thread; int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu; txq->size = port->tx_ring_size; txq->descs = dma_alloc_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, &txq->descs_dma, GFP_KERNEL); if (!txq->descs) return -ENOMEM; txq->last_desc = txq->size - 1; thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); desc_per_txq = 16; desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); val = MVPP2_TXQ_TOKEN_SIZE_MAX; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val); for (thread = 0; thread < port->priv->nthreads; thread++) { txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), GFP_KERNEL); if (!txq_pcpu->buffs) return -ENOMEM; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; txq_pcpu->tso_headers = NULL; txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; txq_pcpu->tso_headers = dma_alloc_coherent(port->dev->dev.parent, txq_pcpu->size * TSO_HEADER_SIZE, &txq_pcpu->tso_headers_dma, GFP_KERNEL); if (!txq_pcpu->tso_headers) return -ENOMEM; } return 0; }
//*****************************************************************************
//
//! \brief Get the captured data for a sample sequence.
//!
//! \param ulBase is the base address of the ADC module.
//! \param pulBuffer is the address where the data is stored.
//!
//! This function copies data from the specified sample sequence FIFO to
//! a memory resident buffer. The number of samples available in the
//! FIFO are copied into the buffer, which is assumed to be large enough to
//! hold that many samples. This will only return the samples that are
//! presently available, which may not be the entire sample sequence if it is
//! in the process of being executed.
//!
//! \return Returns the number of samples copied to the buffer.
//
//*****************************************************************************
unsigned long xADCDataGet(unsigned long ulBase, unsigned long *pulBuffer)
{
    unsigned long ulValid = 0;
    unsigned long ulWrite;

    //
    // Check the arguments.
    //
    xASSERT(ulBase == ADC_BASE);
    xASSERT(pulBuffer != 0);

    //
    // Check if ADC conversion is complete.
    //
    do
    {
        ulValid = (xHWREG(ulBase + ADC0_SC1A) & ADC0_SC1A_COCO_FLAG) >> 7;
    } while(!ulValid);

    //
    // Copy data to buffer.
    //
    ulWrite = 0;
    pulBuffer[ulWrite++] = xHWREG(ulBase + ADC0_RA);

    return ulWrite;
}
/* Registers ic_nbctl and common db commands. */ static void ic_nbctl_cmd_init(void) { ctl_init(&icnbrec_idl_class, icnbrec_table_classes, tables, cmd_show_tables, ic_nbctl_exit); ctl_register_commands(ic_nbctl_commands); }
/** * Read an entry for the section from the line. Function returns 0 if an entry * was found, non-zero otherwise. Return values less than 0 indicate an error * with the config file. * * @param line the line to read the entry from * @param section the struct to read the entry into * * @return 0 if an entry was found * <0 for config file errors * >0 for issues such as empty line * */ static int read_section_entry(const char *line, struct section *section) { char *equaltok; char *temp_equaltok; const char *splitter = "="; char *buffer; size_t len = 0; if (line == NULL || section == NULL) { fprintf(ERRORFILE, "NULL params passed to read_section_entry"); return -1; } len = strlen(line); if (len == 0) { return 1; } if ((section->size) % MAX_SIZE == 0) { section->kv_pairs = (struct kv_pair **) realloc( section->kv_pairs, sizeof(struct kv_pair *) * (MAX_SIZE + section->size)); if (section->kv_pairs == NULL) { fprintf(ERRORFILE, "Failed re-allocating memory for configuration items\n"); exit(OUT_OF_MEMORY); } } buffer = strdup(line); if (!buffer) { fprintf(ERRORFILE, "Failed to allocating memory for line, %s\n", __func__); exit(OUT_OF_MEMORY); } equaltok = strtok_r(buffer, splitter, &temp_equaltok); if (equaltok == NULL) { fprintf(ERRORFILE, "Error with line '%s', no '=' found\n", buffer); exit(INVALID_CONFIG_FILE); } section->kv_pairs[section->size] = (struct kv_pair *) malloc( sizeof(struct kv_pair)); if (section->kv_pairs[section->size] == NULL) { fprintf(ERRORFILE, "Failed allocating memory for single section item\n"); exit(OUT_OF_MEMORY); } memset(section->kv_pairs[section->size], 0, sizeof(struct kv_pair)); section->kv_pairs[section->size]->key = trim(equaltok); equaltok = strtok_r(NULL, splitter, &temp_equaltok); if (equaltok == NULL) { int has_values = 1; if (strstr(line, splitter) == NULL) { fprintf(ERRORFILE, "configuration tokenization failed, error with line %s\n", line); has_values = 0; } free((void *) section->kv_pairs[section->size]->key); free((void *) section->kv_pairs[section->size]); section->kv_pairs[section->size] = NULL; free(buffer); if (!has_values) { return -1; } return 2; } #ifdef DEBUG fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok); #endif section->kv_pairs[section->size]->value = trim(equaltok); section->size++; free(buffer); return 0; }
/**
 * _puts - function that prints a string, followed by a new line
 * @str: string
 */
void _puts(char *str)
{
	int i;

	for (i = 0; str[i] != '\0'; i++)
		_putchar(str[i]);
	_putchar('\n');
}
/* * Copyright (c) 2018 Intel Corporation * * SPDX-License-Identifier: Apache-2.0 */ #ifndef INTERRUPT_UTIL_H_ #define INTERRUPT_UTIL_H_ #define MS_TO_US(ms) (ms * USEC_PER_MSEC) #if defined(CONFIG_CPU_CORTEX_M) #include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h> static inline uint32_t get_available_nvic_line(uint32_t initial_offset) { int i; for (i = initial_offset - 1; i >= 0; i--) { if (NVIC_GetEnableIRQ(i) == 0) { /* * Interrupts configured statically with IRQ_CONNECT(.) * are automatically enabled. NVIC_GetEnableIRQ() * returning false, here, implies that the IRQ line is * either not implemented or it is not enabled, thus, * currently not in use by Zephyr. */ /* Set the NVIC line to pending. */ NVIC_SetPendingIRQ(i); if (NVIC_GetPendingIRQ(i)) { /* * If the NVIC line is pending, it is * guaranteed that it is implemented; clear the * line. */ NVIC_ClearPendingIRQ(i); if (!NVIC_GetPendingIRQ(i)) { /* * If the NVIC line can be successfully * un-pended, it is guaranteed that it * can be used for software interrupt * triggering. Return the NVIC line * number. */ break; } } } } zassert_true(i >= 0, "No available IRQ line\n"); return i; } static inline void trigger_irq(int irq) { printk("Triggering irq : %d\n", irq); #if defined(CONFIG_SOC_TI_LM3S6965_QEMU) || defined(CONFIG_CPU_CORTEX_M0) \ || defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M1)\ || defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) /* QEMU does not simulate the STIR register: this is a workaround */ NVIC_SetPendingIRQ(irq); #else NVIC->STIR = irq; #endif } #elif defined(CONFIG_GIC) #include <zephyr/drivers/interrupt_controller/gic.h> #include <zephyr/dt-bindings/interrupt-controller/arm-gic.h> static inline void trigger_irq(int irq) { printk("Triggering irq : %d\n", irq); /* Ensure that the specified IRQ number is a valid SGI interrupt ID */ zassert_true(irq <= 15, "%u is not a valid SGI interrupt ID", irq); /* * Generate a software generated interrupt and forward it to the * requesting CPU. */ #if CONFIG_GIC_VER <= 2 sys_write32(GICD_SGIR_TGTFILT_REQONLY | GICD_SGIR_SGIINTID(irq), GICD_SGIR); #else uint64_t mpidr = GET_MPIDR(); uint8_t aff0 = MPIDR_AFFLVL(mpidr, 0); gic_raise_sgi(irq, mpidr, BIT(aff0)); #endif } #elif defined(CONFIG_ARC) static inline void trigger_irq(int irq) { printk("Triggering irq : %d\n", irq); z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, irq); } #elif defined(CONFIG_X86) #ifdef CONFIG_X2APIC #include <zephyr/drivers/interrupt_controller/loapic.h> #define VECTOR_MASK 0xFF #else #include <zephyr/sys/arch_interface.h> #define LOAPIC_ICR_IPI_TEST 0x00004000U #endif /* * We can emulate the interrupt by sending the IPI to * core itself by the LOAPIC for x86 platform. * * In APIC mode, Write LOAPIC's ICR to trigger IPI, * the LOAPIC_ICR_IPI_TEST 0x00004000U means: * Delivery Mode: Fixed * Destination Mode: Physical * Level: Assert * Trigger Mode: Edge * Destination Shorthand: No Shorthand * Destination: depends on cpu_id * * In X2APIC mode, this no longer works. We emulate the * interrupt by writing the IA32_X2APIC_SELF_IPI MSR * to send IPI to the core itself via LOAPIC also. * According to SDM vol.3 chapter 10.12.11, the bit[7:0] * for setting the vector is only needed. 
*/ static inline void trigger_irq(int vector) { uint8_t i; #ifdef CONFIG_X2APIC x86_write_x2apic(LOAPIC_SELF_IPI, ((VECTOR_MASK & vector))); #else #ifdef CONFIG_SMP int cpu_id = arch_curr_cpu()->id; #else int cpu_id = 0; #endif z_loapic_ipi(cpu_id, LOAPIC_ICR_IPI_TEST, vector); #endif /* CONFIG_X2APIC */ /* * add some nop operations here to cost some cycles to make sure * the IPI interrupt is handled before do our check. */ for (i = 0; i < 10; i++) { arch_nop(); } } #elif defined(CONFIG_ARCH_POSIX) #include "irq_ctrl.h" static inline void trigger_irq(int irq) { hw_irq_ctrl_raise_im_from_sw(irq); } #elif defined(CONFIG_RISCV) static inline void trigger_irq(int irq) { uint32_t mip; __asm__ volatile ("csrrs %0, mip, %1\n" : "=r" (mip) : "r" (1 << irq)); } #elif defined(CONFIG_XTENSA) static inline void trigger_irq(int irq) { z_xt_set_intset(BIT((unsigned int)irq)); } #elif defined(CONFIG_SPARC) extern void z_sparc_enter_irq(int); static inline void trigger_irq(int irq) { z_sparc_enter_irq(irq); } #elif defined(CONFIG_MIPS) extern void z_mips_enter_irq(int); static inline void trigger_irq(int irq) { z_mips_enter_irq(irq); } #else /* So far, Nios II does not support this */ #define NO_TRIGGER_FROM_SW #endif #endif /* INTERRUPT_UTIL_H_ */
/* ------------------------------------------------------------------------- icl_demo_list_cache_initialise Type: Component method Initialise the cache and register purge method with the meta-cache. ------------------------------------------------------------------------- */ static void icl_demo_list_cache_initialise ( void) { s_cache = icl_cache_get (sizeof (icl_demo_list_t)); icl_system_register (icl_demo_list_cache_purge, icl_demo_list_cache_terminate); }
/* parse out file attributes from I/O stream * * returns WS_SUCCESS on success */ int SFTP_ParseAtributes(WOLFSSH* ssh, WS_SFTP_FILEATRB* atr) { byte buf[UINT32_SZ * 2]; int ret; WMEMSET(atr, 0, sizeof(WS_SFTP_FILEATRB)); ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ); if (ret != UINT32_SZ) { return WS_FATAL_ERROR; } ato32(buf, &atr->flags); if (atr->flags & WOLFSSH_FILEATRB_SIZE) { word32 tmp; ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ * 2); if (ret != UINT32_SZ * 2) { return WS_FATAL_ERROR; } ato32(buf, &tmp); atr->sz = tmp; atr->sz = atr->sz << 32; ato32(buf + UINT32_SZ, &tmp); atr->sz |= tmp; } if (atr->flags & WOLFSSH_FILEATRB_UIDGID) { ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ*2); if (ret != UINT32_SZ*2) { return WS_FATAL_ERROR; } ato32(buf, &atr->uid); ato32(buf+UINT32_SZ, &atr->gid); } if (atr->flags & WOLFSSH_FILEATRB_PERM) { ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ); if (ret != UINT32_SZ) { return WS_FATAL_ERROR; } ato32(buf, &atr->per); } if (atr->flags & WOLFSSH_FILEATRB_TIME) { ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ*2); if (ret != UINT32_SZ*2) { return WS_FATAL_ERROR; } ato32(buf, &atr->atime); ato32(buf+UINT32_SZ, &atr->mtime); } if (atr->flags & WOLFSSH_FILEATRB_EXT) { word32 i; word32 sz; ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ); if (ret != UINT32_SZ) { return WS_FATAL_ERROR; } ato32(buf, &atr->extCount); for (i = 0; i < atr->extCount; i++) { ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ); if (ret != UINT32_SZ) { return WS_FATAL_ERROR; } ato32(buf, &sz); if (sz > 0) { byte* tmp = (byte*)WMALLOC(sz, NULL, DYNTYPE_BUFFER); ret = wolfSSH_stream_read(ssh, tmp, sz); if (ret < 0) { WFREE(tmp, NULL, DYNTYPE_BUFFER); return ret; } WFREE(tmp, NULL, DYNTYPE_BUFFER); } ret = wolfSSH_stream_read(ssh, buf, UINT32_SZ); if (ret != UINT32_SZ) { return WS_FATAL_ERROR; } ato32(buf, &sz); if (sz > 0) { byte* tmp = (byte*)WMALLOC(sz, NULL, DYNTYPE_BUFFER); ret = wolfSSH_stream_read(ssh, tmp, sz); WFREE(tmp, NULL, DYNTYPE_BUFFER); if (ret < 0) { return ret; } } } } return WS_SUCCESS; }
/******************************************************************************* ** ** Function AVDT_SINK_Deactivate ** ** Description Deactivate SEP of A2DP Sink. In Use parameter is adjusted. ** In Use will be made TRUE in case of activation. A2DP SRC ** will receive in_use as true and will not open A2DP Sink ** connection ** ** Returns void. ** *******************************************************************************/ void AVDT_SINK_Deactivate(void) { tAVDT_SCB *p_scb = &avdt_cb.scb[0]; int i; AVDT_TRACE_DEBUG("AVDT_SINK_Deactivate"); for (i = 0; i < AVDT_NUM_SEPS; i++, p_scb++) { if ((p_scb->allocated) && (p_scb->cs.tsep == AVDT_TSEP_SNK)) { AVDT_TRACE_DEBUG("AVDT_SINK_Deactivate, found scb"); p_scb->sink_activated = FALSE; p_scb->in_use = TRUE; break; } } }
/* * jdlhuff.c * * Copyright (C) 1991-1998, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains Huffman entropy decoding routines for lossless JPEG. * * Much of the complexity here has to do with supporting input suspension. * If the data source module demands suspension, we want to be able to back * up to the start of the current MCU. To do this, we copy state variables * into local working storage, and update them back to the permanent * storage only upon successful completion of an MCU. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" #include "jlossls.h" /* Private declarations for lossless codec */ #include "jdhuff.h" /* Declarations shared with jd*huff.c */ #ifdef D_LOSSLESS_SUPPORTED typedef struct { int ci, yoffset, MCU_width; } lhd_output_ptr_info; /* * Private entropy decoder object for lossless Huffman decoding. */ typedef struct { huffd_common_fields; /* Fields shared with other entropy decoders */ /* Pointers to derived tables (these workspaces have image lifespan) */ d_derived_tbl * derived_tbls[NUM_HUFF_TBLS]; /* Precalculated info set up by start_pass for use in decode_mcus: */ /* Pointers to derived tables to be used for each data unit within an MCU */ d_derived_tbl * cur_tbls[D_MAX_DATA_UNITS_IN_MCU]; /* Pointers to the proper output difference row for each group of data units * within an MCU. For each component, there are Vi groups of Hi data units. */ JDIFFROW output_ptr[D_MAX_DATA_UNITS_IN_MCU]; /* Number of output pointers in use for the current MCU. This is the sum * of all Vi in the MCU. */ int num_output_ptrs; /* Information used for positioning the output pointers within the output * difference rows. */ lhd_output_ptr_info output_ptr_info[D_MAX_DATA_UNITS_IN_MCU]; /* Index of the proper output pointer for each data unit within an MCU */ int output_ptr_index[D_MAX_DATA_UNITS_IN_MCU]; } lhuff_entropy_decoder; typedef lhuff_entropy_decoder * lhuff_entropy_ptr; /* * Initialize for a Huffman-compressed scan. 
*/ METHODDEF(void) start_pass_lhuff_decoder (j_decompress_ptr cinfo) { j_lossless_d_ptr losslsd = (j_lossless_d_ptr) cinfo->codec; lhuff_entropy_ptr entropy = (lhuff_entropy_ptr) losslsd->entropy_private; int ci, dctbl, sampn, ptrn, yoffset, xoffset; jpeg_component_info * compptr; for (ci = 0; ci < cinfo->comps_in_scan; ci++) { compptr = cinfo->cur_comp_info[ci]; dctbl = compptr->dc_tbl_no; /* Make sure requested tables are present */ if (dctbl < 0 || dctbl >= NUM_HUFF_TBLS || cinfo->dc_huff_tbl_ptrs[dctbl] == NULL) ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, dctbl); /* Compute derived values for Huffman tables */ /* We may do this more than once for a table, but it's not expensive */ jpeg_make_d_derived_tbl(cinfo, TRUE, dctbl, & entropy->derived_tbls[dctbl]); } /* Precalculate decoding info for each sample in an MCU of this scan */ for (sampn = 0, ptrn = 0; sampn < cinfo->data_units_in_MCU;) { compptr = cinfo->cur_comp_info[cinfo->MCU_membership[sampn]]; ci = compptr->component_index; for (yoffset = 0; yoffset < compptr->MCU_height; yoffset++, ptrn++) { /* Precalculate the setup info for each output pointer */ entropy->output_ptr_info[ptrn].ci = ci; entropy->output_ptr_info[ptrn].yoffset = yoffset; entropy->output_ptr_info[ptrn].MCU_width = compptr->MCU_width; for (xoffset = 0; xoffset < compptr->MCU_width; xoffset++, sampn++) { /* Precalculate the output pointer index for each sample */ entropy->output_ptr_index[sampn] = ptrn; /* Precalculate which table to use for each sample */ entropy->cur_tbls[sampn] = entropy->derived_tbls[compptr->dc_tbl_no]; } } } entropy->num_output_ptrs = ptrn; /* Initialize bitread state variables */ entropy->bitstate.bits_left = 0; entropy->bitstate.get_buffer = 0; /* unnecessary, but keeps Purify quiet */ entropy->insufficient_data = FALSE; } /* * Figure F.12: extend sign bit. * On some machines, a shift and add will be faster than a table lookup. */ #ifdef AVOID_TABLES #define HUFF_EXTEND(x,s) ((x) < (1<<((s)-1)) ? (x) + (((-1u)<<(s)) + 1) : (x)) #else #define HUFF_EXTEND(x,s) ((x) < extend_test[s] ? (x) + extend_offset[s] : (x)) static const int extend_test[16] = /* entry n is 2**(n-1) */ { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; static const int extend_offset[16] = /* entry n is (-1u << n) + 1 */ { 0, ((-1u)<<1) + 1, ((-1u)<<2) + 1, ((-1u)<<3) + 1, ((-1u)<<4) + 1, ((-1u)<<5) + 1, ((-1u)<<6) + 1, ((-1u)<<7) + 1, ((-1u)<<8) + 1, ((-1u)<<9) + 1, ((-1u)<<10) + 1, ((-1u)<<11) + 1, ((-1u)<<12) + 1, ((-1u)<<13) + 1, ((-1u)<<14) + 1, ((-1u)<<15) + 1 }; #endif /* AVOID_TABLES */ /* * Check for a restart marker & resynchronize decoder. * Returns FALSE if must suspend. */ METHODDEF(boolean) process_restart (j_decompress_ptr cinfo) { j_lossless_d_ptr losslsd = (j_lossless_d_ptr) cinfo->codec; lhuff_entropy_ptr entropy = (lhuff_entropy_ptr) losslsd->entropy_private; /* int ci; */ /* Throw away any unused bits remaining in bit buffer; */ /* include any full bytes in next_marker's count of discarded bytes */ cinfo->marker->discarded_bytes += entropy->bitstate.bits_left / 8; entropy->bitstate.bits_left = 0; /* Advance past the RSTn marker */ if (! (*cinfo->marker->read_restart_marker) (cinfo)) return FALSE; /* Reset out-of-data flag, unless read_restart_marker left us smack up * against a marker. In that case we will end up treating the next data * segment as empty, and we can avoid producing bogus output pixels by * leaving the flag set. 
*/ if (cinfo->unread_marker == 0) entropy->insufficient_data = FALSE; return TRUE; } /* * Decode and return nMCU's worth of Huffman-compressed differences. * Each MCU is also disassembled and placed accordingly in diff_buf. * * MCU_col_num specifies the column of the first MCU being requested within * the MCU-row. This tells us where to position the output row pointers in * diff_buf. * * Returns the number of MCUs decoded. This may be less than nMCU if data * source requested suspension. In that case no changes have been made to * permanent state. (Exception: some output differences may already have * been assigned. This is harmless for this module, since we'll just * re-assign them on the next call.) */ METHODDEF(JDIMENSION) decode_mcus (j_decompress_ptr cinfo, JDIFFIMAGE diff_buf, JDIMENSION MCU_row_num, JDIMENSION MCU_col_num, JDIMENSION nMCU) { j_lossless_d_ptr losslsd = (j_lossless_d_ptr) cinfo->codec; lhuff_entropy_ptr entropy = (lhuff_entropy_ptr) losslsd->entropy_private; unsigned int mcu_num; int sampn, ci, yoffset, MCU_width, ptrn; BITREAD_STATE_VARS; /* Set output pointer locations based on MCU_col_num */ for (ptrn = 0; ptrn < entropy->num_output_ptrs; ptrn++) { ci = entropy->output_ptr_info[ptrn].ci; yoffset = entropy->output_ptr_info[ptrn].yoffset; MCU_width = entropy->output_ptr_info[ptrn].MCU_width; entropy->output_ptr[ptrn] = diff_buf[ci][MCU_row_num + yoffset] + (MCU_col_num * MCU_width); } /* * If we've run out of data, zero out the buffers and return. * By resetting the undifferencer, the output samples will be CENTERJSAMPLE. * * NB: We should find a way to do this without interacting with the * undifferencer module directly. */ if (entropy->insufficient_data) { for (ptrn = 0; ptrn < entropy->num_output_ptrs; ptrn++) jzero_far((void FAR *) entropy->output_ptr[ptrn], nMCU * entropy->output_ptr_info[ptrn].MCU_width * SIZEOF(JDIFF)); (*losslsd->predict_process_restart) (cinfo); } else { /* Load up working state */ BITREAD_LOAD_STATE(cinfo,entropy->bitstate); /* Outer loop handles the number of MCU requested */ for (mcu_num = 0; mcu_num < nMCU; mcu_num++) { /* Inner loop handles the samples in the MCU */ for (sampn = 0; sampn < cinfo->data_units_in_MCU; sampn++) { d_derived_tbl * dctbl = entropy->cur_tbls[sampn]; register int s, r; /* Section H.2.2: decode the sample difference */ HUFF_DECODE(s, br_state, dctbl, return mcu_num, label1); if (s) { if (s == 16) /* special case: always output 32768 */ s = 32768; else { /* normal case: fetch subsequent bits */ CHECK_BIT_BUFFER(br_state, s, return mcu_num); r = GET_BITS(s); s = HUFF_EXTEND(r, s); } } /* Output the sample difference */ *entropy->output_ptr[entropy->output_ptr_index[sampn]]++ = (JDIFF) s; } /* Completed MCU, so update state */ BITREAD_SAVE_STATE(cinfo,entropy->bitstate); } } return nMCU; } /* * Module initialization routine for lossless Huffman entropy decoding. */ GLOBAL(void) jinit_lhuff_decoder (j_decompress_ptr cinfo) { j_lossless_d_ptr losslsd = (j_lossless_d_ptr) cinfo->codec; lhuff_entropy_ptr entropy; int i; entropy = (lhuff_entropy_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(lhuff_entropy_decoder)); losslsd->entropy_private = (void *) entropy; losslsd->entropy_start_pass = start_pass_lhuff_decoder; losslsd->entropy_process_restart = process_restart; losslsd->entropy_decode_mcus = decode_mcus; /* Mark tables unallocated */ for (i = 0; i < NUM_HUFF_TBLS; i++) { entropy->derived_tbls[i] = NULL; } } #endif /* D_LOSSLESS_SUPPORTED */
/** Disables one or more mount_config flags */ static inline void mount_config_disable ( struct mount_config* const p_mount, unsigned long cfg ) { p_mount->cfg &= ~cfg; }
/** Performs the actual initialization of the library. This function should only be called through InitializeBridge */
static int __ActualLibraryInitialization (lua_State* L)
{
	const char* libraryName = luaL_checkstring(L, 1);

	luaL_checktype(L, 2, LUA_TLIGHTUSERDATA);
	const luaL_Reg* reg = (const luaL_Reg*)lua_touserdata(L, 2);

	static const luaL_Reg dummy[] = {
		{NULL, NULL},
	};

	luaL_register(L, libraryName, reg ? reg : dummy);
	return 1;
}
/* Given FS and the full packed file content in REVPROPS->PACKED_REVPROPS, * fill the START_REVISION member, and make PACKED_REVPROPS point to the * first serialized revprop. If READ_ALL is set, initialize the SIZES * and OFFSETS members as well. * * Parse the revprops for REVPROPS->REVISION and set the PROPERTIES as * well as the SERIALIZED_SIZE member. If revprop caching has been * enabled, parse all revprops in the pack and cache them. */ static svn_error_t * parse_packed_revprops(svn_fs_t *fs, packed_revprops_t *revprops, svn_boolean_t read_all, apr_pool_t *pool, apr_pool_t *scratch_pool) { svn_stream_t *stream; apr_int64_t first_rev, count, i; apr_off_t offset; const char *header_end; apr_pool_t *iterpool = svn_pool_create(scratch_pool); svn_stringbuf_t *compressed = revprops->packed_revprops; svn_stringbuf_t *uncompressed = svn_stringbuf_create_empty(pool); SVN_ERR(svn__decompress(compressed, uncompressed, APR_SIZE_MAX)); stream = svn_stream_from_stringbuf(uncompressed, scratch_pool); SVN_ERR(svn_fs_fs__read_number_from_stream(&first_rev, NULL, stream, iterpool)); SVN_ERR(svn_fs_fs__read_number_from_stream(&count, NULL, stream, iterpool)); if ( !same_shard(fs, revprops->revision, first_rev) || !same_shard(fs, revprops->revision, first_rev + count - 1) || count < 1) return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL, _("Revprop pack for revision r%ld" " contains revprops for r%ld .. r%ld"), revprops->revision, (svn_revnum_t)first_rev, (svn_revnum_t)(first_rev + count -1)); if (!svn_fs_fs__is_packed_revprop(fs, first_rev)) return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL, _("Revprop pack for revision r%ld" " starts at non-packed revisions r%ld"), revprops->revision, (svn_revnum_t)first_rev); header_end = strstr(uncompressed->data, "\n\n"); if (header_end == NULL) return svn_error_create(SVN_ERR_FS_CORRUPT, NULL, _("Header end not found")); offset = header_end - uncompressed->data + 2; revprops->packed_revprops = svn_stringbuf_create_empty(pool); revprops->packed_revprops->data = uncompressed->data + offset; revprops->packed_revprops->len = (apr_size_t)(uncompressed->len - offset); revprops->packed_revprops->blocksize = (apr_size_t)(uncompressed->blocksize - offset); revprops->start_revision = (svn_revnum_t)first_rev; if (read_all) { revprops->sizes = apr_array_make(pool, (int)count, sizeof(offset)); revprops->offsets = apr_array_make(pool, (int)count, sizeof(offset)); } for (i = 0, offset = 0, revprops->total_size = 0; i < count; ++i) { apr_int64_t size; svn_string_t serialized; svn_revnum_t revision = (svn_revnum_t)(first_rev + i); svn_pool_clear(iterpool); SVN_ERR(svn_fs_fs__read_number_from_stream(&size, NULL, stream, iterpool)); if (size + offset > (apr_int64_t)revprops->packed_revprops->len) return svn_error_create(SVN_ERR_FS_CORRUPT, NULL, _("Packed revprop size exceeds pack file size")); serialized.data = revprops->packed_revprops->data + offset; serialized.len = (apr_size_t)size; if (revision == revprops->revision) { SVN_ERR(parse_revprop(&revprops->properties, fs, revision, revprops->generation, &serialized, pool, iterpool)); revprops->serialized_size = serialized.len; if (!read_all) break; } if (read_all) { APR_ARRAY_PUSH(revprops->sizes, apr_off_t) = serialized.len; APR_ARRAY_PUSH(revprops->offsets, apr_off_t) = offset; } revprops->total_size += serialized.len; offset += serialized.len; } return SVN_NO_ERROR; }
/** * Request a feature for the NTLMSSP negotiation * * @param ntlmssp_state NTLMSSP state * @param feature Bit flag specifying the requested feature */ void ntlmssp_want_feature(NTLMSSP_STATE *ntlmssp_state, uint32 feature) { if (feature & NTLMSSP_FEATURE_SESSION_KEY) { ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN; } if (feature & NTLMSSP_FEATURE_SIGN) { ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN; } if (feature & NTLMSSP_FEATURE_SEAL) { ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SEAL; } }
/* * U-boot - flash.c Flash driver for PSD4256GV * * Copyright (c) 2005-2007 Analog Devices Inc. * This file is based on BF533EzFlash.c originally written by Analog Devices, Inc. * * (C) Copyright 2000-2004 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * SPDX-License-Identifier: GPL-2.0+ */ #include <asm/io.h> #include "flash-defines.h" int AFP_NumSectors = 40; long AFP_SectorSize1 = 0x10000; int AFP_SectorSize2 = 0x4000; void flash_reset(void) { reset_flash(); } unsigned long flash_get_size(ulong baseaddr, flash_info_t * info, int bank_flag) { int id = 0, i = 0; static int FlagDev = 1; id = get_codes(); if (FlagDev) { #ifdef DEBUG printf("Device ID of the Flash is %x\n", id); #endif FlagDev = 0; } info->flash_id = id; switch (bank_flag) { case 0: for (i = PriFlashABegin; i < SecFlashABegin; i++) info->start[i] = (baseaddr + (i * AFP_SectorSize1)); info->size = 0x200000; info->sector_count = 32; break; case 1: info->start[0] = baseaddr + SecFlashASec1Off; info->start[1] = baseaddr + SecFlashASec2Off; info->start[2] = baseaddr + SecFlashASec3Off; info->start[3] = baseaddr + SecFlashASec4Off; info->size = 0x10000; info->sector_count = 4; break; case 2: info->start[0] = baseaddr + SecFlashBSec1Off; info->start[1] = baseaddr + SecFlashBSec2Off; info->start[2] = baseaddr + SecFlashBSec3Off; info->start[3] = baseaddr + SecFlashBSec4Off; info->size = 0x10000; info->sector_count = 4; break; } return (info->size); } unsigned long flash_init(void) { unsigned long size_b0, size_b1, size_b2; int i; size_b0 = size_b1 = size_b2 = 0; #ifdef DEBUG printf("Flash Memory Start 0x%x\n", CONFIG_SYS_FLASH_BASE); printf("Memory Map for the Flash\n"); printf("0x20000000 - 0x200FFFFF Flash A Primary (1MB)\n"); printf("0x20100000 - 0x201FFFFF Flash B Primary (1MB)\n"); printf("0x20200000 - 0x2020FFFF Flash A Secondary (64KB)\n"); printf("0x20280000 - 0x2028FFFF Flash B Secondary (64KB)\n"); printf("Please type command flinfo for information on Sectors \n"); #endif for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; ++i) { flash_info[i].flash_id = FLASH_UNKNOWN; } size_b0 = flash_get_size(CONFIG_SYS_FLASH0_BASE, &flash_info[0], 0); size_b1 = flash_get_size(CONFIG_SYS_FLASH0_BASE, &flash_info[1], 1); size_b2 = flash_get_size(CONFIG_SYS_FLASH0_BASE, &flash_info[2], 2); if (flash_info[0].flash_id == FLASH_UNKNOWN || size_b0 == 0) { printf("## Unknown FLASH on Bank 0 - Size = 0x%08lx = %ld MB\n", size_b0, size_b0 >> 20); } (void)flash_protect(FLAG_PROTECT_SET, CONFIG_SYS_FLASH0_BASE, (flash_info[0].start[2] - 1), &flash_info[0]); return (size_b0 + size_b1 + size_b2); } void flash_print_info(flash_info_t * info) { int i; if (info->flash_id == FLASH_UNKNOWN) { printf("missing or unknown FLASH type\n"); return; } switch (info->flash_id) { case FLASH_PSD4256GV: printf("ST Microelectronics "); break; default: printf("Unknown Vendor: (0x%08lX) ", info->flash_id); break; } for (i = 0; i < info->sector_count; ++i) { if ((i % 5) == 0) printf("\n "); printf(" %08lX%s", info->start[i], info->protect[i] ? 
" (RO)" : " "); } printf("\n"); return; } int flash_erase(flash_info_t * info, int s_first, int s_last) { int cnt = 0, i; int prot, sect; prot = 0; for (sect = s_first; sect <= s_last; ++sect) { if (info->protect[sect]) prot++; } if (prot) printf("- Warning: %d protected sectors will not be erased!\n", prot); else printf("\n"); cnt = s_last - s_first + 1; if (cnt == FLASH_TOT_SECT) { printf("Erasing flash, Please Wait \n"); if (erase_flash() < 0) { printf("Erasing flash failed \n"); return FLASH_FAIL; } } else { printf("Erasing Flash locations, Please Wait\n"); for (i = s_first; i <= s_last; i++) { if (info->protect[i] == 0) { /* not protected */ if (erase_block_flash(i, info->start[i]) < 0) { printf("Error Sector erasing \n"); return FLASH_FAIL; } } } } return FLASH_SUCCESS; } int write_buff(flash_info_t * info, uchar * src, ulong addr, ulong cnt) { int ret; int d; if (addr % 2) { read_flash(addr - 1 - CONFIG_SYS_FLASH_BASE, &d); d = (int)((d & 0x00FF) | (*src++ << 8)); ret = write_data(addr - 1, 2, (uchar *) & d); if (ret == FLASH_FAIL) return ERR_NOT_ERASED; ret = write_data(addr + 1, cnt - 1, src); } else ret = write_data(addr, cnt, src); if (ret == FLASH_FAIL) return ERR_NOT_ERASED; return FLASH_SUCCESS; } int write_data(long lStart, long lCount, uchar * pnData) { long i = 0; unsigned long ulOffset = lStart - CONFIG_SYS_FLASH_BASE; int d; int nSector = 0; int flag = 0; if (lCount % 2) { flag = 1; lCount = lCount - 1; } for (i = 0; i < lCount - 1; i += 2, ulOffset += 2) { get_sector_number(ulOffset, &nSector); read_flash(ulOffset, &d); if (d != 0xffff) { printf ("Flash not erased at offset 0x%lx Please erase to reprogram\n", ulOffset); return FLASH_FAIL; } unlock_flash(ulOffset); d = (int)(pnData[i] | pnData[i + 1] << 8); write_flash(ulOffset, d); if (poll_toggle_bit(ulOffset) < 0) { printf("Error programming the flash \n"); return FLASH_FAIL; } if ((i > 0) && (!(i % AFP_SectorSize2))) printf("."); } if (flag) { get_sector_number(ulOffset, &nSector); read_flash(ulOffset, &d); if (d != 0xffff) { printf ("Flash not erased at offset 0x%lx Please erase to reprogram\n", ulOffset); return FLASH_FAIL; } unlock_flash(ulOffset); d = (int)(pnData[i] | (d & 0xFF00)); write_flash(ulOffset, d); if (poll_toggle_bit(ulOffset) < 0) { printf("Error programming the flash \n"); return FLASH_FAIL; } } return FLASH_SUCCESS; } int read_data(long ulStart, long lCount, long lStride, int *pnData) { long i = 0; int j = 0; long ulOffset = ulStart; int iShift = 0; int iNumWords = 2; int nLeftover = lCount % 4; int nHi, nLow; int nSector = 0; for (i = 0; (i < lCount / 4) && (i < BUFFER_SIZE); i++) { for (iShift = 0, j = 0; j < iNumWords; j += 2) { if ((ulOffset >= INVALIDLOCNSTART) && (ulOffset < INVALIDLOCNEND)) return FLASH_FAIL; get_sector_number(ulOffset, &nSector); read_flash(ulOffset, &nLow); ulOffset += (lStride * 2); read_flash(ulOffset, &nHi); ulOffset += (lStride * 2); pnData[i] = (nHi << 16) | nLow; } } if (nLeftover > 0) { if ((ulOffset >= INVALIDLOCNSTART) && (ulOffset < INVALIDLOCNEND)) return FLASH_FAIL; get_sector_number(ulOffset, &nSector); read_flash(ulOffset, &pnData[i]); } return FLASH_SUCCESS; } int write_flash(long nOffset, int nValue) { long addr; addr = (CONFIG_SYS_FLASH_BASE + nOffset); SSYNC(); *(unsigned volatile short *)addr = nValue; SSYNC(); if (poll_toggle_bit(nOffset) < 0) return FLASH_FAIL; return FLASH_SUCCESS; } int read_flash(long nOffset, int *pnValue) { int nValue = 0x0; long addr = (CONFIG_SYS_FLASH_BASE + nOffset); if (nOffset != 0x2) reset_flash(); SSYNC(); nValue = 
*(volatile unsigned short *)addr; SSYNC(); *pnValue = nValue; return true; } int poll_toggle_bit(long lOffset) { unsigned int u1, u2; unsigned long timeout = 0xFFFFFFFF; volatile unsigned long *FB = (volatile unsigned long *)(0x20000000 + lOffset); while (1) { if (timeout < 0) break; u1 = *(volatile unsigned short *)FB; u2 = *(volatile unsigned short *)FB; if ((u1 & 0x0040) == (u2 & 0x0040)) return FLASH_SUCCESS; if ((u2 & 0x0020) == 0x0000) continue; u1 = *(volatile unsigned short *)FB; if ((u2 & 0x0040) == (u1 & 0x0040)) return FLASH_SUCCESS; else { reset_flash(); return FLASH_FAIL; } timeout--; } printf("Time out occured \n"); if (timeout < 0) return FLASH_FAIL; } void reset_flash(void) { write_flash(WRITESEQ1, RESET_VAL); /* Wait for 10 micro seconds */ udelay(10); } int erase_flash(void) { write_flash(WRITESEQ1, WRITEDATA1); write_flash(WRITESEQ2, WRITEDATA2); write_flash(WRITESEQ3, WRITEDATA3); write_flash(WRITESEQ4, WRITEDATA4); write_flash(WRITESEQ5, WRITEDATA5); write_flash(WRITESEQ6, WRITEDATA6); if (poll_toggle_bit(0x0000) < 0) return FLASH_FAIL; write_flash(SecFlashAOff + WRITESEQ1, WRITEDATA1); write_flash(SecFlashAOff + WRITESEQ2, WRITEDATA2); write_flash(SecFlashAOff + WRITESEQ3, WRITEDATA3); write_flash(SecFlashAOff + WRITESEQ4, WRITEDATA4); write_flash(SecFlashAOff + WRITESEQ5, WRITEDATA5); write_flash(SecFlashAOff + WRITESEQ6, WRITEDATA6); if (poll_toggle_bit(SecFlashASec1Off) < 0) return FLASH_FAIL; write_flash(PriFlashBOff + WRITESEQ1, WRITEDATA1); write_flash(PriFlashBOff + WRITESEQ2, WRITEDATA2); write_flash(PriFlashBOff + WRITESEQ3, WRITEDATA3); write_flash(PriFlashBOff + WRITESEQ4, WRITEDATA4); write_flash(PriFlashBOff + WRITESEQ5, WRITEDATA5); write_flash(PriFlashBOff + WRITESEQ6, WRITEDATA6); if (poll_toggle_bit(PriFlashBOff) < 0) return FLASH_FAIL; write_flash(SecFlashBOff + WRITESEQ1, WRITEDATA1); write_flash(SecFlashBOff + WRITESEQ2, WRITEDATA2); write_flash(SecFlashBOff + WRITESEQ3, WRITEDATA3); write_flash(SecFlashBOff + WRITESEQ4, WRITEDATA4); write_flash(SecFlashBOff + WRITESEQ5, WRITEDATA5); write_flash(SecFlashBOff + WRITESEQ6, WRITEDATA6); if (poll_toggle_bit(SecFlashBOff) < 0) return FLASH_FAIL; return FLASH_SUCCESS; } int erase_block_flash(int nBlock, unsigned long address) { long ulSectorOff = 0x0; if ((nBlock < 0) || (nBlock > AFP_NumSectors)) return false; ulSectorOff = (address - CONFIG_SYS_FLASH_BASE); write_flash((WRITESEQ1 | ulSectorOff), WRITEDATA1); write_flash((WRITESEQ2 | ulSectorOff), WRITEDATA2); write_flash((WRITESEQ3 | ulSectorOff), WRITEDATA3); write_flash((WRITESEQ4 | ulSectorOff), WRITEDATA4); write_flash((WRITESEQ5 | ulSectorOff), WRITEDATA5); write_flash(ulSectorOff, BlockEraseVal); if (poll_toggle_bit(ulSectorOff) < 0) return FLASH_FAIL; return FLASH_SUCCESS; } void unlock_flash(long ulOffset) { unsigned long ulOffsetAddr = ulOffset; ulOffsetAddr &= 0xFFFF0000; write_flash((WRITESEQ1 | ulOffsetAddr), UNLOCKDATA1); write_flash((WRITESEQ2 | ulOffsetAddr), UNLOCKDATA2); write_flash((WRITESEQ3 | ulOffsetAddr), UNLOCKDATA3); } int get_codes() { int dev_id = 0; write_flash(WRITESEQ1, GETCODEDATA1); write_flash(WRITESEQ2, GETCODEDATA2); write_flash(WRITESEQ3, GETCODEDATA3); read_flash(0x0002, &dev_id); dev_id &= 0x00FF; reset_flash(); return dev_id; } void get_sector_number(long ulOffset, int *pnSector) { int nSector = 0; if (ulOffset >= SecFlashAOff) { if ((ulOffset < SecFlashASec1Off) && (ulOffset < SecFlashASec2Off)) { nSector = SECT32; } else if ((ulOffset >= SecFlashASec2Off) && (ulOffset < SecFlashASec3Off)) { nSector = SECT33; } 
else if ((ulOffset >= SecFlashASec3Off) && (ulOffset < SecFlashASec4Off)) { nSector = SECT34; } else if ((ulOffset >= SecFlashASec4Off) && (ulOffset < SecFlashAEndOff)) { nSector = SECT35; } } else if (ulOffset >= SecFlashBOff) { if ((ulOffset < SecFlashBSec1Off) && (ulOffset < SecFlashBSec2Off)) { nSector = SECT36; } if ((ulOffset < SecFlashBSec2Off) && (ulOffset < SecFlashBSec3Off)) { nSector = SECT37; } if ((ulOffset < SecFlashBSec3Off) && (ulOffset < SecFlashBSec4Off)) { nSector = SECT38; } if ((ulOffset < SecFlashBSec4Off) && (ulOffset < SecFlashBEndOff)) { nSector = SECT39; } } else if ((ulOffset >= PriFlashAOff) && (ulOffset < SecFlashAOff)) { nSector = ulOffset & 0xffff0000; nSector = ulOffset >> 16; nSector = nSector & 0x000ff; } if ((nSector >= 0) && (nSector < AFP_NumSectors)) { *pnSector = nSector; } }
/* Not really used, but needed in the ada_language_defn. */ static void emit_char (int c, struct type *type, struct ui_file *stream, int quoter) { ada_emit_char (c, type, stream, quoter, 1); }
// Start the request for b. Caller must hold idelock. static void idestart(struct buf *b) { if(b == 0) panic("idestart"); if(b->blockno >= FSSIZE) panic("incorrect blockno"); int sector_per_block = BSIZE/SECTOR_SIZE; int sector = b->blockno * sector_per_block; int read_cmd = (sector_per_block == 1) ? IDE_CMD_READ : IDE_CMD_RDMUL; int write_cmd = (sector_per_block == 1) ? IDE_CMD_WRITE : IDE_CMD_WRMUL; if (sector_per_block > 7) panic("idestart"); idewait(0); outb(IDE_REG_CTRL, 0); outb(0x1f2, sector_per_block); outb(0x1f3, sector & 0xff); outb(0x1f4, (sector >> 8) & 0xff); outb(0x1f5, (sector >> 16) & 0xff); outb(0x1f6, 0xe0 | ((b->dev&1)<<4) | ((sector>>24)&0x0f)); if(b->flags & B_DIRTY){ outb(IDE_REG_STATUS, write_cmd); outsl(0x1f0, b->data, BSIZE/4); } else { outb(IDE_REG_STATUS, read_cmd); } }
/*
** Return the current file-size of a vlog-file.
*/
static int vlogFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
  int rc;
  sqlite3_uint64 tStart, tElapse;
  VLogFile *p = (VLogFile *)pFile;
  tStart = vlog_time();
  rc = p->pReal->pMethods->xFileSize(p->pReal, pSize);
  tElapse = vlog_time() - tStart;
  vlogLogPrint(p->pLog, tStart, tElapse, VLOG_OP_FILESIZE, *pSize, -1, 0, rc);
  return rc;
}

/*========================================================================= * * Copyright NumFOCUS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *=========================================================================*/ #ifndef itkWhitakerSparseLevelSetImage_h #define itkWhitakerSparseLevelSetImage_h #include "itkLevelSetSparseImage.h" #include "itkLabelObject.h" #include "itkLabelMap.h" namespace itk { /** * \class WhitakerSparseLevelSetImage * \brief Derived class for the sparse-field representation of level-set function * * This representation is a "sparse" level-set function, where values are * real in between [ -3, +3 ] and organized into several layers { -2, -1, * 0, +1, +2 }. * * \tparam TOutput Output type (float or double) of the level set function * \tparam VDimension Dimension of the input space * \ingroup ITKLevelSetsv4 */ template <typename TOutput, unsigned int VDimension> class ITK_TEMPLATE_EXPORT WhitakerSparseLevelSetImage : public LevelSetSparseImage<TOutput, VDimension> { public: ITK_DISALLOW_COPY_AND_MOVE(WhitakerSparseLevelSetImage); using Self = WhitakerSparseLevelSetImage; using Pointer = SmartPointer<Self>; using ConstPointer = SmartPointer<const Self>; using Superclass = LevelSetSparseImage<TOutput, VDimension>; /** Method for creation through the object factory. */ itkNewMacro(Self); /** Run-time type information (and related methods). 
*/ itkTypeMacro(WhitakerSparseLevelSetImage, LevelSetSparseImage); static constexpr unsigned int Dimension = VDimension; using typename Superclass::InputType; using typename Superclass::OutputType; using typename Superclass::OutputRealType; using typename Superclass::GradientType; using typename Superclass::HessianType; using typename Superclass::LevelSetDataType; using typename Superclass::LayerIdType; using typename Superclass::LabelObjectType; using typename Superclass::LabelObjectPointer; using typename Superclass::LabelObjectLengthType; using typename Superclass::LabelObjectLineType; using typename Superclass::LabelMapType; using typename Superclass::LabelMapPointer; using typename Superclass::LabelMapConstPointer; using typename Superclass::RegionType; using typename Superclass::LayerType; using typename Superclass::LayerIterator; using typename Superclass::LayerConstIterator; using typename Superclass::LayerMapType; using typename Superclass::LayerMapIterator; using typename Superclass::LayerMapConstIterator; /** Returns the value of the level set function at a given location iP */ using Superclass::Evaluate; OutputType Evaluate(const InputType & inputIndex) const override; #ifdef ITK_USE_CONCEPT_CHECKING // Begin concept checking itkConceptMacro(DoubleConvertible, (Concept::Convertible<OutputRealType, OutputType>)); // End concept checking #endif // ITK_USE_CONCEPT_CHECKING static inline LayerIdType MinusThreeLayer() { return -3; } static inline LayerIdType MinusTwoLayer() { return -2; } static inline LayerIdType MinusOneLayer() { return -1; } static inline LayerIdType ZeroLayer() { return 0; } static inline LayerIdType PlusOneLayer() { return 1; } static inline LayerIdType PlusTwoLayer() { return 2; } static inline LayerIdType PlusThreeLayer() { return 3; } /** Return the label object pointer with a given id */ template <typename TLabel> typename LabelObject<TLabel, Dimension>::Pointer GetAsLabelObject() { using OutputLabelObjectType = LabelObject<TLabel, Dimension>; auto object = OutputLabelObjectType::New(); for (LayerIdType status = this->MinusThreeLayer(); status < this->PlusOneLayer(); ++status) { LabelObjectPointer labelObject = this->m_LabelMap->GetLabelObject(status); for (SizeValueType i = 0; i < labelObject->GetNumberOfLines(); ++i) { object->AddLine(labelObject->GetLine(i)); } } object->Optimize(); return object; } protected: WhitakerSparseLevelSetImage(); ~WhitakerSparseLevelSetImage() override = default; /** Initialize the sparse field layers */ void InitializeLayers() override; void InitializeInternalLabelList() override; }; } // namespace itk #ifndef ITK_MANUAL_INSTANTIATION # include "itkWhitakerSparseLevelSetImage.hxx" #endif #endif // itkWhitakerSparseLevelSetImage_h
/* -*- mode: c; c-basic-offset: 8 -*- */ /* SNI RM driver * * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ /* * Based on lasi700.c */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/delay.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Thomas Bogendörfer"); MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:snirm_53c710"); #define SNIRM710_CLOCK 32 static struct scsi_host_template snirm710_template = { .name = "SNI RM SCSI 53c710", .proc_name = "snirm_53c710", .this_id = 7, .module = THIS_MODULE, }; static int snirm710_probe(struct platform_device *dev) { unsigned long base; struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; struct resource *res; res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; base = res->start; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); if (!hostdata) { dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); return -ENOMEM; } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); hostdata->base = ioremap_nocache(base, 0x100); hostdata->differential = 0; hostdata->clock = SNIRM710_CLOCK; hostdata->force_le_on_be = 1; hostdata->chip710 = 1; hostdata->burst_length = 4; host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev); if (!host) goto out_kfree; host->this_id = 7; host->base = base; host->irq = platform_get_irq(dev, 0); if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { printk(KERN_ERR "snirm710: request_irq failed!\n"); goto out_put_host; } dev_set_drvdata(&dev->dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_kfree: iounmap(hostdata->base); kfree(hostdata); return -ENODEV; } static int __exit snirm710_driver_remove(struct platform_device *dev) { struct Scsi_Host *host = dev_get_drvdata(&dev->dev); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; scsi_remove_host(host); NCR_700_release(host); free_irq(host->irq, host); iounmap(hostdata->base); kfree(hostdata); return 0; } static struct platform_driver snirm710_driver = { .probe = snirm710_probe, .remove = snirm710_driver_remove, .driver = { .name = "snirm_53c710", .owner = 
THIS_MODULE, }, }; static int __init snirm710_init(void) { return platform_driver_register(&snirm710_driver); } static void __exit snirm710_exit(void) { platform_driver_unregister(&snirm710_driver); } module_init(snirm710_init); module_exit(snirm710_exit);
/* * Set up to collect the data for the sampled function. This is used for * those alternate tint transforms that cannot be converted into a * type 4 function. */ static int sampled_data_setup(i_ctx_t *i_ctx_p, gs_function_t *pfn, const ref * pproc, int (*finish_proc)(i_ctx_t *), gs_memory_t * mem) { os_ptr op = osp; gs_sampled_data_enum *penum; int i; gs_function_Sd_params_t * params = (gs_function_Sd_params_t *)&pfn->params; check_estack(estack_storage + 1); check_ostack(params->m + O_STACK_PAD); check_ostack(params->n + O_STACK_PAD); penum = gs_sampled_data_enum_alloc(imemory, "zbuildsampledfuntion(params)"); if (penum == NULL) return_error(gs_error_VMerror); penum->pfn = pfn; for(i=0; i< params->m; i++) penum->indexes[i] = 0; penum->o_stack_depth = ref_stack_count(&o_stack); push(O_STACK_PAD); for (i = 0; i < O_STACK_PAD; i++) make_null(op - i); esp += estack_storage; make_op_estack(esp - 2, finish_proc); sample_proc = *pproc; make_istruct(esp, 0, penum); push_op_estack(sampled_data_sample); return o_push_estack; }
/**
 * mono_class_is_subclass_of:
 * @klass: class to probe if it is a subclass of another one
 * @klassc: the class we suspect is the base class
 * @check_interfaces: whether we should perform interface checks
 *
 * This method determines whether @klass is a subclass of @klassc.
 *
 * If the @check_interfaces flag is set, then if @klassc is an interface
 * this method returns true if @klass implements the interface, or,
 * if @klass is an interface, if one of its base classes is @klass.
 *
 * If @check_interfaces is false, then if @klass is not an interface,
 * it returns true if @klass is a subclass of @klassc.
 *
 * If @klass is an interface and @klassc is System.Object, then this function
 * returns true.
 *
 */
gboolean
mono_class_is_subclass_of (MonoClass *klass, MonoClass *klassc,
			   gboolean check_interfaces)
{
	if (check_interfaces && MONO_CLASS_IS_INTERFACE (klassc) && !MONO_CLASS_IS_INTERFACE (klass)) {
		if (MONO_CLASS_IMPLEMENTS_INTERFACE (klass, klassc->interface_id))
			return TRUE;
	} else if (check_interfaces && MONO_CLASS_IS_INTERFACE (klassc) && MONO_CLASS_IS_INTERFACE (klass)) {
		int i;

		for (i = 0; i < klass->interface_count; i ++) {
			MonoClass *ic = klass->interfaces [i];
			if (ic == klassc)
				return TRUE;
		}
	} else {
		if (!MONO_CLASS_IS_INTERFACE (klass) && mono_class_has_parent (klass, klassc))
			return TRUE;
	}

	if (klassc == mono_defaults.object_class)
		return TRUE;

	return FALSE;
}
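/*
 * Editor's note: a hedged usage sketch (not part of the Mono sources) showing
 * how embedding code might call the check above.  It assumes a Mono embedding
 * environment is already initialized and that `corlib` is the corlib image;
 * is_disposable() and its arguments are illustrative names.
 */
#include <mono/metadata/class.h>
#include <mono/metadata/image.h>

static int is_disposable(MonoImage *corlib, MonoClass *klass)
{
	MonoClass *idisposable = mono_class_from_name(corlib, "System", "IDisposable");

	if (!idisposable)
		return 0;
	/* check_interfaces != 0, so implemented interfaces are considered */
	return mono_class_is_subclass_of(klass, idisposable, 1);
}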
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * check_backup.c -- pre-check backup */ #include <stddef.h> #include <stdint.h> #include <unistd.h> #include "out.h" #include "file.h" #include "os.h" #include "libpmempool.h" #include "pmempool.h" #include "pool.h" #include "check_util.h" enum question { Q_OVERWRITE_EXISTING_FILE, Q_OVERWRITE_EXISTING_PARTS }; /* * location_release -- (internal) release poolset structure */ static void location_release(location *loc) { if (loc->set) { util_poolset_free(loc->set); loc->set = NULL; } } /* * backup_nonpoolset_requirements -- (internal) check backup requirements */ static int backup_nonpoolset_requirements(PMEMpoolcheck *ppc, location *loc) { LOG(3, "backup_path %s", ppc->backup_path); int exists = util_file_exists(ppc->backup_path); if (exists < 0) { return CHECK_ERR(ppc, "unable to access the backup destination: %s", ppc->backup_path); } if (!exists) { errno = 0; return 0; } if ((size_t)util_file_get_size(ppc->backup_path) != ppc->pool->set_file->size) { ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "destination of the backup does not match the size of the source pool file: %s", ppc->backup_path); } if (CHECK_WITHOUT_FIXING(ppc)) { location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; } CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_FILE, "destination of the backup already exists.|Do you want to overwrite it?"); return check_questions_sequence_validate(ppc); } /* * backup_nonpoolset_overwrite -- (internal) overwrite pool */ static int backup_nonpoolset_overwrite(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *context) { LOG(3, NULL); ASSERTne(loc, NULL); switch (question) { case Q_OVERWRITE_EXISTING_FILE: if (pool_copy(ppc->pool, ppc->backup_path, 1 /* overwrite */)) { location_release(loc); ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "cannot perform backup"); } location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; default: ERR("not implemented question id: %u", question); } return 0; } /* * backup_nonpoolset_create -- (internal) create backup */ static int backup_nonpoolset_create(PMEMpoolcheck *ppc, location *loc) { CHECK_INFO(ppc, "creating backup file: %s", ppc->backup_path); if (pool_copy(ppc->pool, ppc->backup_path, 0)) { location_release(loc); ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "cannot perform backup"); } location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; } /* * backup_poolset_requirements -- (internal) check backup requirements */ static int backup_poolset_requirements(PMEMpoolcheck *ppc, location *loc) { LOG(3, "backup_path %s", ppc->backup_path); if (ppc->pool->set_file->poolset->nreplicas > 1) { CHECK_INFO(ppc, "backup of a poolset with multiple replicas is not supported"); goto err; } if (pool_set_parse(&loc->set, ppc->backup_path)) { CHECK_INFO_ERRNO(ppc, "invalid poolset backup file: %s", ppc->backup_path); goto err; } if (loc->set->nreplicas > 1) { CHECK_INFO(ppc, "backup to a poolset with multiple replicas is not supported"); goto err_poolset; } ASSERTeq(loc->set->nreplicas, 1); struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0]; struct pool_replica *drep = loc->set->replica[0]; if (srep->nparts != drep->nparts) { CHECK_INFO(ppc, "number of part files in the backup poolset must match number of part files in the source poolset"); goto err_poolset; } int overwrite_required = 0; for (unsigned p = 0; p < srep->nparts; p++) { int exists = util_file_exists(drep->part[p].path); if (exists < 0) { 
CHECK_INFO(ppc, "unable to access the part of the destination poolset: %s", ppc->backup_path); goto err_poolset; } if (srep->part[p].filesize != drep->part[p].filesize) { CHECK_INFO(ppc, "size of the part %u of the backup poolset does not match source poolset", p); goto err_poolset; } if (!exists) { errno = 0; continue; } overwrite_required = true; if ((size_t)util_file_get_size(drep->part[p].path) != srep->part[p].filesize) { CHECK_INFO(ppc, "destination of the backup part does not match size of the source part file: %s", drep->part[p].path); goto err_poolset; } } if (CHECK_WITHOUT_FIXING(ppc)) { location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; } if (overwrite_required) { CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_PARTS, "part files of the destination poolset of the backup already exist.|" "Do you want to overwrite them?"); } return check_questions_sequence_validate(ppc); err_poolset: location_release(loc); err: ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "unable to backup poolset"); } /* * backup_poolset -- (internal) backup the poolset */ static int backup_poolset(PMEMpoolcheck *ppc, location *loc, int overwrite) { struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0]; struct pool_replica *drep = loc->set->replica[0]; for (unsigned p = 0; p < srep->nparts; p++) { if (overwrite == 0) { CHECK_INFO(ppc, "creating backup file: %s", drep->part[p].path); } if (pool_set_part_copy(&drep->part[p], &srep->part[p], overwrite)) { location_release(loc); ppc->result = CHECK_RESULT_ERROR; CHECK_INFO(ppc, "unable to create backup file"); return CHECK_ERR(ppc, "unable to backup poolset"); } } return 0; } /* * backup_poolset_overwrite -- (internal) backup poolset with overwrite */ static int backup_poolset_overwrite(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *context) { LOG(3, NULL); ASSERTne(loc, NULL); switch (question) { case Q_OVERWRITE_EXISTING_PARTS: if (backup_poolset(ppc, loc, 1 /* overwrite */)) { location_release(loc); ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "cannot perform backup"); } location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; default: ERR("not implemented question id: %u", question); } return 0; } /* * backup_poolset_create -- (internal) backup poolset */ static int backup_poolset_create(PMEMpoolcheck *ppc, location *loc) { if (backup_poolset(ppc, loc, 0)) { location_release(loc); ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "cannot perform backup"); } location_release(loc); loc->step = CHECK_STEP_COMPLETE; return 0; } struct step { int (*check)(PMEMpoolcheck *, location *); int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *); int poolset; }; static const struct step steps[] = { { .check = backup_nonpoolset_requirements, .poolset = false, }, { .fix = backup_nonpoolset_overwrite, .poolset = false, }, { .check = backup_nonpoolset_create, .poolset = false }, { .check = backup_poolset_requirements, .poolset = true, }, { .fix = backup_poolset_overwrite, .poolset = true, }, { .check = backup_poolset_create, .poolset = true }, { .check = NULL, .fix = NULL, }, }; /* * step_exe -- (internal) perform single step according to its parameters */ static int step_exe(PMEMpoolcheck *ppc, location *loc) { ASSERT(loc->step < ARRAY_SIZE(steps)); const struct step *step = &steps[loc->step++]; if (step->poolset == 0 && ppc->pool->params.is_poolset == 1) return 0; if (!step->fix) return step->check(ppc, loc); if (!check_has_answer(ppc->data)) return 0; if (check_answer_loop(ppc, loc, NULL, 1, step->fix)) 
return -1; ppc->result = CHECK_RESULT_CONSISTENT; return 0; } /* * check_backup -- perform backup if requested and needed */ void check_backup(PMEMpoolcheck *ppc) { LOG(3, "backup_path %s", ppc->backup_path); if (ppc->backup_path == NULL) return; location *loc = check_get_step_data(ppc->data); /* do all checks */ while (CHECK_NOT_COMPLETE(loc, steps)) { if (step_exe(ppc, loc)) break; } }
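/*
 * Editor's note: the backup code above drives everything through a table of
 * { check, fix } steps selected by a location cursor and a poolset flag.
 * Below is a stripped-down, self-contained sketch of that step-machine
 * pattern; the struct and function names are illustrative and are not part
 * of the libpmempool API.
 */
#include <stddef.h>
#include <stdio.h>

struct ctx {
	int step;
	int done;
};

typedef int (*step_fn)(struct ctx *);

static int step_check(struct ctx *c)  { (void)c; printf("check step\n"); return 0; }
static int step_finish(struct ctx *c) { c->done = 1; return 0; }

static const step_fn steps[] = { step_check, step_check, step_finish, NULL };

/* Execute steps in order until one fails or a step marks the run complete. */
static int run_steps(struct ctx *c)
{
	while (!c->done && steps[c->step] != NULL) {
		step_fn fn = steps[c->step];

		c->step++;
		if (fn(c) != 0)
			return -1;	/* abort on error, as check_backup() does */
	}
	return 0;
}

int main(void)
{
	struct ctx c = { 0, 0 };

	return run_steps(&c);
}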
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm_runtime.h * \brief Pack all tvm runtime source files */ #include <sys/stat.h> #include <fstream> #define DMLC_USE_LOGGING_LIBRARY <tvm/runtime/logging.h> #define TVM_USE_LIBBACKTRACE 0 /* Enable custom logging - this will cause TVM to use a custom implementation * of tvm::runtime::detail::LogMessage. We use this to pass TVM log messages to * Android logcat. */ #define TVM_LOG_CUSTOMIZE 1 #include "../src/runtime/c_runtime_api.cc" #include "../src/runtime/cpu_device_api.cc" #include "../src/runtime/dso_library.cc" #include "../src/runtime/file_utils.cc" #include "../src/runtime/graph_executor/graph_executor.cc" #include "../src/runtime/library_module.cc" #include "../src/runtime/logging.cc" #include "../src/runtime/minrpc/minrpc_logger.cc" #include "../src/runtime/module.cc" #include "../src/runtime/ndarray.cc" #include "../src/runtime/object.cc" #include "../src/runtime/profiling.cc" #include "../src/runtime/registry.cc" #include "../src/runtime/rpc/rpc_channel.cc" #include "../src/runtime/rpc/rpc_endpoint.cc" #include "../src/runtime/rpc/rpc_event_impl.cc" #include "../src/runtime/rpc/rpc_local_session.cc" #include "../src/runtime/rpc/rpc_module.cc" #include "../src/runtime/rpc/rpc_server_env.cc" #include "../src/runtime/rpc/rpc_session.cc" #include "../src/runtime/rpc/rpc_socket_impl.cc" #include "../src/runtime/system_library.cc" #include "../src/runtime/thread_pool.cc" #include "../src/runtime/threading_backend.cc" #include "../src/runtime/workspace_pool.cc" #ifdef TVM_OPENCL_RUNTIME #include "../src/runtime/opencl/opencl_device_api.cc" #include "../src/runtime/opencl/opencl_module.cc" #include "../src/runtime/opencl/opencl_wrapper/opencl_wrapper.cc" #include "../src/runtime/opencl/texture_pool.cc" #include "../src/runtime/source_utils.cc" #endif #ifdef TVM_VULKAN_RUNTIME #include "../src/runtime/vulkan/vulkan.cc" #endif #ifdef USE_SORT #include "../src/runtime/contrib/sort/sort.cc" #endif #include <android/log.h> namespace tvm { namespace runtime { namespace detail { // Override logging mechanism [[noreturn]] void LogFatalImpl(const std::string& file, int lineno, const std::string& message) { std::string m = file + ":" + std::to_string(lineno) + ": " + message; __android_log_write(ANDROID_LOG_FATAL, "TVM_RUNTIME", m.c_str()); throw InternalError(file, lineno, message); } void LogMessageImpl(const std::string& file, int lineno, int level, const std::string& message) { std::string m = file + ":" + std::to_string(lineno) + ": " + message; __android_log_write(ANDROID_LOG_DEBUG + level, "TVM_RUNTIME", m.c_str()); } } // namespace detail } // namespace runtime } // namespace tvm
// SPDX-License-Identifier: GPL-2.0 /* * arch/arm/kernel/kgdb.c * * ARM KGDB support * * Copyright (c) 2002-2004 MontaVista Software, Inc * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: George Davis <davis_g@mvista.com> * Deepak Saxena <dsaxena@plexity.net> */ #include <linux/irq.h> #include <linux/kdebug.h> #include <linux/kgdb.h> #include <linux/uaccess.h> #include <asm/patch.h> #include <asm/traps.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, { "r1", 4, offsetof(struct pt_regs, ARM_r1)}, { "r2", 4, offsetof(struct pt_regs, ARM_r2)}, { "r3", 4, offsetof(struct pt_regs, ARM_r3)}, { "r4", 4, offsetof(struct pt_regs, ARM_r4)}, { "r5", 4, offsetof(struct pt_regs, ARM_r5)}, { "r6", 4, offsetof(struct pt_regs, ARM_r6)}, { "r7", 4, offsetof(struct pt_regs, ARM_r7)}, { "r8", 4, offsetof(struct pt_regs, ARM_r8)}, { "r9", 4, offsetof(struct pt_regs, ARM_r9)}, { "r10", 4, offsetof(struct pt_regs, ARM_r10)}, { "fp", 4, offsetof(struct pt_regs, ARM_fp)}, { "ip", 4, offsetof(struct pt_regs, ARM_ip)}, { "sp", 4, offsetof(struct pt_regs, ARM_sp)}, { "lr", 4, offsetof(struct pt_regs, ARM_lr)}, { "pc", 4, offsetof(struct pt_regs, ARM_pc)}, { "f0", 12, -1 }, { "f1", 12, -1 }, { "f2", 12, -1 }, { "f3", 12, -1 }, { "f4", 12, -1 }, { "f5", 12, -1 }, { "f6", 12, -1 }, { "f7", 12, -1 }, { "fps", 4, -1 }, { "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)}, }; char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); else memset(mem, 0, dbg_reg_def[regno].size); return dbg_reg_def[regno].name; } int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return -EINVAL; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { struct thread_info *ti; int regno; /* Just making sure... */ if (task == NULL) return; /* Initialize to zero */ for (regno = 0; regno < GDB_MAX_REGS; regno++) gdb_regs[regno] = 0; /* Otherwise, we have only some registers from switch_to() */ ti = task_thread_info(task); gdb_regs[_R4] = ti->cpu_context.r4; gdb_regs[_R5] = ti->cpu_context.r5; gdb_regs[_R6] = ti->cpu_context.r6; gdb_regs[_R7] = ti->cpu_context.r7; gdb_regs[_R8] = ti->cpu_context.r8; gdb_regs[_R9] = ti->cpu_context.r9; gdb_regs[_R10] = ti->cpu_context.sl; gdb_regs[_FP] = ti->cpu_context.fp; gdb_regs[_SPT] = ti->cpu_context.sp; gdb_regs[_PC] = ti->cpu_context.pc; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) { regs->ARM_pc = pc; } static int compiled_break; int kgdb_arch_handle_exception(int exception_vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *linux_regs) { unsigned long addr; char *ptr; switch (remcom_in_buffer[0]) { case 'D': case 'k': case 'c': /* * Try to read optional parameter, pc unchanged if no parm. * If this was a compiled breakpoint, we need to move * to the next instruction or we will just breakpoint * over and over again. 
*/ ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ARM_pc = addr; else if (compiled_break == 1) linux_regs->ARM_pc += 4; compiled_break = 0; return 0; } return -1; } static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) { kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) { compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } static struct undef_hook kgdb_brkpt_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_BREAKINST, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_brk_fn }; static struct undef_hook kgdb_compiled_brkpt_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_COMPILED_BREAK, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_compiled_brk_fn }; static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; if (kgdb_handle_exception(1, args->signr, cmd, regs)) return NOTIFY_DONE; return NOTIFY_STOP; } static int kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { unsigned long flags; int ret; local_irq_save(flags); ret = __kgdb_notify(ptr, cmd); local_irq_restore(flags); return ret; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_notify, .priority = -INT_MAX, }; /** * kgdb_arch_init - Perform any architecture specific initalization. * * This function will handle the initalization of any architecture * specific callbacks. */ int kgdb_arch_init(void) { int ret = register_die_notifier(&kgdb_notifier); if (ret != 0) return ret; register_undef_hook(&kgdb_brkpt_hook); register_undef_hook(&kgdb_compiled_brkpt_hook); return 0; } /** * kgdb_arch_exit - Perform any architecture specific uninitalization. * * This function will handle the uninitalization of any architecture * specific callbacks, for dynamic registration and unregistration. */ void kgdb_arch_exit(void) { unregister_undef_hook(&kgdb_brkpt_hook); unregister_undef_hook(&kgdb_compiled_brkpt_hook); unregister_die_notifier(&kgdb_notifier); } int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; /* patch_text() only supports int-sized breakpoints */ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; /* Machine is already stopped, so we can use __patch_text() directly */ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); return err; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { /* Machine is already stopped, so we can use __patch_text() directly */ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); return 0; } /* * Register our undef instruction hooks with ARM undef core. * We register a hook specifically looking for the KGB break inst * and we handle the normal undef case within the do_undefinstr * handler. */ const struct kgdb_arch arch_kgdb_ops = { #ifndef __ARMEB__ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7} #else /* ! __ARMEB__ */ .gdb_bpt_instr = {0xe7, 0xff, 0xde, 0xfe} #endif };
/* ---------- * RI_FKey_setdefault_upd - * * Set foreign key references to defaults at update event on PK table. * ---------- */ Datum RI_FKey_setdefault_upd(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; RI_ConstraintInfo riinfo; Relation fk_rel; Relation pk_rel; HeapTuple new_row; HeapTuple old_row; RI_QueryKey qkey; SPIPlanPtr qplan; ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_upd", RI_TRIGTYPE_UPDATE); ri_FetchConstraintInfo(&riinfo, trigdata->tg_trigger, trigdata->tg_relation, true); if (riinfo.nkeys == 0) return PointerGetDatum(NULL); fk_rel = heap_open(riinfo.fk_relid, RowExclusiveLock); pk_rel = trigdata->tg_relation; new_row = trigdata->tg_newtuple; old_row = trigdata->tg_trigtuple; switch (riinfo.confmatchtype) { case FKCONSTR_MATCH_UNSPECIFIED: case FKCONSTR_MATCH_FULL: ri_BuildQueryKeyFull(&qkey, &riinfo, RI_PLAN_SETNULL_DEL_DOUPDATE); switch (ri_NullCheck(pk_rel, old_row, &qkey, RI_KEYPAIR_PK_IDX)) { case RI_KEYS_ALL_NULL: case RI_KEYS_SOME_NULL: heap_close(fk_rel, RowExclusiveLock); return PointerGetDatum(NULL); case RI_KEYS_NONE_NULL: break; } if (ri_KeysEqual(pk_rel, old_row, new_row, &riinfo, true)) { heap_close(fk_rel, RowExclusiveLock); return PointerGetDatum(NULL); } if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); { StringInfoData querybuf; StringInfoData qualbuf; char fkrelname[MAX_QUOTED_REL_NAME_LEN]; char attname[MAX_QUOTED_NAME_LEN]; char paramname[16]; const char *querysep; const char *qualsep; Oid queryoids[RI_MAX_NUMKEYS]; int i; initStringInfo(&querybuf); initStringInfo(&qualbuf); quoteRelationName(fkrelname, fk_rel); appendStringInfo(&querybuf, "UPDATE ONLY %s SET", fkrelname); querysep = ""; qualsep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]); Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]); quoteOneName(attname, RIAttName(fk_rel, riinfo.fk_attnums[i])); if (riinfo.confmatchtype == FKCONSTR_MATCH_FULL || !ri_OneKeyEqual(pk_rel, i, old_row, new_row, &riinfo, true)) { appendStringInfo(&querybuf, "%s %s = DEFAULT", querysep, attname); querysep = ","; } sprintf(paramname, "$%d", i + 1); ri_GenerateQual(&qualbuf, qualsep, paramname, pk_type, riinfo.pf_eq_oprs[i], attname, fk_type); qualsep = "AND"; queryoids[i] = pk_type; } appendStringInfoString(&querybuf, qualbuf.data); qplan = ri_PlanCheck(querybuf.data, riinfo.nkeys, queryoids, &qkey, fk_rel, pk_rel, false); } ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, true, SPI_OK_UPDATE, NameStr(riinfo.conname)); if (SPI_finish() != SPI_OK_FINISH) elog(ERROR, "SPI_finish failed"); heap_close(fk_rel, RowExclusiveLock); RI_FKey_noaction_upd(fcinfo); return PointerGetDatum(NULL); case FKCONSTR_MATCH_PARTIAL: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("MATCH PARTIAL not yet implemented"))); return PointerGetDatum(NULL); } elog(ERROR, "invalid confmatchtype"); return PointerGetDatum(NULL); }
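/*
 * Editor's note: purely for illustration, the statement assembled above for a
 * hypothetical two-column foreign key on a table named fktab has roughly this
 * shape (identifier quoting and the per-column operator resolution done by
 * ri_GenerateQual() are simplified away):
 *
 *	UPDATE ONLY fktab SET fk_a = DEFAULT, fk_b = DEFAULT
 *		WHERE $1 = fk_a AND $2 = fk_b
 *
 * with $1 and $2 planned using the primary-key column types collected in
 * queryoids[].
 */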
/* * Post-parse-analysis hook: mark query with a queryId */ static void pgss_post_parse_analyze(ParseState *pstate, Query *query) { pgssJumbleState jstate; pgssStoreKind kind = PGSS_PARSE; if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query); if (!IsSystemInitialized()) return; if (query->utilityStmt) { query->queryId = UINT64CONST(0); return; } query->queryId = get_query_id(&jstate, query); if (query->queryId == UINT64CONST(0)) query->queryId = UINT64CONST(1); if (jstate.clocations_count <= 0) return; pgss_store_query(query->queryId, pstate->p_sourcetext, query->commandType, query->stmt_location, query->stmt_len, &jstate, kind); }
// // Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef ZETASQL_COMMON_PROTO_HELPER_H_ #define ZETASQL_COMMON_PROTO_HELPER_H_ #include <cstdint> #include <optional> #include <set> #include <string> #include "google/protobuf/compiler/importer.h" #include "google/protobuf/descriptor.pb.h" #include "google/protobuf/descriptor.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "zetasql/base/status.h" namespace zetasql { // Simple implementation of proto ErrorCollector and MultiFileErrorCollector // interfaces that collects errors encountered while parsing proto files, and // provides accessors for the collected errors. The two interfaces are // supported because a single instance of this error collector is used // by modules for both a DescriptorPool and its related ProtoFileParser. class SimpleErrorCollector : public google::protobuf::DescriptorPool::ErrorCollector, public google::protobuf::compiler::MultiFileErrorCollector { public: SimpleErrorCollector() {} // Not copyable or movable. SimpleErrorCollector(const SimpleErrorCollector&) = delete; SimpleErrorCollector& operator=(const SimpleErrorCollector&) = delete; // MultiFileErrorCollector interface method. void AddError(const std::string& filename, int line, int column, const std::string& message) override { absl::StrAppend(&error_, "Filename ", filename, " Line ", line, " Column ", column, " :", message, "\n"); } // ErrorCollector interface method. void AddError(const std::string& filename, const std::string& element_name, const google::protobuf::Message* descriptor, ErrorLocation location, const std::string& message) override { // This implementation is copied from AppendToStringErrorCollector(), // and <location> is ignored. TODO: Figure out if <location> // is useful, and add it into the error string if so. Looking at the // ErrorLocation enum, it is unclear if it is useful in this context. absl::StrAppend(&error_, !error_.empty() ? "\n" : "", filename, " : ", element_name, " : ", message); } bool HasError() const { return !error_.empty(); } const std::string& GetError() const { return error_; } const void ClearError() { error_.clear(); } private: std::string error_; }; // Adds <file_descr> and all its dependent files to <file_descriptor_set> if // they are not already present in <file_descriptors>. Referenced files will be // added before referencing files. FileDescriptor dependencies do not allow // circular dependencies, so this cannot recurse indefinitely. Optionally // takes a <file_descriptor_set_max_size_bytes> which sets a maximum size // on the returned <file_descriptor_set>, and returns an error for an // out-of-memory condition (this is also checked via ThreadHasEnoughStack()). 
absl::Status PopulateFileDescriptorSet( const google::protobuf::FileDescriptor* file_descr, std::optional<int64_t> file_descriptor_set_max_size_bytes, google::protobuf::FileDescriptorSet* file_descriptor_set, std::set<const google::protobuf::FileDescriptor*>* file_descriptors); // Deserialize a FileDescriptorSet and add all FileDescriptors into the given // DescriptorPool. // Return an error status if the FileDescriptorSet is incomplete or contain // other error. absl::Status AddFileDescriptorSetToPool( const google::protobuf::FileDescriptorSet* file_descriptor_set, google::protobuf::DescriptorPool* pool); } // namespace zetasql #endif // ZETASQL_COMMON_PROTO_HELPER_H_
#include <stdio.h>
#include <stdlib.h>

/* Bubble-sort the array in ascending order. */
void	ft_sort(int *array, int size)
{
	int is_sorted;
	int i;
	int tmp;

	is_sorted = 0;
	while (!is_sorted)
	{
		i = 0;
		is_sorted = 1;
		while (i < size - 1)
		{
			if (*(array + i) > *(array + i + 1))
			{
				tmp = *(array + i);
				*(array + i) = *(array + i + 1);
				*(array + i + 1) = tmp;
				is_sorted = 0;
			}
			i++;
		}
		size--;
	}
}

/* Count the operands, i.e. every character that is not a '+'. */
int	ft_getcount(const char *str)
{
	int i;

	i = 0;
	while (*str)
	{
		i += *str != '+';
		str++;
	}
	return (i);
}

/* Build an int array from the single-digit operands of "a+b+c...". */
int	*ft_getarray(const char *str, int *size)
{
	int *array;
	int i;

	*size = ft_getcount(str);
	array = (int *)malloc(sizeof(int) * *size);
	i = 0;
	while (*str)
	{
		if (*str != '+')
		{
			*(array + i) = *str - '0';
			i++;
		}
		str++;
	}
	return (array);
}

int	main(void)
{
	int		*array;
	int		size;
	int		i;
	char	buffer[101];

	/* Bound the read to the buffer size (100 chars + terminating '\0'). */
	scanf("%100s", buffer);
	array = ft_getarray(buffer, &size);
	ft_sort(array, size);
	i = 0;
	while (i < size)
	{
		printf("%d%s", *(array + i), i < size - 1 ? "+" : "");
		i++;
	}
	free(array);
	return (0);
}
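/*
 * Editor's note: a worked example for the program above.  For the input
 * "3+1+2", ft_getarray() produces the digit array {3, 1, 2}; ft_sort()
 * reorders it to {1, 2, 3} and main() prints "1+2+3".  Only single-digit
 * operands separated by '+' are handled, and the input is assumed to fit
 * the 100-character buffer.
 */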
/* Add values to phi nodes in final_bb for the two new edges. E1F is the edge from the basic block loading values from an array and E2F from the basic block loading default values. BBF is the last switch basic block (see the bbf description in the comment below). */ static void fix_phi_nodes (edge e1f, edge e2f, basic_block bbf, struct switch_conv_info *info) { gphi_iterator gsi; int i; for (gsi = gsi_start_phis (bbf), i = 0; !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); tree inbound, outbound; if (virtual_operand_p (gimple_phi_result (phi))) inbound = outbound = info->target_vop; else { inbound = info->target_inbound_names[i]; outbound = info->target_outbound_names[i++]; } add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION); if (!info->default_case_nonstandard) add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION); } }
/****************************************************************************
PARAMETERS:
info    - PCIDeviceInfo structure for the newly found device
prev    - PCIDeviceInfo structure for a previously accepted device

RETURNS:
True if the device is a duplicate, false if not.

REMARKS:
This function goes through the list of all devices preceding the newly
found device in the info structure, and checks that the device is not a
duplicate of a previous device. Some devices incorrectly enumerate
themselves at different function addresses, so we check here to exclude
those cases.
****************************************************************************/
static ibool CheckDuplicate(
    PCIDeviceInfo *info,
    PCIDeviceInfo *prev)
{
    /* Ignore devices with a vendor ID of 0 */
    if (info->VendorID == 0)
        return true;

    /* Ignore devices that report the same device ID on the same
       bus/device slot as an earlier entry */
    if (info->slot.p.Bus == prev->slot.p.Bus &&
        info->slot.p.Device == prev->slot.p.Device &&
        info->DeviceID == prev->DeviceID)
        return true;

    return false;
}
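/*
 * Editor's note: a hedged sketch of how an enumeration loop might use the
 * check above: each newly probed function is compared against every entry
 * accepted so far before being appended.  AddIfUnique(), numFound and
 * maxDevices are illustrative names; PCIDeviceInfo and CheckDuplicate()
 * come from the surrounding code.
 */
static int AddIfUnique(
    PCIDeviceInfo *info,
    int numFound,
    PCIDeviceInfo *candidate,
    int maxDevices)
{
    int i;

    for (i = 0; i < numFound; i++) {
        if (CheckDuplicate(candidate, &info[i]))
            return numFound;            /* duplicate or empty slot: skip it */
    }
    if (numFound < maxDevices)
        info[numFound++] = *candidate;  /* accept the new device */
    return numFound;
}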
/** * @brief Process standard request * * @param None * * @return None * * @details Parse standard request and perform the corresponding action. * */ void USBD_StandardRequest(void) { uint32_t addr; g_usbd_CtrlInPointer = 0; g_usbd_CtrlInSize = 0ul; if((g_usbd_SetupPacket[0] & 0x80ul) == 0x80ul) { if(g_usbd_SetupPacket[1]== GET_CONFIGURATION) { addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0); M8(addr) = (uint8_t)g_usbd_UsbConfig; USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 1ul); USBD_PrepareCtrlOut(0, 0ul); } else if(g_usbd_SetupPacket[1]== GET_DESCRIPTOR) { USBD_GetDescriptor(); USBD_PrepareCtrlOut(0, 0ul); } else if(g_usbd_SetupPacket[1]== GET_INTERFACE) { addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0); M8(addr) = (uint8_t)g_usbd_UsbAltInterface; USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 1ul); USBD_PrepareCtrlOut(0, 0ul); } else if(g_usbd_SetupPacket[1]== GET_STATUS) { if(g_usbd_SetupPacket[0] == 0x80ul) { uint8_t u8Tmp; u8Tmp = (uint8_t)0ul; if ((g_usbd_sInfo->gu8ConfigDesc[7] & 0x40ul) == 0x40ul) { u8Tmp |= (uint8_t)1ul; } if ((g_usbd_sInfo->gu8ConfigDesc[7] & 0x20ul) == 0x20ul) { u8Tmp |= (uint8_t)(g_usbd_RemoteWakeupEn << 1ul); } addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0); M8(addr) = u8Tmp; } else if(g_usbd_SetupPacket[0] == 0x81ul) { addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0); M8(addr) = (uint8_t)0ul; } else if(g_usbd_SetupPacket[0] == 0x82ul) { uint8_t ep = (uint8_t)(g_usbd_SetupPacket[4] & 0xFul); addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0); M8(addr) = (uint8_t)(USBD_GetStall(ep) ? 1ul : 0ul); } addr = USBD_BUF_BASE + USBD_GET_EP_BUF_ADDR(EP0) + 1ul; M8(addr) = (uint8_t)0ul; USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 2ul); USBD_PrepareCtrlOut(0, 0ul); } else { USBD_SET_EP_STALL(EP0); USBD_SET_EP_STALL(EP1); } } else { if(g_usbd_SetupPacket[1]== CLEAR_FEATURE) { if(g_usbd_SetupPacket[2] == FEATURE_ENDPOINT_HALT) { uint32_t epNum, i; epNum = (uint8_t)(g_usbd_SetupPacket[4] & 0xFul); for(i = 0ul; i < USBD_MAX_EP; i++) { if(((USBD->EP[i].CFG & 0xFul) == epNum) && ((g_u32EpStallLock & (1ul << i)) == 0ul)) { USBD->EP[i].CFGP &= ~USBD_CFGP_SSTALL_Msk; } } } else if(g_usbd_SetupPacket[2] == FEATURE_DEVICE_REMOTE_WAKEUP) { g_usbd_RemoteWakeupEn = (uint8_t)0; } USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 0ul); } else if(g_usbd_SetupPacket[1]== SET_ADDRESS) { g_usbd_UsbAddr = g_usbd_SetupPacket[2]; USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 0ul); } else if(g_usbd_SetupPacket[1]== SET_CONFIGURATION) { g_usbd_UsbConfig = g_usbd_SetupPacket[2]; if(g_usbd_pfnSetConfigCallback) { #warning need to look at this at some point as it is being called in error from a cold boot } USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 0ul); } else if(g_usbd_SetupPacket[1]== SET_FEATURE) { if(g_usbd_SetupPacket[2] == FEATURE_ENDPOINT_HALT) { USBD_SetStall((uint8_t)(g_usbd_SetupPacket[4] & 0xFul)); } else if(g_usbd_SetupPacket[2] == FEATURE_DEVICE_REMOTE_WAKEUP) { g_usbd_RemoteWakeupEn = (uint8_t)1ul; } USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 0ul); } else if(g_usbd_SetupPacket[1]== SET_INTERFACE) { g_usbd_UsbAltInterface = g_usbd_SetupPacket[2]; if(g_usbd_pfnSetInterface != NULL) { g_usbd_pfnSetInterface(g_usbd_UsbAltInterface); } USBD_SET_DATA1(EP0); USBD_SET_PAYLOAD_LEN(EP0, 0ul); } else { USBD_SET_EP_STALL(EP0); USBD_SET_EP_STALL(EP1); } } }
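/*
 * Editor's note: the 8-byte SETUP packet parsed above follows the standard
 * USB 2.0 layout.  The struct below only documents how g_usbd_SetupPacket[0..7]
 * maps onto the spec fields (multi-byte fields are little-endian on the wire);
 * it is a reference sketch, not part of the Nuvoton BSP.
 */
#include <stdint.h>

struct usb_setup_packet {
    uint8_t  bmRequestType;  /* byte 0: direction (bit 7), type, recipient   */
    uint8_t  bRequest;       /* byte 1: GET_DESCRIPTOR, SET_ADDRESS, ...     */
    uint16_t wValue;         /* bytes 2-3: e.g. descriptor type/index        */
    uint16_t wIndex;         /* bytes 4-5: e.g. interface or endpoint number */
    uint16_t wLength;        /* bytes 6-7: length of the data stage          */
};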
/** * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests * @dev_priv: i915 device * @crtc: intel crtc * @crtc_state: incoming crtc_state to validate and setup scalers * * This function sets up scalers based on staged scaling requests for * a @crtc and its planes. It is called from crtc level check path. If request * is a supportable request, it attaches scalers to requested planes and crtc. * * This function takes into account the current scaler(s) in use by any planes * not being part of this atomic state * * Returns: * 0 - scalers were setup succesfully * error code - otherwise */ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) { struct drm_plane *plane = NULL; struct intel_plane *intel_plane; struct intel_plane_state *plane_state = NULL; struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; struct drm_atomic_state *drm_state = crtc_state->base.state; int num_scalers_need; int i, j; num_scalers_need = hweight32(scaler_state->scaler_users); if (num_scalers_need > intel_crtc->num_scalers){ DRM_DEBUG_KMS("Too many scaling requests %d > %d\n", num_scalers_need, intel_crtc->num_scalers); return -EINVAL; } for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { int *scaler_id; const char *name; int idx; if (!(scaler_state->scaler_users & (1 << i))) continue; if (i == SKL_CRTC_INDEX) { name = "CRTC"; idx = intel_crtc->base.base.id; scaler_id = &scaler_state->scaler_id; } else { name = "PLANE"; plane = drm_state->planes[i].ptr; if (!plane) { struct drm_plane_state *state; plane = drm_plane_from_index(&dev_priv->drm, i); state = drm_atomic_get_plane_state(drm_state, plane); if (IS_ERR(state)) { DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n", plane->base.id); return PTR_ERR(state); } crtc_state->base.planes_changed = true; } intel_plane = to_intel_plane(plane); idx = plane->base.id; if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { continue; } plane_state = intel_atomic_get_existing_plane_state(drm_state, intel_plane); scaler_id = &plane_state->scaler_id; } if (*scaler_id < 0) { for (j = 0; j < intel_crtc->num_scalers; j++) { if (!scaler_state->scalers[j].in_use) { scaler_state->scalers[j].in_use = 1; *scaler_id = j; DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", intel_crtc->pipe, *scaler_id, name, idx); break; } } } if (WARN_ON(*scaler_id < 0)) { DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx); continue; } if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { scaler_state->scalers[*scaler_id].mode = 0; } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) { *scaler_id = 0; scaler_state->scalers[0].in_use = 1; scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; scaler_state->scalers[1].in_use = 0; } else { scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; } } return 0; }
/** Sends a user interrupt via software. */
void send_user_interrupt(int intrsrc)
{
	int signo = SIGRTMIN + intrsrc;
	int r = kill(getpid(), signo);
	if (r)
		error("send_user_interrupt kill");
}
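/*
 * Editor's note: a minimal sketch of the receiving side for the helper above:
 * installing a handler for SIGRTMIN + intrsrc with sigaction() and raising
 * the same signal locally.  The handler body and the intrsrc value are
 * illustrative.
 */
#include <signal.h>
#include <string.h>

static void user_intr_handler(int signo)
{
	(void)signo;	/* keep real handlers async-signal-safe */
}

static int install_user_interrupt_handler(int intrsrc)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = user_intr_handler;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGRTMIN + intrsrc, &sa, NULL);
}

int main(void)
{
	if (install_user_interrupt_handler(3) != 0)
		return 1;
	raise(SIGRTMIN + 3);	/* same effect as kill(getpid(), signo) above */
	return 0;
}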
/* FNDECL is assumed to be a builtin where truncation can be propagated across (for instance floor((double)f) == (double)floorf (f). Do the transformation for a call with argument ARG. */ static tree fold_trunc_transparent_mathfn (location_t loc, tree fndecl, tree arg) { enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); if (!validate_arg (arg, REAL_TYPE)) return NULL_TREE; if (fcode == builtin_mathfn_code (arg)) return arg; if (! flag_errno_math && integer_valued_real_p (arg)) return arg; if (optimize) { tree arg0 = strip_float_extensions (arg); tree ftype = TREE_TYPE (TREE_TYPE (fndecl)); tree newtype = TREE_TYPE (arg0); tree decl; if (TYPE_PRECISION (newtype) < TYPE_PRECISION (ftype) && (decl = mathfn_built_in (newtype, fcode))) return fold_convert_loc (loc, ftype, build_call_expr_loc (loc, decl, 1, fold_convert_loc (loc, newtype, arg0))); } return NULL_TREE; }
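/*
 * Editor's note: a user-level illustration of the identity the folder above
 * exploits: applying the double-precision rounding builtin to a widened float
 * gives the same value as applying the float builtin and widening the result,
 * so the narrower call can be substituted once the float extension is
 * stripped.  This demo is not GCC code.
 */
#include <assert.h>
#include <math.h>

static void demo(float f)
{
	if (isnan(f))
		return;				/* NaN compares unequal to itself */
	double a = floor((double)f);		/* original: double builtin on widened arg */
	double b = (double)floorf(f);		/* folded: float builtin, then widen */
	assert(a == b);
}

int main(void)
{
	demo(2.5f);
	demo(-7.25f);
	demo(16777216.0f);	/* 2^24: already an integer in float */
	return 0;
}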
#pragma once #include <Eigen/Core> #include <vcg/complex/complex.h> #include <vcg/complex/algorithms/update/bounding.h> /* * Copy a mesh stored as a #V x 3 matrix of vertices, V, and a #F x 3 matrix of face indices into a VCG mesh */ template <typename DerivedV, typename DerivedF, typename DerivedN, typename VCGMesh> static void vcg_mesh_from_vfn(const Eigen::MatrixBase<DerivedV>& V, const Eigen::MatrixBase<DerivedF>& F, const Eigen::MatrixBase<DerivedN>& N, VCGMesh& m) { using namespace vcg; typename VCGMesh::VertexIterator vit = Allocator<VCGMesh>::AddVertices(m, V.rows()); std::vector<typename VCGMesh::VertexPointer> ivp(V.rows()); for (int i = 0; i < V.rows(); i++) { ivp[i] = &*vit; vit->P() = typename VCGMesh::CoordType(V(i, 0), V(i, 1), V(i, 2)); if (N.rows() > 0) { vit->N() = typename VCGMesh::CoordType(N(i, 0), N(i, 1), N(i, 2)); } vit++; } if (F.rows() > 0) { typename VCGMesh::FaceIterator fit = Allocator<VCGMesh>::AddFaces(m, F.rows()); for (int i = 0; i < F.rows(); i++) { fit->V(0) = ivp[F(i, 0)]; fit->V(1) = ivp[F(i, 1)]; fit->V(2) = ivp[F(i, 2)]; fit++; } } tri::UpdateBounding<VCGMesh>::Box(m); } /* * Copy a mesh stored as a #V x 3 matrix of vertices, V, and a #F x 3 matrix of face indices into a VCG mesh */ template <typename DerivedV, typename DerivedF, typename VCGMesh> static void vcg_mesh_from_vf(const Eigen::MatrixBase<DerivedV>& V, const Eigen::MatrixBase<DerivedF>& F, VCGMesh& m) { Eigen::MatrixXd N(0, 3); vcg_mesh_from_vfn(V, F, N, m); } /* * Copy a mesh stored as a #V x 3 matrix of vertices, V, and a #F x 3 matrix of face indices into a VCG mesh */ template <typename DerivedV, typename VCGMesh> static void vcg_mesh_from_v(const Eigen::MatrixBase<DerivedV>& V, VCGMesh& m) { Eigen::MatrixXi F(0, 3); Eigen::MatrixXd N(0, 3); vcg_mesh_from_vfn(V, F, N, m); } /* * Use this to sample vertex indices when we are sampling from a point cloud */ template<class MeshType> class EigenVertexIndexSampler { public: typedef typename MeshType::VertexType VertexType; // The mesh we are sampling from MeshType &sampled_mesh; // Indices into the mesh vertex array, this is an eigen matrix of some type typedef Eigen::Matrix<std::ptrdiff_t, Eigen::Dynamic, 1> IndexArray; IndexArray &indices; // Number of vertices int vcount = 0; EigenVertexIndexSampler(MeshType &in_mesh, IndexArray &out_inds) : sampled_mesh(in_mesh), indices(out_inds) { } void trim() { indices.conservativeResize(vcount, 1); } void reset() { vcount = 0; } void maybe_resize() { // If we are about to overflow indexes, double its size if (indices.size() <= vcount) { const int n_rows = indices.size() == 0 ? 1024 : indices.size(); indices.conservativeResize(2 * n_rows, 1); } } void AddVert(const VertexType &p) { maybe_resize(); std::ptrdiff_t p_offset = &p - &*sampled_mesh.vert.begin(); indices(vcount, 0) = p_offset; vcount += 1; } };
#pragma once #include <memory> #include <random> #include <shared_mutex> #include "common/cache.h" #include "common/clock.h" #include "common/error.h" #include "common/logger.h" #include "common/utils.h" #include "dns/dnsfilter/dnsfilter.h" #include "dns/proxy/dnsproxy.h" #include "dns/proxy/dnsproxy_events.h" #include "dns/proxy/dnsproxy_settings.h" #include "dns/upstream/upstream.h" #include "dns64.h" #include "response_cache.h" #include "retransmission_detector.h" namespace ag::dns { struct UpstreamExchangeResult { Result<ldns_pkt_ptr, DnsError> result; Upstream *upstream; }; class DnsForwarder { public: using InitResult = std::pair<bool, Error<DnsProxyInitError>>; DnsForwarder(); ~DnsForwarder(); InitResult init(EventLoopPtr loop, const DnsProxySettings &settings, const DnsProxyEvents &events); void deinit(); coro::Task<Uint8Vector> handle_message(Uint8View message, const DnsMessageInfo *info); private: struct HandleMessageResult { Uint8Vector response_wire; DnsRequestProcessedEvent event; bool timed_out; }; void truncate_response(ldns_pkt *response, const ldns_pkt *request, const DnsMessageInfo *info); coro::Task<Uint8Vector> handle_message_with_timeout( ldns_pkt_ptr request, std::optional<DnsMessageInfo> info, bool fallback_only); coro::Task<HandleMessageResult> handle_message_internal( ldns_pkt_ptr request, std::optional<DnsMessageInfo> info, bool fallback_only); coro::Task<UpstreamExchangeResult> do_upstreams_exchange(std::string_view normalized_domain, const ldns_pkt *request, bool force_fallback, const DnsMessageInfo *info = nullptr); coro::Task<UpstreamExchangeResult> do_upstream_exchange( Upstream *upstream, const ldns_pkt *request, const DnsMessageInfo *info, Millis error_rtt); coro::Task<UpstreamExchangeResult> do_upstream_exchange_shared( Upstream *upstream, std::shared_ptr<const ldns_pkt> request, const DnsMessageInfo *info, Millis error_rtt); coro::Task<UpstreamExchangeResult> do_parallel_exchange(const std::vector<Upstream *> &upstreams, const ldns_pkt *request, const DnsMessageInfo *info, Millis error_rtt, bool wait_all); bool apply_fallback_filter(std::string_view hostname, const ldns_pkt *request); coro::Task<ldns_pkt_ptr> apply_filter(DnsFilter::MatchParam match, const ldns_pkt *request, DnsRequestProcessedEvent &event, std::vector<DnsFilter::Rule> &last_effective_rules, bool fallback_only); coro::Task<ldns_pkt_ptr> apply_cname_filter(const ldns_rr *cname_rr, const ldns_pkt *request, DnsRequestProcessedEvent &event, std::vector<DnsFilter::Rule> &last_effective_rules, bool fallback_only); coro::Task<ldns_pkt_ptr> apply_ip_filter(const ldns_rr *rr, const ldns_pkt *request, DnsRequestProcessedEvent &event, std::vector<DnsFilter::Rule> &last_effective_rules, bool fallback_only); coro::Task<ldns_pkt_ptr> try_dns64_aaaa_synthesis(Upstream *upstream, const ldns_pkt_ptr &request) const; void finalize_processed_event(DnsRequestProcessedEvent &event, const ldns_pkt *request, const ldns_pkt *response, const ldns_pkt *original_response, std::optional<int32_t> upstream_id, Error<DnsError> error = nullptr) const; bool finalize_dnssec_log_logic(ldns_pkt *response, bool is_our_do_bit); Logger m_log{"dns_forwarder"}; EventLoopPtr m_loop; const DnsProxySettings *m_settings = nullptr; const DnsProxyEvents *m_events = nullptr; std::vector<UpstreamPtr> m_upstreams; std::vector<UpstreamPtr> m_fallbacks; DnsFilter m_filter; DnsFilter::Handle m_filter_handle = nullptr; DnsFilter::Handle m_fallback_filter_handle = nullptr; dns64::StatePtr m_dns64_state = nullptr; 
std::shared_ptr<SocketFactory> m_socket_factory; std::shared_ptr<bool> m_shutdown_guard; ResponseCache m_response_cache; RetransmissionDetector m_retransmission_detector; std::default_random_engine m_random_engine; coro::Task<void> optimistic_cache_background_resolve(ldns_pkt_ptr req, std::string normalized_domain); }; } // namespace ag::dns
#include <stdio.h>

int main(void)
{
	int arr[150];
	int n, i, max, maxindex = 0, min, minindex = 0;

	scanf("%d", &n);
	for (i = 0; i < n; i++)
		scanf("%d", &arr[i]);

	/* index of the first (leftmost) maximum */
	max = arr[0];
	for (i = 0; i < n; i++) {
		if (arr[i] > max) {
			max = arr[i];
			maxindex = i;
		}
	}

	/* index of the last (rightmost) minimum */
	min = arr[0];
	for (i = 0; i < n; i++) {
		if (arr[i] <= min) {
			min = arr[i];
			minindex = i;
		}
	}

	/* swaps needed to move the maximum to the front and the minimum to
	   the end; when the minimum starts before the maximum, the two moves
	   share one swap */
	if (minindex < maxindex)
		printf("%d\n", maxindex + (n - 1 - minindex) - 1);
	else
		printf("%d\n", maxindex + (n - 1 - minindex));

	return 0;
}
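/*
 * Editor's note: a worked example for the program above.  For n = 4 and the
 * array {3, 1, 5, 2}: the first maximum (5) is at index 2 and the last
 * minimum (1) is at index 1.  Moving the maximum to the front takes 2 swaps,
 * which shifts the minimum to index 2, so moving it to the end takes 1 more
 * swap.  The program prints 3, i.e. maxindex + (n - 1 - minindex) - 1,
 * because the minimum originally sits before the maximum.
 */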
/* Return a char pointer for a C string if it is a string constant or sum of string constant and integer constant. We only support string constants properly terminated with '\0' character. If STRLEN is a valid pointer, length (including terminating character) of returned string is stored to the argument. */ const char * c_getstr (tree src, unsigned HOST_WIDE_INT *strlen) { tree offset_node; if (strlen) *strlen = 0; src = string_constant (src, &offset_node); if (src == 0) return NULL; unsigned HOST_WIDE_INT offset = 0; if (offset_node != NULL_TREE) { if (!tree_fits_uhwi_p (offset_node)) return NULL; else offset = tree_to_uhwi (offset_node); } unsigned HOST_WIDE_INT string_length = TREE_STRING_LENGTH (src); const char *string = TREE_STRING_POINTER (src); if (string_length == 0 || string[string_length - 1] != '\0' || offset >= string_length) return NULL; if (strlen) *strlen = string_length - offset; return string + offset; }
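/*
 * Editor's note: an illustration (not GCC source) of what c_getstr() returns.
 * For a tree representing the constant expression "hello" + 2, it yields a
 * pointer to "llo" inside the string constant and, via *strlen, a length of
 * 4 (three characters plus the terminating NUL).  An offset at or past the
 * string length, or a string constant without a trailing '\0', yields NULL.
 */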
/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

/**
 * @file
 * @brief This file provides the declaration of the class `NavigationLane`.
 */

#pragma once

#include <list>
#include <memory>
#include <tuple>
#include <unordered_map>
#include <utility>

#include "modules/common/vehicle_state/proto/vehicle_state.pb.h"
#include "modules/localization/proto/localization.pb.h"
#include "modules/map/relative_map/proto/navigation.pb.h"
#include "modules/map/relative_map/proto/relative_map_config.pb.h"
#include "modules/perception/proto/perception_obstacle.pb.h"

/**
 * @namespace apollo::relative_map
 * @brief apollo::relative_map
 */
namespace apollo {
namespace relative_map {

// A navigation path tuple.
//
// first element: original navigation line index of the current navigation
// path. A negative value indicates illegal.
//
// second element: half of the lateral distance to the left adjacent navigation
// path, that is, the left width of the lane generated by this navigation path.
// If the navigation path is generated based on lane markers, the value is the
// perceived left lane width. If there is no left adjacent navigation path, the
// value is "default_left_width_". A negative value indicates illegal.
//
// third element: half of the lateral distance to the right adjacent navigation
// path, that is, the right width of the lane generated by this navigation
// path. If the navigation path is generated based on lane markers, the value
// is the perceived right lane width. If there is no right adjacent navigation
// path, the value is "default_right_width_". A negative value indicates
// illegal.
//
// fourth element: a shared pointer of the current navigation path.
typedef std::tuple<int, double, double, std::shared_ptr<NavigationPath>>
    NaviPathTuple;

// A stitching index pair.
// pair.first: the start stitching index of the current navigation line.
// pair.second: the end stitching index of the current navigation line.
typedef std::pair<int, int> StitchIndexPair;

// A projection index pair.
// pair.first: projection index of the vehicle in the current navigation line.
// pair.second: the distance between the vehicle's initial position and the
// projection position in the current navigation line.
typedef std::pair<int, double> ProjIndexPair;

/**
 * @class NavigationLane
 * @brief NavigationLane generates a real-time relative map based on navigation
 * lines.
 *
 * First, several navigation lines are received from the `NavigationInfo`
 * object;
 *
 * Second, several navigation line segments with a length of about 250 m are
 * cut from the whole navigation lines, and the UTM coordinates are converted
 * into local coordinates with the current position of the vehicle as the
 * origin;
 *
 * Third, the navigation line segment of the vehicle's current lane is merged
 * with the perceived lane centerline;
 *
 * Fourth, a real-time relative map is dynamically created based on navigation
 * line segments and the perceived lane width;
 *
 * Fifth, the relative map is output as a `MapMsg` object pointer.
 */
class NavigationLane {
 public:
  NavigationLane() = default;
  explicit NavigationLane(const NavigationLaneConfig& config);
  ~NavigationLane() = default;

  /**
   * @brief Set the configuration information required by the
   * `NavigationLane`.
   * @param config Configuration object.
   * @return None.
   */
  void SetConfig(const NavigationLaneConfig& config);

  /**
   * @brief Update navigation line information.
   * @param navigation_info Navigation line information to be updated.
   * @return None.
   */
  void UpdateNavigationInfo(const NavigationInfo& navigation_info);

  /**
   * @brief Set the default width of a lane.
   * @param left_width Left half width of a lane.
   * @param right_width Right half width of a lane.
   * @return None.
   */
  void SetDefaultWidth(const double left_width, const double right_width) {
    default_left_width_ = left_width;
    default_right_width_ = right_width;
  }

  /**
   * @brief Generate a suitable path (i.e. a navigation line segment).
   * @param
   * @return True if a suitable path is created; false otherwise.
   */
  bool GeneratePath();

  /**
   * @brief Update perceived lane line information.
   * @param perception_obstacles Perceived lane line information to be
   * updated.
   * @return None.
   */
  void UpdatePerception(
      const perception::PerceptionObstacles& perception_obstacles) {
    perception_obstacles_ = perception_obstacles;
  }

  /**
   * @brief Get the generated lane segment where the vehicle is currently
   * located.
   * @param
   * @return The generated lane segment where the vehicle is currently
   * located.
   */
  NavigationPath Path() const {
    const auto& current_navi_path = std::get<3>(current_navi_path_tuple_);
    if (current_navi_path) {
      return *current_navi_path;
    }
    return NavigationPath();
  }

  /**
   * @brief Generate a real-time relative map of approximately 250 m in length
   * based on several navigation line segments and map generation
   * configuration information.
   * @param map_config Map generation configuration information.
   * @param map_msg A pointer which outputs the real-time relative map.
   * @return True if the real-time relative map is created; false otherwise.
   */
  bool CreateMap(const MapGenerationParam& map_config,
                 MapMsg* const map_msg) const;

 private:
  /**
   * @brief Calculate the value of a cubic polynomial according to the given
   * coefficients and an independent variable.
   * @param c0 Cubic polynomial coefficient.
   * @param c1 Cubic polynomial coefficient.
   * @param c2 Cubic polynomial coefficient.
   * @param c3 Cubic polynomial coefficient.
   * @param x Independent variable.
   * @return Calculated value of the cubic polynomial.
   */
  double EvaluateCubicPolynomial(const double c0, const double c1,
                                 const double c2, const double c3,
                                 const double x) const;

  /**
   * @brief Calculate the curvature value based on the cubic polynomial's
   * coefficients and an independent variable.
   * @param c1 Cubic polynomial coefficient.
   * @param c2 Cubic polynomial coefficient.
   * @param c3 Cubic polynomial coefficient.
   * @param x Independent variable.
   * @return Calculated curvature value.
   */
  double GetKappa(const double c1, const double c2, const double c3,
                  const double x);

  /**
   * @brief In a navigation line segment, starting from the point given by
   * `start_index`, the matched point after the distance `s` is calculated,
   * and the index of the matched point is given.
   * @param path The specific navigation line segment.
   * @param start_index The index of the starting point.
   * @param s The distance from the starting point.
   * @param matched_index The pointer storing the index of the matched point.
   * @return The matched point after the distance `s`.
   */
  common::PathPoint GetPathPointByS(const common::Path& path,
                                    const int start_index, const double s,
                                    int* const matched_index);

  /**
   * @brief Generate a lane centerline from the perceived lane markings and
   * convert it to a navigation line segment.
   * @param lane_marker The perceived lane markings.
   * @param path The converted navigation line segment.
   * @return None.
   */
  void ConvertLaneMarkerToPath(const perception::LaneMarkers& lane_marker,
                               common::Path* const path);

  /**
   * @brief A navigation line segment with a length of about 250 m is cut
   * from the whole navigation lines, and the UTM coordinates are converted
   * into local coordinates with the current position of the vehicle as the
   * origin.
   * @param line_index The index of the navigation line segment vector.
   * @param path The converted navigation line segment.
   * @return True if a suitable path is created; false otherwise.
   */
  bool ConvertNavigationLineToPath(const int line_index,
                                   common::Path* const path);

  /**
   * @brief Merge the navigation line segment of the vehicle's current lane
   * and the perceived lane centerline.
   * @param line_index The index of the navigation line segment vector.
   * @param path The merged navigation line segment.
   * @return None.
   */
  void MergeNavigationLineAndLaneMarker(const int line_index,
                                        common::Path* const path);

  /**
   * @brief Update the index of the vehicle's current location in an entire
   * navigation line.
   * @param path The entire navigation line. Note that the path here refers
   * to the entire navigation line stored in UTM coordinates.
   * @param line_index The index of the whole navigation line vector stored
   * in a `NavigationInfo` object.
   * @return Updated projection index pair.
   */
  ProjIndexPair UpdateProjectionIndex(const common::Path& path,
                                      const int line_index);

  /**
   * @brief If an entire navigation line is a cyclic/circular route, the
   * closest matching point at the starting and end positions is recorded so
   * that the vehicle can drive cyclically.
   * @param
   * @return None.
   */
  void UpdateStitchIndexInfo();

 private:
  // the configuration information required by the `NavigationLane`
  NavigationLaneConfig config_;

  // received from topic: /apollo/perception_obstacles
  perception::PerceptionObstacles perception_obstacles_;

  // received from topic: /apollo/navigation
  NavigationInfo navigation_info_;

  // navigation_path_list_ is a list of navigation paths. The internal paths
  // are arranged from left to right based on the vehicle's driving direction.
  // A navigation path is the combined result from perception and navigation.
  std::list<NaviPathTuple> navigation_path_list_;

  // the navigation path which the vehicle is currently on.
  NaviPathTuple current_navi_path_tuple_;

  // when invalid, perceived_left_width_ < 0
  double perceived_left_width_ = -1.0;

  // when invalid, perceived_right_width_ < 0
  double perceived_right_width_ = -1.0;

  // The standard lane width of China's expressway is 3.75 meters.
  double default_left_width_ = 1.875;
  double default_right_width_ = 1.875;

  // key: line index,
  // value: last projection index pair in the "key" line.
  std::unordered_map<int, ProjIndexPair> last_project_index_map_;

  // key: line index,
  // value: stitching index pair in the "key" line.
  std::unordered_map<int, StitchIndexPair> stitch_index_map_;

  // in world coordinates: ENU
  localization::Pose original_pose_;
};

}  // namespace relative_map
}  // namespace apollo
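// Hedged usage sketch (not part of the Apollo source): one plausible way a
// caller could drive the public interface declared above in a single update
// cycle. The include path is assumed, and all message objects (config,
// navigation_info, obstacles, map_config, map_msg) are hypothetical
// placeholders filled in elsewhere by the caller.
#include "modules/map/relative_map/navigation_lane.h"  // assumed header path

namespace apollo {
namespace relative_map {

bool BuildRelativeMapOnce(const NavigationLaneConfig& config,
                          const NavigationInfo& navigation_info,
                          const perception::PerceptionObstacles& obstacles,
                          const MapGenerationParam& map_config,
                          MapMsg* map_msg) {
  NavigationLane navigation_lane(config);
  // Half lane widths in meters; 1.875 matches the defaults documented above.
  navigation_lane.SetDefaultWidth(1.875, 1.875);
  navigation_lane.UpdateNavigationInfo(navigation_info);
  navigation_lane.UpdatePerception(obstacles);
  if (!navigation_lane.GeneratePath()) {
    return false;  // no suitable navigation line segment could be generated
  }
  return navigation_lane.CreateMap(map_config, map_msg);
}

}  // namespace relative_map
}  // namespace apollo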
/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: David Soria Parra <david.soriaparra@sun.com>                |
   +----------------------------------------------------------------------+
*/

#ifndef _ZEND_DTRACE_H
#define _ZEND_DTRACE_H

#ifndef ZEND_WIN32
# include <unistd.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef HAVE_DTRACE

ZEND_API extern zend_op_array *(*zend_dtrace_compile_file)(zend_file_handle *file_handle, int type);
ZEND_API extern void (*zend_dtrace_execute)(zend_op_array *op_array);
ZEND_API extern void (*zend_dtrace_execute_internal)(zend_execute_data *execute_data, zval *return_value);

ZEND_API zend_op_array *dtrace_compile_file(zend_file_handle *file_handle, int type);
ZEND_API void dtrace_execute_ex(zend_execute_data *execute_data);
ZEND_API void dtrace_execute_internal(zend_execute_data *execute_data, zval *return_value);

#include <zend_dtrace_gen.h>

void dtrace_error_notify_cb(int type, zend_string *error_filename, uint32_t error_lineno, zend_string *message);

#endif /* HAVE_DTRACE */

#ifdef __cplusplus
}
#endif

#endif /* _ZEND_DTRACE_H */
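/* Hedged sketch (not the actual Zend startup code): one plausible way a build
 * with HAVE_DTRACE could route compilation and execution through the wrappers
 * declared above, assuming the standard Zend hook pointers zend_compile_file,
 * zend_execute_ex and zend_execute_internal are available. The helper name
 * wire_up_dtrace_hooks is hypothetical. */
#ifdef HAVE_DTRACE
static void wire_up_dtrace_hooks(void)
{
	/* Install the DTrace-instrumented wrappers as the engine's handlers so
	 * that every compile/execute cycle fires the generated probes. */
	zend_compile_file = dtrace_compile_file;
	zend_execute_ex = dtrace_execute_ex;
	zend_execute_internal = dtrace_execute_internal;
}
#endif /* HAVE_DTRACE */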
/*
 * Function    : libaroma_control_new
 * Return Value: LIBAROMA_CONTROLP
 * Descriptions: create primitive control
 */
LIBAROMA_CONTROLP libaroma_control_new(
    word id,
    int x, int y, int w, int h,
    int minw, int minh,
    voidp internal,
    LIBAROMA_CONTROL_HANDLERP handler,
    LIBAROMA_WINDOWP win
){
  if (handler==NULL){
    return NULL;
  }
  LIBAROMA_CONTROLP ret = (LIBAROMA_CONTROLP)
    calloc(1, sizeof(LIBAROMA_CONTROL));
  if (!ret){
    ALOGW("libaroma_control_new cannot allocate memory");
    return NULL;
  }
  ret->minw     = minw;
  ret->minh     = minh;
  ret->rx       = x;
  ret->ry       = y;
  ret->rw       = w;
  ret->rh       = h;
  ret->id       = id;
  ret->handler  = handler;
  ret->internal = internal;
  if (win){
    /* attach to the window when one is supplied */
    return libaroma_window_attach(win, ret);
  }
  return ret;
}
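/* Hedged usage sketch (not part of libaroma): illustrates the argument order
 * and NULL checks of libaroma_control_new above. The window `win` and the
 * handler table `my_handler` are assumed to have been initialized elsewhere,
 * and make_demo_control is a hypothetical helper name. */
LIBAROMA_CONTROLP make_demo_control(
    LIBAROMA_WINDOWP win,
    LIBAROMA_CONTROL_HANDLERP my_handler
){
  /* Create a 200x48 control at (10, 10) with a 48x48 minimum size and no
   * private data; because `win` is non-NULL, attachment happens inside
   * libaroma_control_new itself. */
  LIBAROMA_CONTROLP ctl = libaroma_control_new(
      /* id       */ 1,
      /* x, y     */ 10, 10,
      /* w, h     */ 200, 48,
      /* min w, h */ 48, 48,
      /* internal */ NULL,
      /* handler  */ my_handler,
      /* window   */ win
  );
  if (!ctl){
    /* handler was NULL, allocation failed, or attachment returned NULL */
    return NULL;
  }
  return ctl;
}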