idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
35,700
public static int getSupervisorPortNum ( Map conf , int sysCpuNum , Long physicalMemSize ) { double cpuWeight = ConfigExtension . getSupervisorSlotsPortCpuWeight ( conf ) ; int cpuPortNum = ( int ) ( sysCpuNum / cpuWeight ) ; if ( cpuPortNum < 1 ) { LOG . info ( "Invalid supervisor.slots.port.cpu.weight setting :" + cp...
calculate port number from cpu number and physical memory size
35,701
public static Map < String , Double > getMetrics ( Map conf , String topologyName , MetaType metricType , Integer window ) { NimbusClientWrapper nimbusClient = null ; Iface client = null ; Map < String , Double > summary = new HashMap < > ( ) ; try { nimbusClient = new NimbusClientWrapper ( ) ; nimbusClient . init ( co...
Get Topology Metrics
35,702
public < T > T time ( Callable < T > event ) throws Exception { final long startTime = System . currentTimeMillis ( ) ; try { return event . call ( ) ; } finally { update ( System . currentTimeMillis ( ) - startTime ) ; } }
Times and records the duration of event .
35,703
private Map < Integer , LocalAssignment > getLocalAssign ( StormClusterState stormClusterState , String supervisorId , Map < String , Assignment > assignments ) throws Exception { Map < Integer , LocalAssignment > portToAssignment = new HashMap < > ( ) ; for ( Entry < String , Assignment > assignEntry : assignments . e...
a port must be assigned to a topology
35,704
@ SuppressWarnings ( "unused" ) private Map < Integer , LocalAssignment > readMyTasks ( StormClusterState stormClusterState , String topologyId , String supervisorId , Assignment assignmentInfo ) throws Exception { Map < Integer , LocalAssignment > portTasks = new HashMap < > ( ) ; Set < ResourceWorkerSlot > workers = ...
get local node s tasks
35,705
public static Map < String , String > getTopologyCodeLocations ( Map < String , Assignment > assignments , String supervisorId ) throws Exception { Map < String , String > rtn = new HashMap < > ( ) ; for ( Entry < String , Assignment > entry : assignments . entrySet ( ) ) { String topologyId = entry . getKey ( ) ; Assi...
get master code dir for each topology
35,706
public void emitDirect ( int taskId , String streamId , Tuple anchor , List < Object > tuple ) { emitDirect ( taskId , streamId , Arrays . asList ( anchor ) , tuple ) ; }
Emits a tuple directly to the specified task id on the specified stream . If the target bolt does not subscribe to this bolt using a direct grouping the tuple will not be sent . If the specified output stream is not declared as direct or the target bolt subscribes with a non - direct grouping an error will occur at run...
35,707
private boolean monitorApplication ( ApplicationId appId ) throws YarnException , IOException { Integer monitorTimes = JOYConstants . MONITOR_TIMES ; while ( true ) { try { Thread . sleep ( JOYConstants . MONITOR_TIME_INTERVAL ) ; } catch ( InterruptedException e ) { LOG . debug ( "Thread sleep in monitoring loop inter...
Monitor the submitted application for completion . Kill application if time expires .
35,708
public List < Long > getAllVersions ( ) throws IOException { List < Long > ret = new ArrayList < > ( ) ; for ( String s : listDir ( _root ) ) { if ( s . endsWith ( FINISHED_VERSION_SUFFIX ) ) { ret . add ( validateAndGetVersion ( s ) ) ; } } Collections . sort ( ret ) ; Collections . reverse ( ret ) ; return ret ; }
Sorted from most recent to oldest
35,709
public static void main ( String [ ] args ) { try { KeyGenerator kgen = KeyGenerator . getInstance ( "Blowfish" ) ; SecretKey skey = kgen . generateKey ( ) ; byte [ ] raw = skey . getEncoded ( ) ; String keyString = new String ( Hex . encodeHex ( raw ) ) ; System . out . println ( "storm -c " + SECRET_KEY + "=" + keySt...
Produce a blowfish key to be used in Storm jar command
35,710
public boolean checkHeartBeat ( ) { String dataPath = executorMeta . getLocalDir ( ) ; File localstate = new File ( dataPath + "/data/" + startType + "/" + startType + ".heartbeat/" ) ; Long modefyTime = localstate . lastModified ( ) ; if ( System . currentTimeMillis ( ) - modefyTime > JOYConstants . EXECUTOR_HEARTBEAT...
check supervisor s heartBeat
35,711
public boolean setJstormConf ( String key , String value ) { String line = " " + key + ": " + value ; try { Files . write ( Paths . get ( "deploy/jstorm/conf/storm.yaml" ) , line . getBytes ( ) , StandardOpenOption . APPEND ) ; } catch ( IOException e ) { LOG . error ( e ) ; return false ; } return true ; }
set local conf
35,712
private Map < String , String > generateSidToHost ( ) { Map < String , String > sidToHostname = new HashMap < > ( ) ; if ( oldAssignment != null ) { sidToHostname . putAll ( oldAssignment . getNodeHost ( ) ) ; } for ( Entry < String , SupervisorInfo > entry : cluster . entrySet ( ) ) { String supervisorId = entry . get...
Do we need just handle the case when type is ASSIGN_TYPE_NEW?
35,713
public static int tryPort ( int port ) throws IOException { ServerSocket socket = new ServerSocket ( port ) ; int rtn = socket . getLocalPort ( ) ; socket . close ( ) ; return rtn ; }
Check whether the port is available to bind
35,714
private Set < String > get_cleanup_ids ( StormClusterState clusterState , List < String > activeTopologies ) throws Exception { List < String > task_ids = clusterState . task_storms ( ) ; List < String > heartbeat_ids = clusterState . heartbeat_storms ( ) ; List < String > error_ids = clusterState . task_error_storms (...
get topology ids that need to be cleaned up
35,715
public Assignment mkAssignment ( TopologyAssignEvent event ) throws Exception { String topologyId = event . getTopologyId ( ) ; LOG . info ( "Determining assignment for " + topologyId ) ; TopologyAssignContext context = prepareTopologyAssign ( event ) ; Set < ResourceWorkerSlot > assignments ; if ( ! StormConfig . loca...
make assignments for a topology The nimbus core function this function has been totally rewrite
35,716
public static Set < Integer > getNewOrChangedTaskIds ( Set < ResourceWorkerSlot > oldWorkers , Set < ResourceWorkerSlot > workers ) { Set < Integer > rtn = new HashSet < > ( ) ; HashMap < String , ResourceWorkerSlot > workerPortMap = HostPortToWorkerMap ( oldWorkers ) ; for ( ResourceWorkerSlot worker : workers ) { Res...
get all task ids which are newly assigned or reassigned
35,717
public static List < WorkerSlot > sortSlots ( Set < WorkerSlot > allSlots , int needSlotNum ) { Map < String , List < WorkerSlot > > nodeMap = new HashMap < > ( ) ; for ( WorkerSlot np : allSlots ) { String node = np . getNodeId ( ) ; List < WorkerSlot > list = nodeMap . get ( node ) ; if ( list == null ) { list = new ...
sort slots the purpose is to ensure that the tasks are assigned in balancing
35,718
public Set < Integer > getUnstoppedSlots ( Set < Integer > aliveTasks , Map < String , SupervisorInfo > supInfos , Assignment existAssignment ) { Set < Integer > ret = new HashSet < > ( ) ; Set < ResourceWorkerSlot > oldWorkers = existAssignment . getWorkers ( ) ; Set < String > aliveSupervisors = supInfos . keySet ( )...
Get unstopped slots from alive task list
35,719
public static void getFreeSlots ( Map < String , SupervisorInfo > supervisorInfos , StormClusterState stormClusterState ) throws Exception { Map < String , Assignment > assignments = Cluster . get_all_assignment ( stormClusterState , null ) ; for ( Entry < String , Assignment > entry : assignments . entrySet ( ) ) { As...
Get free resources
35,720
public Set < Integer > getAliveTasks ( String topologyId , Set < Integer > taskIds ) throws Exception { Set < Integer > aliveTasks = new HashSet < > ( ) ; for ( int taskId : taskIds ) { boolean isDead = NimbusUtils . isTaskDead ( nimbusData , topologyId , taskId ) ; if ( ! isDead ) { aliveTasks . add ( taskId ) ; } } r...
find all alive task ids . Do not assume that clocks are synchronized . Task heartbeat is only used so that nimbus knows when it s received a new heartbeat . All timing is done by nimbus and tracked through task - heartbeat - cache
35,721
public void backupAssignment ( Assignment assignment , TopologyAssignEvent event ) { String topologyId = event . getTopologyId ( ) ; String topologyName = event . getTopologyName ( ) ; try { StormClusterState zkClusterState = nimbusData . getStormClusterState ( ) ; Map < Integer , String > tasks = Cluster . get_all_tas...
Backup topology assignment to ZK
35,722
public BoltDeclarer setBolt ( String id , ITridentBatchBolt bolt , Integer parallelism , Set < String > committerBatches , Map < String , String > batchGroups ) { markBatchGroups ( id , batchGroups ) ; Component c = new Component ( bolt , parallelism , committerBatches ) ; _bolts . put ( id , c ) ; return new BoltDecla...
map from stream name to batch id
35,723
private void commitProgress ( FileOffset position ) { if ( position == null ) { return ; } if ( lock != null && canCommitNow ( ) ) { try { String pos = position . toString ( ) ; lock . heartbeat ( pos ) ; LOG . debug ( "{} Committed progress. {}" , spoutId , pos ) ; acksSinceLastCommit = 0 ; commitTimeElapsed . set ( f...
will commit progress into lock file if commit threshold is reached
35,724
private FileLock getOldestExpiredLock ( ) throws IOException { DirLock dirlock = DirLock . tryLock ( hdfs , lockDirPath ) ; if ( dirlock == null ) { dirlock = DirLock . takeOwnershipIfStale ( hdfs , lockDirPath , lockTimeoutSec ) ; if ( dirlock == null ) { LOG . debug ( "Spout {} could not take over ownership of DirLoc...
If clocks in sync then acquires the oldest expired lock Else on first call just remembers the oldest expired lock on next call check if the lock is updated . if not updated then acquires the lock
35,725
private FileReader createFileReader ( Path file ) throws IOException { if ( readerType . equalsIgnoreCase ( Configs . SEQ ) ) { return new SequenceFileReader ( this . hdfs , file , conf ) ; } if ( readerType . equalsIgnoreCase ( Configs . TEXT ) ) { return new TextFileReader ( this . hdfs , file , conf ) ; } try { Clas...
Creates a reader that reads from beginning of file
35,726
private Path renameToInProgressFile ( Path file ) throws IOException { Path newFile = new Path ( file . toString ( ) + inprogress_suffix ) ; try { if ( hdfs . rename ( file , newFile ) ) { return newFile ; } throw new RenameException ( file , newFile ) ; } catch ( IOException e ) { throw new RenameException ( file , ne...
Renames files with . inprogress suffix
35,727
private Path getFileForLockFile ( Path lockFile , Path sourceDirPath ) throws IOException { String lockFileName = lockFile . getName ( ) ; Path dataFile = new Path ( sourceDirPath + Path . SEPARATOR + lockFileName + inprogress_suffix ) ; if ( hdfs . exists ( dataFile ) ) { return dataFile ; } dataFile = new Path ( sour...
Returns the corresponding input file in the sourceDirPath for the specified lock file . If no such file is found then returns null
35,728
private Path renameCompletedFile ( Path file ) throws IOException { String fileName = file . toString ( ) ; String fileNameMinusSuffix = fileName . substring ( 0 , fileName . indexOf ( inprogress_suffix ) ) ; String newName = new Path ( fileNameMinusSuffix ) . getName ( ) ; Path newFile = new Path ( archiveDirPath + Pa...
renames files and returns the new file path
35,729
public static StormZkClusterState mkStormZkClusterState ( Map conf ) throws Exception { Map realConf = getFullConf ( conf ) ; return new StormZkClusterState ( realConf ) ; }
This function bring some hacks to JStorm this isn t a good way
35,730
public void enqueue ( TaskMessage message , Channel channel ) { while ( ! bstartRec ) { LOG . info ( "check whether deserialize queues have already been created" ) ; boolean isFinishInit = true ; for ( Integer task : workerTasks ) { if ( deserializeQueues . get ( task ) == null ) { isFinishInit = false ; JStormUtils . ...
enqueue a received message
35,731
protected void closeChannel ( Channel channel ) { MessageDecoder . removeTransmitHistogram ( channel ) ; channel . close ( ) . awaitUninterruptibly ( ) ; allChannels . remove ( channel ) ; }
close a channel
35,732
public void close ( ) { LOG . info ( "Begin to shutdown NettyServer" ) ; if ( allChannels != null ) { new Thread ( new Runnable ( ) { public void run ( ) { try { allChannels . close ( ) . await ( 1 , TimeUnit . SECONDS ) ; LOG . info ( "Successfully close all channel" ) ; factory . releaseExternalResources ( ) ; } catc...
close all channels and release resources
35,733
private TimeWindow mergeSessionWindows ( TimeWindow oldWindow , TimeWindow newWindow ) { if ( oldWindow . intersects ( newWindow ) ) { return oldWindow . cover ( newWindow ) ; } return newWindow ; }
merges two windows if there s an overlap between two windows return merged window ; otherwise return the new window itself .
35,734
public static String getMetricValue ( MetricSnapshot snapshot ) { if ( snapshot == null ) return null ; MetricType type = MetricType . parse ( snapshot . get_metricType ( ) ) ; switch ( type ) { case COUNTER : return format ( snapshot . get_longValue ( ) ) ; case GAUGE : return format ( snapshot . get_doubleValue ( ) )...
get MetricSnapshot formatted value string
35,735
public static String extractGroup ( String [ ] strs ) { if ( strs . length < 6 ) return null ; return strs [ strs . length - 2 ] ; }
Extract Group from WM
35,736
public static String extractMetricName ( String [ ] strs ) { if ( strs . length < 6 ) return null ; return strs [ strs . length - 1 ] ; }
Extract MetricName from CC
35,737
public static UIComponentMetric getComponentMetric ( MetricInfo info , int window , String compName , List < ComponentSummary > componentSummaries ) { UIComponentMetric compMetric = new UIComponentMetric ( compName ) ; if ( info != null ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info ....
get the specific component metric
35,738
public static List < UITaskMetric > getTaskMetrics ( MetricInfo info , String component , int window ) { TreeMap < Integer , UITaskMetric > taskData = new TreeMap < > ( ) ; if ( info != null ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String nam...
get all task metrics in the specific component
35,739
public static UITaskMetric getTaskMetric ( List < MetricInfo > taskStreamMetrics , String component , int id , int window ) { UITaskMetric taskMetric = new UITaskMetric ( component , id ) ; if ( taskStreamMetrics . size ( ) > 1 ) { MetricInfo info = taskStreamMetrics . get ( 0 ) ; if ( info != null ) { for ( Map . Entr...
get the specific task metric
35,740
public Object remove ( K key ) { for ( Map < K , V > bucket : buckets ) { Object value = bucket . remove ( key ) ; if ( value != null ) { return value ; } } return null ; }
On the side of performance scanning from header is faster on the side of logic it should scan from the end to first .
35,741
public Principal principal ( ) { if ( _subject == null ) return null ; Set < Principal > princs = _subject . getPrincipals ( ) ; if ( princs . size ( ) == 0 ) return null ; return ( Principal ) ( princs . toArray ( ) [ 0 ] ) ; }
The primary principal associated current subject
35,742
public void init ( Cluster cluster , Map < String , Node > nodeIdToNode ) { _cluster = cluster ; _nodeIdToNode = nodeIdToNode ; }
Initialize the pool .
35,743
public void init ( ) { try { initPlugin ( ) ; } catch ( RuntimeException e ) { LOG . error ( "init metrics plugin error:" , e ) ; System . exit ( - 1 ) ; } pushRefreshEvent ( ) ; pushFlushEvent ( ) ; pushMergeEvent ( ) ; pushUploadEvent ( ) ; pushDiagnosisEvent ( ) ; LOG . info ( "Finish" ) ; }
init plugins and start event
35,744
public TopologyMetric getTopologyMetric ( String topologyId ) { long start = System . nanoTime ( ) ; try { TopologyMetric ret = new TopologyMetric ( ) ; List < MetricInfo > topologyMetrics = metricCache . getMetricData ( topologyId , MetaType . TOPOLOGY ) ; List < MetricInfo > componentMetrics = metricCache . getMetric...
get topology metrics note that only topology & component & worker metrics are returned
35,745
public void start ( ) { rotationTimer = new Timer ( true ) ; TimerTask task = new TimerTask ( ) { public void run ( ) { rotationTimerTriggered . set ( true ) ; } } ; rotationTimer . scheduleAtFixedRate ( task , interval , interval ) ; }
Start the timer to run at fixed intervals .
35,746
@ SuppressWarnings ( "unchecked" ) public Object commit ( long batchId , Object state ) { List < Object > stateList = ( List < Object > ) state ; if ( stateOperator != null ) { Object commitState = stateOperator . commit ( batchId , stateList ) ; stateList . add ( commitState ) ; } return stateList ; }
to topology master for persistence
35,747
public void mergeMeters ( MetricInfo metricInfo , String meta , Map < Integer , MetricSnapshot > data ) { Map < Integer , MetricSnapshot > existing = metricInfo . get_metrics ( ) . get ( meta ) ; if ( existing == null ) { metricInfo . put_to_metrics ( meta , data ) ; } else { for ( Map . Entry < Integer , MetricSnapsho...
meters are not sampled .
35,748
public void mergeHistograms ( MetricInfo metricInfo , String meta , Map < Integer , MetricSnapshot > data , Map < String , Integer > metaCounters , Map < String , Map < Integer , Histogram > > histograms ) { Map < Integer , MetricSnapshot > existing = metricInfo . get_metrics ( ) . get ( meta ) ; if ( existing == null ...
histograms are sampled but we just update points
35,749
protected void updateMetricCounters ( String metricName , Map < String , Integer > metricNameCounters ) { if ( metricNameCounters . containsKey ( metricName ) ) { metricNameCounters . put ( metricName , metricNameCounters . get ( metricName ) + 1 ) ; } else { metricNameCounters . put ( metricName , 1 ) ; } }
computes occurrences of specified metric name
35,750
private void mergeCounters ( Map < String , Map < Integer , MetricSnapshot > > newCounters , Map < String , Map < Integer , MetricSnapshot > > oldCounters ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > entry : newCounters . entrySet ( ) ) { String metricName = entry . getKey ( ) ; Map < Integer , M...
sum old counter snapshots and new counter snapshots sums are stored in new snapshots .
35,751
private void track ( Event < T > windowEvent ) { evictionPolicy . track ( windowEvent ) ; triggerPolicy . track ( windowEvent ) ; }
feed the event to the eviction and trigger policies for bookkeeping and optionally firing the trigger .
35,752
private List < Event < T > > scanEvents ( boolean fullScan ) { LOG . debug ( "Scan events, eviction policy {}" , evictionPolicy ) ; List < T > eventsToExpire = new ArrayList < > ( ) ; List < Event < T > > eventsToProcess = new ArrayList < > ( ) ; try { lock . lock ( ) ; Iterator < Event < T > > it = queue . iterator ( ...
Scan events in the queue using the expiration policy to check if the event should be evicted or not .
35,753
public long getEarliestEventTs ( long startTs , long endTs ) { long minTs = Long . MAX_VALUE ; for ( Event < T > event : queue ) { if ( event . getTimestamp ( ) > startTs && event . getTimestamp ( ) <= endTs ) { minTs = Math . min ( minTs , event . getTimestamp ( ) ) ; } } return minTs ; }
Scans the event queue and returns the next earliest event ts between the startTs and endTs
35,754
public int getEventCount ( long referenceTime ) { int count = 0 ; for ( Event < T > event : queue ) { if ( event . getTimestamp ( ) <= referenceTime ) { ++ count ; } } return count ; }
Scans the event queue and returns number of events having timestamp less than or equal to the reference time .
35,755
private void mkRefreshConfThread ( final NimbusData nimbusData ) { nimbusData . getScheduExec ( ) . scheduleAtFixedRate ( new RunnableCallback ( ) { public void run ( ) { LOG . debug ( "checking changes in storm.yaml..." ) ; Map newConf = Utils . readStormConfig ( ) ; if ( Utils . isConfigChanged ( nimbusData . getConf...
handle manual conf changes check every 15 sec
35,756
public static List < HostAndPort > splitToHostsAndPorts ( String hostPortQuorumList ) { String [ ] strings = StringUtils . getStrings ( hostPortQuorumList ) ; int len = 0 ; if ( strings != null ) { len = strings . length ; } List < HostAndPort > list = new ArrayList < HostAndPort > ( len ) ; if ( strings != null ) { fo...
Split a quorum list into a list of hostnames and ports
35,757
public static String buildHostsOnlyList ( List < HostAndPort > hostAndPorts ) { StringBuilder sb = new StringBuilder ( ) ; for ( HostAndPort hostAndPort : hostAndPorts ) { sb . append ( hostAndPort . getHostText ( ) ) . append ( "," ) ; } if ( sb . length ( ) > 0 ) { sb . delete ( sb . length ( ) - 1 , sb . length ( ) ...
Build up to a hosts only list
35,758
public static Config buildConfig ( TopologyDef topologyDef ) { Config conf = new Config ( ) ; conf . putAll ( topologyDef . getConfig ( ) ) ; return conf ; }
Given a topology definition return a populated org . apache . storm . Config instance .
35,759
public static StormTopology buildTopology ( ExecutionContext context ) throws IllegalAccessException , InstantiationException , ClassNotFoundException , NoSuchMethodException , InvocationTargetException { StormTopology topology = null ; TopologyDef topologyDef = context . getTopologyDef ( ) ; if ( ! topologyDef . valid...
Given a topology definition return a Storm topology that can be run either locally or remotely .
35,760
private static void buildComponents ( ExecutionContext context ) throws ClassNotFoundException , NoSuchMethodException , IllegalAccessException , InvocationTargetException , InstantiationException { Collection < BeanDef > cDefs = context . getTopologyDef ( ) . getComponents ( ) ; if ( cDefs != null ) { for ( BeanDef be...
Given a topology definition resolve and instantiate all components found and return a map keyed by the component id .
35,761
private static IRichSpout buildSpout ( SpoutDef def , ExecutionContext context ) throws ClassNotFoundException , IllegalAccessException , InstantiationException , NoSuchMethodException , InvocationTargetException { return ( IRichSpout ) buildObject ( def , context ) ; }
Given a spout definition return a Storm spout implementation by attempting to find a matching constructor in the given spout class . Perform list to array conversion as necessary .
35,762
private static void buildBolts ( ExecutionContext context ) throws ClassNotFoundException , IllegalAccessException , InstantiationException , NoSuchMethodException , InvocationTargetException { for ( BoltDef def : context . getTopologyDef ( ) . getBolts ( ) ) { Class clazz = Class . forName ( def . getClassName ( ) ) ;...
Given a list of bolt definitions build a map of Storm bolts with the bolt definition id as the key . Attempt to coerce the given constructor arguments to a matching bolt constructor as much as possible .
35,763
private static Map mapifySerializations ( List sers ) { Map rtn = new HashMap ( ) ; if ( sers != null ) { int size = sers . size ( ) ; for ( int i = 0 ; i < size ; i ++ ) { if ( sers . get ( i ) instanceof Map ) { rtn . putAll ( ( Map ) sers . get ( i ) ) ; } else { rtn . put ( sers . get ( i ) , null ) ; } } } return ...
add custom KRYO serialization
35,764
public static StormTopology normalizeTopology ( Map stormConf , StormTopology topology , boolean fromConf ) { StormTopology ret = topology . deepCopy ( ) ; Map < String , Object > rawComponents = ThriftTopologyUtils . getComponents ( topology ) ; Map < String , Object > components = ThriftTopologyUtils . getComponents ...
finalize component s task parallelism
35,765
public static void cleanupCorruptTopologies ( NimbusData data ) throws Exception { BlobStore blobStore = data . getBlobStore ( ) ; Set < String > code_ids = Sets . newHashSet ( BlobStoreUtils . code_ids ( blobStore . listKeys ( ) ) ) ; Set < String > active_ids = Sets . newHashSet ( data . getStormClusterState ( ) . ac...
clean the topology which is in ZK but not in local dir
35,766
public Set < String > getGroups ( String user ) throws IOException { if ( cachedGroups . containsKey ( user ) ) { return cachedGroups . get ( user ) ; } Set < String > groups = getUnixGroups ( user ) ; if ( ! groups . isEmpty ( ) ) cachedGroups . put ( user , groups ) ; return groups ; }
Returns list of groups for a user
35,767
private static Set < String > getUnixGroups ( final String user ) throws IOException { String result = "" ; try { result = ShellUtils . execCommand ( ShellUtils . getGroupsForUserCommand ( user ) ) ; } catch ( ExitCodeException e ) { LOG . warn ( "got exception trying to get groups for user " + user , e ) ; return new ...
Get the current user s group list from Unix by running the command groups NOTE . For non - existing user it will return EMPTY list
35,768
public Map < String , Object > toMap ( ) { Map < String , Object > ret = new HashMap < > ( ) ; ret . put ( MetricUploader . METRIC_TIME , timestamp ) ; ret . put ( MetricUploader . METRIC_TYPE , type ) ; return ret ; }
metrics report time
35,769
public void update ( Number obj ) { if ( enable == false ) { return ; } if ( intervalCheck . check ( ) ) { flush ( ) ; } synchronized ( this ) { unflushed = updater . update ( obj , unflushed ) ; } }
In order to improve performance Do
35,770
public void onEvent ( Object event , long sequence , boolean endOfBatch ) throws Exception { if ( event == null ) { return ; } handleEvent ( event , endOfBatch ) ; }
This function need to be implements
35,771
public void refreshTopologies ( ) { TimeTicker ticker = new TimeTicker ( TimeUnit . MILLISECONDS , true ) ; try { doRefreshTopologies ( ) ; LOG . debug ( "Refresh topologies, cost:{}" , ticker . stopAndRestart ( ) ) ; if ( ! context . getNimbusData ( ) . isLeader ( ) ) { syncTopologyMetaForFollower ( ) ; LOG . debug ( ...
refresh metric settings of topologies & metric meta
35,772
private void syncMetaFromCache ( String topology , TopologyMetricContext tmContext ) { if ( ! tmContext . syncMeta ( ) ) { Map < String , Long > meta = context . getMetricCache ( ) . getMeta ( topology ) ; if ( meta != null ) { tmContext . getMemMeta ( ) . putAll ( meta ) ; } tmContext . setSyncMeta ( true ) ; } }
sync metric meta from rocks db into mem cache on startup
35,773
private void syncSysMetaFromRemote ( ) { for ( String topology : JStormMetrics . SYS_TOPOLOGIES ) { if ( context . getTopologyMetricContexts ( ) . containsKey ( topology ) ) { syncMetaFromRemote ( topology , context . getTopologyMetricContexts ( ) . get ( topology ) , Lists . newArrayList ( MetaType . TOPOLOGY , MetaTy...
sync sys topologies from remote because we want to keep all historic metric data thus metric id cannot be changed .
35,774
private void tryHostLock ( String hostPath ) throws Exception { if ( registryOperations . exists ( hostPath ) ) { try { ServiceRecord host = registryOperations . resolve ( hostPath ) ; Long cTime = Long . parseLong ( host . get ( JOYConstants . CTIME , JOYConstants . DEFAULT_CTIME ) ) ; Date now = new Date ( ) ; if ( n...
see if anyone is updating host s port list if not start update this host itself timeout is 45 seconds
35,775
public static List < TaskEntity > getTaskEntities ( TopologyInfo topologyInfo ) { Map < Integer , TaskEntity > tasks = new HashMap < > ( ) ; for ( TaskSummary ts : topologyInfo . get_tasks ( ) ) { tasks . put ( ts . get_taskId ( ) , new TaskEntity ( ts ) ) ; } for ( ComponentSummary cs : topologyInfo . get_components (...
get all task entities in the specific topology
35,776
public static List < TaskEntity > getTaskEntities ( TopologyInfo topologyInfo , String componentName ) { TreeMap < Integer , TaskEntity > tasks = new TreeMap < > ( ) ; for ( ComponentSummary cs : topologyInfo . get_components ( ) ) { String compName = cs . get_name ( ) ; String type = cs . get_type ( ) ; if ( component...
get the task entities in the specific component
35,777
public static TaskEntity getTaskEntity ( List < TaskSummary > tasks , int taskId ) { TaskEntity entity = null ; for ( TaskSummary task : tasks ) { if ( task . get_taskId ( ) == taskId ) { entity = new TaskEntity ( task ) ; break ; } } return entity ; }
get the specific task entity
35,778
public static Map resetZKConfig ( Map conf , String clusterName ) { ClusterConfig nimbus = clusterConfig . get ( clusterName ) ; if ( nimbus == null ) return conf ; conf . put ( Config . STORM_ZOOKEEPER_ROOT , nimbus . getZkRoot ( ) ) ; conf . put ( Config . STORM_ZOOKEEPER_SERVERS , nimbus . getZkServers ( ) ) ; conf ...
to get nimbus client we should reset ZK config
35,779
public static String prettyUptime ( int secs ) { String [ ] [ ] PRETTYSECDIVIDERS = { new String [ ] { "s" , "60" } , new String [ ] { "m" , "60" } , new String [ ] { "h" , "24" } , new String [ ] { "d" , null } } ; int diversize = PRETTYSECDIVIDERS . length ; LinkedList < String > tmp = new LinkedList < > ( ) ; int di...
seconds to string like 30m 40s and 1d 20h 30m 40s
35,780
public static Long parseLong ( String s , long defaultValue ) { try { Long value = Long . parseLong ( s ) ; return value ; } catch ( NumberFormatException e ) { } return defaultValue ; }
return the default value instead of throw an exception
35,781
private static void fillValue2Node ( List < MetricInfo > componentMetrics , Map < String , TopologyNode > nodes ) { String NODE_DIM = MetricDef . EMMITTED_NUM ; List < String > FILTER = Arrays . asList ( MetricDef . EMMITTED_NUM , MetricDef . SEND_TPS , MetricDef . RECV_TPS ) ; for ( MetricInfo info : componentMetrics ...
fill emitted num to nodes
35,782
private static void fillTLCValue2Edge ( List < MetricInfo > componentMetrics , Map < String , TopologyEdge > edges ) { String EDGE_DIM = "." + MetricDef . TUPLE_LIEF_CYCLE ; for ( MetricInfo info : componentMetrics ) { if ( info == null ) continue ; for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric...
fill tuple life cycle time to edges
35,783
public BoltDeclarer setBolt ( String id , BaseWindowedBolt < Tuple > bolt , Number parallelism_hint ) throws IllegalArgumentException { boolean isEventTime = WindowAssigner . isEventTime ( bolt . getWindowAssigner ( ) ) ; if ( isEventTime && bolt . getTimestampExtractor ( ) == null ) { throw new IllegalArgumentExceptio...
Define a new bolt in this topology . This defines a windowed bolt intended for windowing operations .
35,784
public SpoutDeclarer setSpout ( String id , IRichSpout spout , Number parallelism_hint ) throws IllegalArgumentException { validateUnusedId ( id ) ; initCommon ( id , spout , parallelism_hint ) ; _spouts . put ( id , spout ) ; return new SpoutGetter ( id ) ; }
Define a new spout in this topology with the specified parallelism . If the spout declares itself as non - distributed the parallelism_hint will be ignored and only one task will be allocated to this component .
35,785
public SpoutDeclarer setSpout ( String id , IControlSpout spout ) { return setSpout ( id , spout , null ) ; }
Define a new bolt in this topology . This defines a control spout which is a simpler to use but more restricted kind of bolt . Control spouts are intended for making sending control message more simply
35,786
public BoltDeclarer setBolt ( String id , IControlBolt bolt , Number parallelism_hint ) { return setBolt ( id , new ControlBoltExecutor ( bolt ) , parallelism_hint ) ; }
Define a new bolt in this topology . This defines a control bolt which is a simpler to use but more restricted kind of bolt . Control bolts are intended for making sending control message more simply
35,787
public void addWorkerHook ( IWorkerHook workerHook ) { if ( null == workerHook ) { throw new IllegalArgumentException ( "WorkerHook must not be null." ) ; } _workerHooks . add ( ByteBuffer . wrap ( Utils . javaSerialize ( workerHook ) ) ) ; }
Add a new worker lifecycle hook
35,788
private void maybeAddWatermarkInputs ( ComponentCommon common , IRichBolt bolt ) { if ( bolt instanceof WindowedBoltExecutor ) { Set < String > comps = new HashSet < > ( ) ; for ( GlobalStreamId globalStreamId : common . get_inputs ( ) . keySet ( ) ) { comps . add ( globalStreamId . get_componentId ( ) ) ; } for ( Stri...
Add watermark stream to source components of window bolts
35,789
public static String getShowTimeStr ( Integer time ) { if ( time == null ) { return MINUTE_WINDOW_STR ; } else if ( time . equals ( MINUTE_WINDOW ) ) { return MINUTE_WINDOW_STR ; } else if ( time . equals ( HOUR_WINDOW ) ) { return HOUR_WINDOW_STR ; } else if ( time . equals ( DAY_WINDOW ) ) { return DAY_WINDOW_STR ; }...
Default is the latest result
35,790
public static String prettyUptimeStr ( int secs ) { int diversize = PRETTYSECDIVIDERS . length ; List < String > tmp = new ArrayList < String > ( ) ; int div = secs ; for ( int i = 0 ; i < diversize ; i ++ ) { if ( PRETTYSECDIVIDERS [ i ] [ 1 ] != null ) { Integer d = Integer . parseInt ( PRETTYSECDIVIDERS [ i ] [ 1 ] ...
Converts a duration in seconds to a compact human-readable string such as 1d20h30m40s .
35,791
/**
 * Returns the parent nodes of {@code n} in {@code g}, ordered by the index
 * carried on each incoming edge. Assumes every edge in the graph is an
 * {@code IndexedEdge} whose natural ordering is by that index, and that edge
 * sources are of type {@code T} — TODO(review): confirm against the graph's
 * construction site.
 *
 * @param g the graph to query (edges must be IndexedEdge instances)
 * @param n the node whose parents are requested
 * @return the sources of n's incoming edges, sorted by edge index
 */
public static <T> List<T> getParents(DirectedGraph g, T n) {
    // Typed collections instead of raw types; the graph reference itself stays
    // raw to keep the public signature unchanged.
    List<IndexedEdge> incoming = new ArrayList<IndexedEdge>(g.incomingEdgesOf(n));
    // IndexedEdge sorts by its index, giving a deterministic parent order.
    Collections.sort(incoming);
    List<T> ret = new ArrayList<T>(incoming.size());
    for (IndexedEdge e : incoming) {
        // The graph is heterogeneous at compile time; callers guarantee the
        // edge sources have type T, so the cast is safe here.
        @SuppressWarnings("unchecked")
        T source = (T) e.source;
        ret.add(source);
    }
    return ret;
}
Assumes edge contains an index
35,792
public Set < ResourceWorkerSlot > getKeepAssign ( DefaultTopologyAssignContext defaultContext , Set < Integer > needAssigns ) { Set < Integer > keepAssignIds = new HashSet < > ( ) ; keepAssignIds . addAll ( defaultContext . getAllTaskIds ( ) ) ; keepAssignIds . removeAll ( defaultContext . getUnstoppedTaskIds ( ) ) ; k...
Get the task Map which the task is alive and will be kept only when type is ASSIGN_TYPE_MONITOR it is valid
35,793
public JStormCache putMetricData ( String topologyId , TopologyMetric tpMetric ) { Map < String , Object > batchData = new HashMap < > ( ) ; long ts = System . currentTimeMillis ( ) ; int tp = 0 , comp = 0 , compStream = 0 , task = 0 , stream = 0 , worker = 0 , netty = 0 ; if ( tpMetric . get_componentMetric ( ) . get_...
store 30min metric data . the metric data is stored in a ring .
35,794
/**
 * Sets the on-heap and off-heap memory load for the current operation.
 *
 * @param onHeap  on-heap memory requirement
 * @param offHeap off-heap memory requirement
 * @return this stream, for fluent chaining
 */
public Stream setMemoryLoad(Number onHeap, Number offHeap) {
    // Forward the resource declaration to the underlying node.
    _node.setMemoryLoad(onHeap, offHeap);
    // Fluent API: hand the same stream back to the caller.
    return this;
}
Sets the Memory Load resources for the current operation .
35,795
/**
 * Filters out fields from a stream, resulting in a stream containing only the
 * fields specified by {@code keepFields}.
 *
 * @param keepFields the fields to retain
 * @return a new stream emitting only keepFields
 */
public Stream project(Fields keepFields) {
    // Reject field names that are not present on this stream.
    projectionValidation(keepFields);
    ProcessorNode projection = new ProcessorNode(
            _topology.getUniqueStreamId(),
            _name,
            keepFields,
            new Fields(),
            new ProjectedProcessor(keepFields));
    return _topology.addSourcedNode(this, projection);
}
Filters out fields from a stream resulting in a Stream containing only the fields specified by keepFields .
35,796
public Stream partitionAggregate ( Fields inputFields , Aggregator agg , Fields functionFields ) { projectionValidation ( inputFields ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , functionFields , functionFields , new AggregateProcessor ( inputFields , ag...
creates brand new tuples with brand new fields
35,797
public Stream map ( MapFunction function ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new MapFunctionExecutor ( function )...
Returns a stream consisting of the result of applying the given mapping function to the values of this stream .
35,798
public Stream flatMap ( FlatMapFunction function ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new FlatMapFunctionExecutor ...
Returns a stream consisting of the results of replacing each value of this stream with the contents produced by applying the provided mapping function to each value . This has the effect of applying a one - to - many transformation to the values of the stream and then flattening the resulting elements into a new stream...
35,799
public Stream peek ( Consumer action ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new ConsumerExecutor ( action ) ) ) ) ; ...
Returns a stream consisting of the trident tuples of this stream additionally performing the provided action on each trident tuple as they are consumed from the resulting stream . This is mostly useful for debugging to see the tuples as they flow past a certain point in a pipeline .