idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
7,300 | final Deferred < Object > delete ( final byte [ ] key , final byte [ ] [ ] qualifiers ) { return client . delete ( new DeleteRequest ( table , key , FAMILY , qualifiers ) ) ; } | Deletes the given cells from the data table . |
7,301 | static String date ( final long timestamp ) { if ( ( timestamp & Const . SECOND_MASK ) != 0 ) { return new Date ( timestamp ) . toString ( ) ; } else { return new Date ( timestamp * 1000 ) . toString ( ) ; } } | Transforms a UNIX timestamp into a human readable date . |
7,302 | public static List < Scanner > getScanners ( final Query query ) { final List < Scanner > scanners = new ArrayList < Scanner > ( Const . SALT_WIDTH ( ) > 0 ? Const . SALT_BUCKETS ( ) : 1 ) ; if ( Const . SALT_WIDTH ( ) > 0 ) { for ( int i = 0 ; i < Const . SALT_BUCKETS ( ) ; i ++ ) { scanners . add ( ( ( TsdbQuery ) qu... | Returns a set of scanners one for each bucket if salted or one scanner if salting is disabled . |
7,303 | public static long baseTime ( final TSDB tsdb , final byte [ ] row ) { return Bytes . getUnsignedInt ( row , Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) ) ; } | Extracts the timestamp from a row key . |
7,304 | public static void setBaseTime ( final byte [ ] row , int base_time ) { Bytes . setInt ( row , base_time , Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) ) ; } | Sets the time in a raw data table row key |
7,305 | public static Cell parseSingleValue ( final KeyValue column ) { if ( column . qualifier ( ) . length == 2 || ( column . qualifier ( ) . length == 4 && inMilliseconds ( column . qualifier ( ) ) ) ) { final ArrayList < KeyValue > row = new ArrayList < KeyValue > ( 1 ) ; row . add ( column ) ; final ArrayList < Cell > cel... | Extracts a Cell from a single data point fixing potential errors with the qualifier flags |
7,306 | public static byte getValueLengthFromQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; short length ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { length = ( short ) ( qualifier [ offset + 3 ] & Internal . LENGTH_MASK ) ; } else { ... | Returns the length of the value in bytes parsed from the qualifier |
7,307 | public static short getFlagsFromQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return ( short ) ( qualifier [ offset + 3 ] & Internal . FLAGS_MASK ) ; } else { return ( short ) ( qual... | Parses the flag bits from the qualifier |
7,308 | public static boolean isFloat ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return ( qualifier [ offset + 3 ] & Const . FLAG_FLOAT ) == Const . FLAG_FLOAT ; } else { return ( qualifier [ offse... | Parses the qualifier to determine if the data is a floating point value . 4 bytes == Float 8 bytes == Double |
7,309 | public static byte [ ] extractQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return new byte [ ] { qualifier [ offset ] , qualifier [ offset + 1 ] , qualifier [ offset + 2 ] , qualifi... | Extracts the 2 or 4 byte qualifier from a compacted byte array |
7,310 | public static byte [ ] buildQualifier ( final long timestamp , final short flags ) { final long base_time ; if ( ( timestamp & Const . SECOND_MASK ) != 0 ) { base_time = ( ( timestamp / 1000 ) - ( ( timestamp / 1000 ) % Const . MAX_TIMESPAN ) ) ; final int qual = ( int ) ( ( ( timestamp - ( base_time * 1000 ) << ( Cons... | Returns a 2 or 4 byte qualifier based on the timestamp and the flags . If the timestamp is in seconds this returns a 2 byte qualifier . If it s in milliseconds returns a 4 byte qualifier |
7,311 | private static void validateQualifier ( final byte [ ] qualifier , final int offset ) { if ( offset < 0 || offset >= qualifier . length - 1 ) { throw new IllegalDataException ( "Offset of [" + offset + "] is out of bounds for the qualifier length of [" + qualifier . length + "]" ) ; } } | Checks the qualifier to verify that it has data and that the offset is within bounds |
7,312 | public static long getMaxUnsignedValueOnBytes ( final int width ) { if ( width < 0 || width > 8 ) { throw new IllegalArgumentException ( "Width must be from 1 to 8 bytes: " + width ) ; } if ( width < 8 ) { return ( ( long ) 1 << width * Byte . SIZE ) - 1 ; } else { return Long . MAX_VALUE ; } } | Simple helper to calculate the max value for any width of long from 0 to 8 bytes . |
7,313 | public static byte [ ] vleEncodeLong ( final long value ) { if ( Byte . MIN_VALUE <= value && value <= Byte . MAX_VALUE ) { return new byte [ ] { ( byte ) value } ; } else if ( Short . MIN_VALUE <= value && value <= Short . MAX_VALUE ) { return Bytes . fromShort ( ( short ) value ) ; } else if ( Integer . MIN_VALUE <= ... | Encodes a long on 1 2 4 or 8 bytes |
7,314 | public static long getTimeStampFromNonDP ( final long base_time , byte [ ] quantifier ) { long ret = base_time ; if ( quantifier . length == 3 ) { ret += quantifier [ 1 ] << 8 | ( quantifier [ 2 ] & 0xFF ) ; ret *= 1000 ; } else if ( quantifier . length == 5 ) { ret *= 1000 ; ret += ( quantifier [ 1 ] & 0xFF ) << 24 | ... | Get timestamp from base time and quantifier for non datapoints . The returned time will always be in ms . |
7,315 | public static HistogramDataPoint decodeHistogramDataPoint ( final TSDB tsdb , final KeyValue kv ) { long timestamp = Internal . baseTime ( kv . key ( ) ) ; return decodeHistogramDataPoint ( tsdb , timestamp , kv . qualifier ( ) , kv . value ( ) ) ; } | Decode the histogram point from the given key value |
7,316 | public static HistogramDataPoint decodeHistogramDataPoint ( final TSDB tsdb , final long base_time , final byte [ ] qualifier , final byte [ ] value ) { final HistogramDataPointCodec decoder = tsdb . histogramManager ( ) . getCodec ( ( int ) value [ 0 ] ) ; long timestamp = getTimeStampFromNonDP ( base_time , qualifier... | Decode the histogram point from the given key and values |
7,317 | public static boolean isValidQuery ( final RollupQuery rollup_query ) { return ( rollup_query != null && rollup_query . rollup_interval != null && ! rollup_query . rollup_interval . isDefaultInterval ( ) ) ; } | Does it contain a valid rollup interval mainly says it is not the default rollup . Default rollup is of same resolution as raw data . So if true which means the raw cell column qualifier is encoded with the aggregate function and the cell is not appended or compacted |
7,318 | public void setQuery ( final String metric , final Map < String , String > tags ) { this . metric = metric ; this . tags = tags ; metric_uid = tsdb . getUID ( UniqueIdType . METRIC , metric ) ; tag_uids = Tags . resolveAll ( tsdb , tags ) ; } | Sets the query to perform |
7,319 | public static Deferred < byte [ ] > tsuidFromMetric ( final TSDB tsdb , final String metric , final Map < String , String > tags ) { if ( metric == null || metric . isEmpty ( ) ) { throw new IllegalArgumentException ( "The metric cannot be empty" ) ; } if ( tags == null || tags . isEmpty ( ) ) { throw new IllegalArgume... | Converts the given metric and tags to a TSUID by resolving the strings to their UIDs . Note that the resulting TSUID may not exist if the combination was not written to TSDB |
7,320 | private Deferred < IncomingDataPoint > resolveNames ( final IncomingDataPoint dp ) { if ( metric != null ) { dp . setMetric ( metric ) ; dp . setTags ( ( HashMap < String , String > ) tags ) ; return Deferred . fromResult ( dp ) ; } class TagsCB implements Callback < IncomingDataPoint , HashMap < String , String > > { ... | Resolve the UIDs to names . If the query was for a metric and tags then we can just use those . |
7,321 | private Scanner getScanner ( ) { final Scanner scanner = tsdb . getClient ( ) . newScanner ( tsdb . metaTable ( ) ) ; scanner . setStartKey ( metric_uid ) ; final long stop = UniqueId . uidToLong ( metric_uid , TSDB . metrics_width ( ) ) + 1 ; scanner . setStopKey ( UniqueId . longToUID ( stop , TSDB . metrics_width ( ... | Configures the scanner for a specific metric and optional tags |
7,322 | public static int getRollupBasetime ( final long timestamp , final RollupInterval interval ) { if ( timestamp < 0 ) { throw new IllegalArgumentException ( "Not supporting negative " + "timestamps at this time: " + timestamp ) ; } if ( interval . getUnits ( ) == 'h' ) { int modulo = Const . MAX_TIMESPAN ; if ( interval ... | Calculates the base time for a rollup interval the time that can be stored in the row key . |
7,323 | public static long getTimestampFromRollupQualifier ( final byte [ ] qualifier , final long base_time , final RollupInterval interval , final int offset ) { return ( base_time * 1000 ) + getOffsetFromRollupQualifier ( qualifier , offset , interval ) ; } | Returns the absolute timestamp of a data point qualifier in milliseconds |
7,324 | private void loadFromFile ( ) { if ( file_location != null && ! file_location . isEmpty ( ) ) { final File file = new File ( file_location ) ; if ( ! file . exists ( ) ) { LOG . warn ( "Query override file " + file_location + " does not exist" ) ; return ; } try { final String raw_json = Files . toString ( file , Const... | Attempts to load the file from disk |
7,325 | public void skipWhitespaces ( ) { for ( int i = mark ; i < chars . length ; i ++ ) { if ( Character . isWhitespace ( chars [ i ] ) ) { mark ++ ; } else { break ; } } } | Increments the mark over white spaces |
7,326 | public static final void setGlobalTags ( final Config config ) { if ( config == null ) { throw new IllegalArgumentException ( "Configuration cannot be null." ) ; } if ( config . getBoolean ( "tsd.core.stats_with_port" ) ) { global_tags = new HashMap < String , String > ( 1 ) ; global_tags . put ( "port" , config . getS... | Parses the configuration to determine if any extra tags should be included with every stat emitted . |
7,327 | public void setStartTime ( final long timestamp ) { if ( timestamp < 0 || ( ( timestamp & Const . SECOND_MASK ) != 0 && timestamp > 9999999999999L ) ) { throw new IllegalArgumentException ( "Invalid timestamp: " + timestamp ) ; } else if ( end_time != UNSET && timestamp >= getEndTime ( ) ) { throw new IllegalArgumentEx... | Sets the start time for the query |
7,328 | public void downsample ( final long interval , final Aggregator downsampler ) { if ( downsampler == Aggregators . NONE ) { throw new IllegalArgumentException ( "cannot use the NONE " + "aggregator for downsampling" ) ; } downsample ( interval , downsampler , FillPolicy . NONE ) ; } | Sets an optional downsampling function with interpolation on this query . |
7,329 | private byte [ ] tableToBeScanned ( ) { final byte [ ] tableName ; if ( RollupQuery . isValidQuery ( rollup_query ) ) { if ( pre_aggregate ) { tableName = rollup_query . getRollupInterval ( ) . getGroupbyTable ( ) ; } else { tableName = rollup_query . getRollupInterval ( ) . getTemporalTable ( ) ; } } else if ( pre_agg... | Identify the table to be scanned based on the roll up and pre - aggregate query parameters |
7,330 | private long getScanStartTimeSeconds ( ) { long start = getStartTime ( ) ; if ( ( start & Const . SECOND_MASK ) != 0L ) { start /= 1000L ; } if ( rollup_query != null ) { long base_time = RollupUtils . getRollupBasetime ( start , rollup_query . getRollupInterval ( ) ) ; if ( rate ) { base_time = RollupUtils . getRollup... | Returns the UNIX timestamp from which we must start scanning . |
7,331 | private long getScanEndTimeSeconds ( ) { long end = getEndTime ( ) ; if ( ( end & Const . SECOND_MASK ) != 0L ) { end /= 1000L ; if ( end - ( end * 1000 ) < 1 ) { end ++ ; } } if ( rollup_query != null ) { return RollupUtils . getRollupBasetime ( end + ( rollup_query . getRollupInterval ( ) . getIntervalSeconds ( ) * r... | Returns the UNIX timestamp at which we must stop scanning . |
7,332 | private void createAndSetFilter ( final Scanner scanner ) { QueryUtil . setDataTableScanFilter ( scanner , group_bys , row_key_literals , explicit_tags , enable_fuzzy_filter , ( end_time == UNSET ? - 1 : ( int ) getScanEndTimeSeconds ( ) ) ) ; } | Sets the server - side regexp filter on the scanner . In order to find the rows with the relevant tags we use a server - side filter that matches a regular expression on the row key . |
7,333 | public void transformDownSamplerToRollupQuery ( final Aggregator group_by , final String str_interval ) { if ( downsampler != null && downsampler . getInterval ( ) > 0 ) { if ( tsdb . getRollupConfig ( ) != null ) { try { best_match_rollups = tsdb . getRollupConfig ( ) . getRollupInterval ( downsampler . getInterval ( ... | Transform downsampler properties to rollup properties if the rollup is enabled at configuration level and down sampler is set . It falls back to raw data and down sampling if there is no RollupInterval is configured against this down sample interval |
7,334 | private void transformRollupQueryToDownSampler ( ) { if ( rollup_query != null ) { downsampler = new DownsamplingSpecification ( rollup_query . getRollupInterval ( ) . getIntervalSeconds ( ) * 1000 , rollup_query . getRollupAgg ( ) , ( downsampler != null ? downsampler . getFillPolicy ( ) : FillPolicy . ZERO ) ) ; roll... | Transform rollup query to downsampler It is mainly useful when it scan on raw data on fallback . |
7,335 | public boolean copyChanges ( final Tree tree , final boolean overwrite ) { if ( tree == null ) { throw new IllegalArgumentException ( "Cannot copy a null tree" ) ; } if ( tree_id != tree . tree_id ) { throw new IllegalArgumentException ( "Tree IDs do not match" ) ; } if ( overwrite || tree . changed . get ( "name" ) ) ... | Copies changes from the incoming tree into the local tree overriding if called to . Only parses user mutable fields excluding rules . |
7,336 | public void addRule ( final TreeRule rule ) { if ( rule == null ) { throw new IllegalArgumentException ( "Null rules are not accepted" ) ; } if ( rules == null ) { rules = new TreeMap < Integer , TreeMap < Integer , TreeRule > > ( ) ; } TreeMap < Integer , TreeRule > level = rules . get ( rule . getLevel ( ) ) ; if ( l... | Adds the given rule to the tree replacing anything in the designated spot |
7,337 | public void addCollision ( final String tsuid , final String existing_tsuid ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Empty or null collisions not allowed" ) ; } if ( collisions == null ) { collisions = new HashMap < String , String > ( ) ; } if ( ! collisions . containsKey... | Adds a TSUID to the collision local list must then be synced with storage |
7,338 | public void addNotMatched ( final String tsuid , final String message ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Empty or null non matches not allowed" ) ; } if ( not_matched == null ) { not_matched = new HashMap < String , String > ( ) ; } if ( ! not_matched . containsKey (... | Adds a TSUID to the not - matched local list when strict_matching is enabled . Must be synced with storage . |
7,339 | public Deferred < Boolean > storeTree ( final TSDB tsdb , final boolean overwrite ) { if ( tree_id < 1 || tree_id > 65535 ) { throw new IllegalArgumentException ( "Invalid Tree ID" ) ; } boolean has_changes = false ; for ( Map . Entry < String , Boolean > entry : changed . entrySet ( ) ) { if ( entry . getValue ( ) ) {... | Attempts to store the tree definition via a CompareAndSet call . |
7,340 | public TreeRule getRule ( final int level , final int order ) { if ( rules == null || rules . isEmpty ( ) ) { return null ; } TreeMap < Integer , TreeRule > rule_level = rules . get ( level ) ; if ( rule_level == null || rule_level . isEmpty ( ) ) { return null ; } return rule_level . get ( order ) ; } | Retrieves a single rule from the rule set given a level and order |
7,341 | public static Deferred < Tree > fetchTree ( final TSDB tsdb , final int tree_id ) { if ( tree_id < 1 || tree_id > 65535 ) { throw new IllegalArgumentException ( "Invalid Tree ID" ) ; } final GetRequest get = new GetRequest ( tsdb . treeTable ( ) , idToBytes ( tree_id ) ) ; get . family ( TREE_FAMILY ) ; final class Fet... | Attempts to fetch the given tree from storage loading the rule set at the same time . |
7,342 | public static Deferred < List < Tree > > fetchAllTrees ( final TSDB tsdb ) { final Deferred < List < Tree > > result = new Deferred < List < Tree > > ( ) ; final class AllTreeScanner implements Callback < Object , ArrayList < ArrayList < KeyValue > > > { private final List < Tree > trees = new ArrayList < Tree > ( ) ; ... | Attempts to retrieve all trees from the UID table including their rules . If no trees were found the result will be an empty list |
7,343 | private byte [ ] toStorageJson ( ) { final ByteArrayOutputStream output = new ByteArrayOutputStream ( ) ; try { final JsonGenerator json = JSON . getFactory ( ) . createGenerator ( output ) ; json . writeStartObject ( ) ; json . writeStringField ( "name" , name ) ; json . writeStringField ( "description" , description ... | Converts the object to a JSON byte array necessary for CAS calls and to keep redundant data down |
7,344 | private static boolean printResult ( final ArrayList < KeyValue > row , final byte [ ] family , final boolean formard ) { if ( null == row || row . isEmpty ( ) ) { return false ; } final byte [ ] key = row . get ( 0 ) . key ( ) ; String name = formard ? CliUtils . fromBytes ( key ) : null ; String id = formard ? null :... | Helper to print the cells in a given family for a given row if any . |
7,345 | private static int findAndPrintRow ( final HBaseClient client , final byte [ ] table , final byte [ ] key , final byte [ ] family , boolean formard ) { final GetRequest get = new GetRequest ( table , key ) ; get . family ( family ) ; ArrayList < KeyValue > row ; try { row = client . get ( get ) . joinUninterruptibly ( ... | Gets a given row in HBase and prints it on standard output . |
7,346 | private static int extactLookupId ( final HBaseClient client , final byte [ ] table , final short idwidth , final String kind , final byte [ ] id ) { final UniqueId uid = new UniqueId ( client , table , kind , ( int ) idwidth ) ; try { final String name = uid . getName ( id ) ; System . out . println ( kind + ' ' + nam... | Looks up an ID for a given kind and prints it if found . |
7,347 | private static byte [ ] idInBytes ( final short idwidth , final long lid ) { if ( idwidth <= 0 ) { throw new AssertionError ( "negative idwidth: " + idwidth ) ; } final byte [ ] id = Bytes . fromLong ( lid ) ; for ( int i = 0 ; i < id . length - idwidth ; i ++ ) { if ( id [ i ] != 0 ) { System . err . println ( lid + "... | Transforms an ID into the corresponding byte array . |
7,348 | private static int extactLookupName ( final HBaseClient client , final byte [ ] table , final short idwidth , final String kind , final String name ) { final UniqueId uid = new UniqueId ( client , table , kind , ( int ) idwidth ) ; try { final byte [ ] id = uid . getId ( name ) ; System . out . println ( kind + ' ' + n... | Looks up a name for a given kind and prints it if found . |
7,349 | private static int purgeTree ( final TSDB tsdb , final int tree_id , final boolean delete_definition ) throws Exception { final TreeSync sync = new TreeSync ( tsdb , 0 , 1 , 0 ) ; return sync . purgeTree ( tree_id , delete_definition ) ; } | Attempts to delete the branches leaves collisions and not - matched entries for a given tree . Optionally removes the tree definition itself |
7,350 | public Deferred < TreeMap < byte [ ] , Span > > fetch ( ) { if ( tags . isEmpty ( ) ) { return Deferred . fromResult ( null ) ; } startFetch ( ) ; return results ; } | Initiate the get requests and return the tree map of results . |
7,351 | private void startFetch ( ) { prepareConcurrentMultiGetTasks ( ) ; fetch_start_time = System . currentTimeMillis ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Start to fetch data using multiget, there will be " + multi_get_wait_cnt + " multigets to call" ) ; } for ( int con_idx = 0 ; con_idx < concurrency_mult... | Start the work of firing up X concurrent get requests . |
7,352 | public static void setDataTableScanFilter ( final Scanner scanner , final List < byte [ ] > group_bys , final ByteMap < byte [ ] [ ] > row_key_literals , final boolean explicit_tags , final boolean enable_fuzzy_filter , final int end_time ) { if ( ( group_bys == null || group_bys . isEmpty ( ) ) && ( row_key_literals =... | Sets a filter or filter list on the scanner based on whether or not the query had tags it needed to match . |
7,353 | public static String getRowKeyTSUIDRegex ( final List < String > tsuids ) { Collections . sort ( tsuids ) ; final short metric_width = TSDB . metrics_width ( ) ; int tags_length = 0 ; final ArrayList < byte [ ] > uids = new ArrayList < byte [ ] > ( tsuids . size ( ) ) ; for ( final String tsuid : tsuids ) { final Strin... | Creates a regular expression with a list of or d TUIDs to compare against the rows in storage . |
7,354 | public static Scanner getMetricScanner ( final TSDB tsdb , final int salt_bucket , final byte [ ] metric , final int start , final int stop , final byte [ ] table , final byte [ ] family ) { final short metric_width = TSDB . metrics_width ( ) ; final int metric_salt_width = metric_width + Const . SALT_WIDTH ( ) ; final... | Compiles an HBase scanner against the main data table |
7,355 | public static void addId ( final StringBuilder buf , final byte [ ] id , final boolean close ) { boolean backslash = false ; for ( final byte b : id ) { buf . append ( ( char ) ( b & 0xFF ) ) ; if ( b == 'E' && backslash ) { buf . append ( "\\\\E\\Q" ) ; } else { backslash = b == '\\' ; } } if ( close ) { buf . append ... | Appends the given UID to the given regular expression buffer |
7,356 | public static String byteRegexToString ( final String regexp ) { final StringBuilder buf = new StringBuilder ( ) ; for ( int i = 0 ; i < regexp . length ( ) ; i ++ ) { if ( i > 0 && regexp . charAt ( i - 1 ) == 'Q' ) { if ( regexp . charAt ( i - 3 ) == '*' ) { byte [ ] tagk = new byte [ TSDB . tagk_width ( ) ] ; for ( ... | Little helper to print out the regular expression by converting the UID bytes to an array . |
7,357 | private void computeUnion ( ) { final ByteMap < ExpressionDataPoint [ ] > ordered_union = new ByteMap < ExpressionDataPoint [ ] > ( ) ; final Iterator < ITimeSyncedIterator > it = queries . values ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final ITimeSyncedIterator sub = it . next ( ) ; final ExpressionDataPoint... | Computes the union of all sets matching on tags and optionally the aggregated tags across each variable . |
7,358 | static byte [ ] flattenTags ( final boolean use_query_tags , final boolean include_agg_tags , final ExpressionDataPoint dp , final ITimeSyncedIterator sub ) { if ( dp . tags ( ) == null || dp . tags ( ) . isEmpty ( ) ) { return HBaseClient . EMPTY_ARRAY ; } final int tagk_width = TSDB . tagk_width ( ) ; final int tagv_... | Creates a key based on the concatenation of the tag pairs then the agg tag keys . |
7,359 | public void reset ( DataPoint dp ) { this . timestamp = dp . timestamp ( ) ; this . is_integer = dp . isInteger ( ) ; if ( is_integer ) { this . value = dp . longValue ( ) ; } else { this . value = Double . doubleToRawLongBits ( dp . doubleValue ( ) ) ; } } | Resets with a new data point . |
7,360 | public static MutableDataPoint ofLongValue ( final long timestamp , final long value ) { final MutableDataPoint dp = new MutableDataPoint ( ) ; dp . reset ( timestamp , value ) ; return dp ; } | Resets with a new pair of a timestamp and a long value . |
7,361 | public void setSeries ( final String metric , final Map < String , String > tags ) { IncomingDataPoints . checkMetricAndTags ( metric , tags ) ; try { row_key = IncomingDataPoints . rowKeyTemplate ( tsdb , metric , tags ) ; RowKey . prefixKeyWithSalt ( row_key ) ; reset ( ) ; } catch ( RuntimeException e ) { throw e ; ... | Sets the metric name and tags of this batch . This method only need be called if there is a desire to reuse the data structure after the data has been flushed . This will reset all cached information in this data structure . |
7,362 | private void reset ( ) { size = 0 ; qualifier_index = 0 ; value_index = 0 ; base_time = Long . MIN_VALUE ; last_timestamp = Long . MIN_VALUE ; } | Resets the indices without overwriting the buffers . So the same amount of space will remain allocated . |
7,363 | public Deferred < Object > persist ( ) { final byte [ ] q = Arrays . copyOfRange ( batched_qualifier , 0 , qualifier_index ) ; final byte [ ] v = Arrays . copyOfRange ( batched_value , 0 , value_index ) ; final byte [ ] r = Arrays . copyOfRange ( row_key , 0 , row_key . length ) ; final long base_time = this . base_tim... | A copy of the values is created and sent with a put request . A reset is initialized which makes this data structure ready to be reused for the same metric and tags but for a different hour of data . |
7,364 | private void ensureCapacity ( final byte [ ] next_qualifier , final byte [ ] next_value ) { if ( qualifier_index + next_qualifier . length >= batched_qualifier . length ) { batched_qualifier = Arrays . copyOf ( batched_qualifier , batched_qualifier . length * 2 ) ; } if ( value_index + next_value . length >= batched_va... | Checks the size of the qualifier and value arrays to make sure we have space . If not then we double the size of the arrays . This way a row allocates space for a full hour of second data but if the user requires millisecond storage with more than 3600 points it will expand . |
7,365 | private void append ( final byte [ ] next_qualifier , final byte [ ] next_value ) { ensureCapacity ( next_qualifier , next_value ) ; System . arraycopy ( next_value , 0 , batched_value , value_index , next_value . length ) ; value_index += next_value . length ; System . arraycopy ( next_qualifier , 0 , batched_qualifie... | Appends the value and qualifier to the appropriate arrays |
7,366 | private int qualifierOffset ( final int i ) { int offset = 0 ; for ( int j = 0 ; j < i ; j ++ ) { offset += Internal . getQualifierLength ( batched_qualifier , offset ) ; } return offset ; } | Computes the proper offset to reach qualifier |
7,367 | private boolean isInteger ( final int i , final int q_offset ) { final short flags = Internal . getFlagsFromQualifier ( batched_qualifier , q_offset ) ; return ( flags & Const . FLAG_FLOAT ) == 0x0 ; } | Tells whether or not the ith value is integer . Uses pre - computed qualifier offset . |
7,368 | private void handleExpressionQuery ( final TSDB tsdb , final HttpQuery query ) { final net . opentsdb . query . pojo . Query v2_query = JSON . parseToObject ( query . getContent ( ) , net . opentsdb . query . pojo . Query . class ) ; v2_query . validate ( ) ; checkAuthorization ( tsdb , query . channel ( ) , v2_query )... | Handles an expression query |
7,369 | private static void parseMTypeSubQuery ( final String query_string , TSQuery data_query ) { if ( query_string == null || query_string . isEmpty ( ) ) { throw new BadRequestException ( "The query string was empty" ) ; } final String [ ] parts = Tags . splitString ( query_string , ':' ) ; int i = parts . length ; if ( i ... | Parses a query string m = ... type query and adds it to the TSQuery . This will generate a TSSubQuery and add it to the TSQuery if successful |
7,370 | private static void parseTsuidTypeSubQuery ( final String query_string , TSQuery data_query ) { if ( query_string == null || query_string . isEmpty ( ) ) { throw new BadRequestException ( "The tsuid query string was empty" ) ; } final String [ ] parts = Tags . splitString ( query_string , ':' ) ; int i = parts . length... | Parses a tsuid = ... type query and adds it to the TSQuery . This will generate a TSSubQuery and add it to the TSQuery if successful |
7,371 | private LastPointQuery parseLastPointQuery ( final TSDB tsdb , final HttpQuery http_query ) { final LastPointQuery query = new LastPointQuery ( ) ; if ( http_query . hasQueryStringParam ( "resolve" ) ) { query . setResolveNames ( true ) ; } if ( http_query . hasQueryStringParam ( "back_scan" ) ) { try { query . setBack... | Parses a last point query from the URI string |
7,372 | public Map < String , String > getPrintableHeaders ( ) { final Map < String , String > headers = new HashMap < String , String > ( request . headers ( ) . entries ( ) . size ( ) ) ; for ( final Entry < String , String > header : request . headers ( ) . entries ( ) ) { if ( header . getKey ( ) . toLowerCase ( ) . equals... | Copies the header list and obfuscates the cookie header in case it contains auth tokens etc . Note that it flattens duplicate headers keys as comma separated lists per the RFC |
7,373 | public Map < String , String > getHeaders ( ) { final Map < String , String > headers = new HashMap < String , String > ( request . headers ( ) . entries ( ) . size ( ) ) ; for ( final Entry < String , String > header : request . headers ( ) . entries ( ) ) { if ( headers . containsKey ( header . getKey ( ) ) ) { heade... | Copies the header list so modifications won t affect the original set . Note that it flattens duplicate headers keys as comma separated lists per the RFC |
7,374 | public String getHeaderValue ( final String headerName ) { if ( headerName == null ) { return null ; } return request . headers ( ) . get ( headerName ) ; } | Return the value of the given HTTP Header first match wins |
7,375 | public Map < String , List < String > > getQueryString ( ) { if ( querystring == null ) { try { querystring = new QueryStringDecoder ( request . getUri ( ) ) . getParameters ( ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( "Bad query string: " + e . getMessage ( ) ) ; } } return querystrin... | Returns the query string parameters passed in the URI . |
7,376 | public Charset getCharset ( ) { for ( String type : this . request . headers ( ) . getAll ( "Content-Type" ) ) { int idx = type . toUpperCase ( ) . indexOf ( "CHARSET=" ) ; if ( idx > 1 ) { String charset = type . substring ( idx + 8 ) ; return Charset . forName ( charset ) ; } } return Charset . forName ( "UTF-8" ) ; ... | Attempts to parse the character set from the request header . If not set defaults to UTF - 8 |
7,377 | public void done ( ) { final int processing_time = processingTimeMillis ( ) ; final String url = request . getUri ( ) ; final String msg = String . format ( "HTTP %s done in %d ms" , url , processing_time ) ; if ( url . startsWith ( "/api/put" ) && LOG . isDebugEnabled ( ) ) { LOG . debug ( msg ) ; } else { logInfo ( m... | Method to call after writing the HTTP response to the wire . The default is to simply log the request info . Can be overridden by subclasses . |
7,378 | public void sendStatusOnly ( final HttpResponseStatus status ) { if ( ! chan . isConnected ( ) ) { if ( stats != null ) { stats . markSendFailed ( ) ; } done ( ) ; return ; } response . setStatus ( status ) ; final boolean keepalive = HttpHeaders . isKeepAlive ( request ) ; if ( keepalive ) { HttpHeaders . setContentLe... | Send just the status code without a body used for 204 or 304 |
7,379 | public Deferred < ArrayList < Object > > flush ( ) { final int size = size ( ) ; if ( size > 0 ) { LOG . info ( "Flushing all old outstanding rows out of " + size + " rows" ) ; } final long now = System . currentTimeMillis ( ) ; return flush ( now / 1000 - Const . MAX_TIMESPAN - 1 , Integer . MAX_VALUE ) ; } | Forces a flush of the all old entries in the compaction queue . |
7,380 | private Deferred < ArrayList < Object > > flush ( final long cut_off , int maxflushes ) { assert maxflushes > 0 : "maxflushes must be > 0, but I got " + maxflushes ; maxflushes = Math . min ( maxflushes , size ( ) ) ; if ( maxflushes == 0 ) { return Deferred . fromResult ( new ArrayList < Object > ( 0 ) ) ; } final Arr... | Flushes all the rows in the compaction queue older than the cutoff time . |
7,381 | void executeBulk ( final TSDB tsdb , final HttpMethod method , HttpQuery query ) { if ( method == HttpMethod . POST || method == HttpMethod . PUT ) { executeBulkUpdate ( tsdb , method , query ) ; } else if ( method == HttpMethod . DELETE ) { executeBulkDelete ( tsdb , query ) ; } else { throw new BadRequestException ( ... | Performs CRUD methods on a list of annotation objects to reduce calls to the API . |
7,382 | void executeBulkUpdate ( final TSDB tsdb , final HttpMethod method , HttpQuery query ) { final List < Annotation > notes ; try { notes = query . serializer ( ) . parseAnnotationsV1 ( ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( e ) ; } catch ( JSONException e ) { throw new BadRequestExce... | Performs CRU methods on a list of annotation objects to reduce calls to the API . Only supports body content and adding or updating annotation objects . Deletions are separate . |
7,383 | private AnnotationBulkDelete parseBulkDeleteQS ( final HttpQuery query ) { final AnnotationBulkDelete settings = new AnnotationBulkDelete ( ) ; settings . start_time = query . getRequiredQueryStringParam ( "start_time" ) ; settings . end_time = query . getQueryStringParam ( "end_time" ) ; if ( query . hasQueryStringPar... | Parses a query string for a bulk delete request
7,384 | public static SearchType parseSearchType ( final String type ) { if ( type == null || type . isEmpty ( ) ) { throw new IllegalArgumentException ( "Type provided was null or empty" ) ; } if ( type . toLowerCase ( ) . equals ( "tsmeta" ) ) { return SearchType . TSMETA ; } else if ( type . toLowerCase ( ) . equals ( "tsme... | Converts the human readable string to the proper enum |
7,385 | private void populateNextRate ( ) { final MutableDataPoint prev_data = new MutableDataPoint ( ) ; if ( source . hasNext ( ) ) { prev_data . reset ( next_data ) ; next_data . reset ( source . next ( ) ) ; final long t0 = prev_data . timestamp ( ) ; final long t1 = next_data . timestamp ( ) ; if ( t1 <= t0 ) { throw new ... | Populate the next rate . |
7,386 | public long cacheSize ( ) { if ( use_lru ) { return ( int ) ( lru_name_cache . size ( ) + lru_id_cache . size ( ) ) ; } return name_cache . size ( ) + id_cache . size ( ) ; } | Returns the number of elements stored in the internal cache . |
7,387 | public void dropCaches ( ) { if ( use_lru ) { lru_name_cache . invalidateAll ( ) ; lru_id_cache . invalidateAll ( ) ; } else { name_cache . clear ( ) ; id_cache . clear ( ) ; } } | Causes this instance to discard all its in - memory caches . |
7,388 | public Deferred < String > getNameAsync ( final byte [ ] id ) { if ( id . length != id_width ) { throw new IllegalArgumentException ( "Wrong id.length = " + id . length + " which is != " + id_width + " required for '" + kind ( ) + '\'' ) ; } final String name = getNameFromCache ( id ) ; if ( name != null ) { incrementC... | Finds the name associated with a given ID . |
/**
 * Adds the bidirectional name/id mapping to the in-memory caches.
 * The forward (name to id) entry is inserted before the reverse
 * (id to name) entry; statement order is preserved deliberately.
 * @param name The string name of the UID.
 * @param id The UID byte array mapped to the name.
 */
private void cacheMapping(final String name, final byte[] id) {
  addIdToCache(name, id);
  addNameToCache(id, name);
}
7,390 | private static Scanner getSuggestScanner ( final HBaseClient client , final byte [ ] tsd_uid_table , final String search , final byte [ ] kind_or_null , final int max_results ) { final byte [ ] start_row ; final byte [ ] end_row ; if ( search . isEmpty ( ) ) { start_row = START_ROW ; end_row = END_ROW ; } else { start_... | Creates a scanner that scans the right range of rows for suggestions . |
7,391 | private void hbasePutWithRetry ( final PutRequest put , short attempts , short wait ) throws HBaseException { put . setBufferable ( false ) ; while ( attempts -- > 0 ) { try { client . put ( put ) . joinUninterruptibly ( ) ; return ; } catch ( HBaseException e ) { if ( attempts > 0 ) { LOG . error ( "Put failed, attemp... | Attempts to run the PutRequest given in argument retrying if needed . |
7,392 | public static long uidToLong ( final byte [ ] uid , final short uid_length ) { if ( uid . length != uid_length ) { throw new IllegalArgumentException ( "UID was " + uid . length + " bytes long but expected to be " + uid_length ) ; } final byte [ ] uid_raw = new byte [ 8 ] ; System . arraycopy ( uid , 0 , uid_raw , 8 - ... | Converts a UID to an integer value . The array must be the same length as uid_length or an exception will be thrown . |
7,393 | public static byte [ ] longToUID ( final long uid , final short width ) { final byte [ ] padded = Bytes . fromLong ( uid ) ; for ( int i = 0 ; i < padded . length - width ; i ++ ) { if ( padded [ i ] != 0 ) { final String message = "UID " + Long . toString ( uid ) + " was too large for " + width + " bytes" ; LOG . erro... | Converts a Long to a byte array with the proper UID width |
7,394 | public static void addIdToRegexp ( final StringBuilder buf , final byte [ ] id ) { boolean backslash = false ; for ( final byte b : id ) { buf . append ( ( char ) ( b & 0xFF ) ) ; if ( b == 'E' && backslash ) { buf . append ( "\\\\E\\Q" ) ; } else { backslash = b == '\\' ; } } buf . append ( "\\E" ) ; } | Appends the given UID to the given string buffer followed by \\ E . |
7,395 | public static UniqueIdType stringToUniqueIdType ( final String type ) { if ( type . toLowerCase ( ) . equals ( "metric" ) || type . toLowerCase ( ) . equals ( "metrics" ) ) { return UniqueIdType . METRIC ; } else if ( type . toLowerCase ( ) . equals ( "tagk" ) ) { return UniqueIdType . TAGK ; } else if ( type . toLower... | Attempts to convert the given string to a type enumerator |
7,396 | public static byte [ ] getTSUIDFromKey ( final byte [ ] row_key , final short metric_width , final short timestamp_width ) { int idx = 0 ; final int tag_pair_width = TSDB . tagk_width ( ) + TSDB . tagv_width ( ) ; final int tags_length = row_key . length - ( Const . SALT_WIDTH ( ) + metric_width + timestamp_width ) ; i... | Extracts the TSUID from a storage row key that includes the timestamp . |
7,397 | public static List < byte [ ] > getTagsFromTSUID ( final String tsuid ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Missing TSUID" ) ; } if ( tsuid . length ( ) <= TSDB . metrics_width ( ) * 2 ) { throw new IllegalArgumentException ( "TSUID is too short, may be missing tags" ) ... | Extracts a list of tagks and tagvs as individual values in a list |
7,398 | public static Deferred < Map < String , Long > > getUsedUIDs ( final TSDB tsdb , final byte [ ] [ ] kinds ) { final class GetCB implements Callback < Map < String , Long > , ArrayList < KeyValue > > { public Map < String , Long > call ( final ArrayList < KeyValue > row ) throws Exception { final Map < String , Long > r... | Returns a map of max UIDs from storage for the given list of UID types |
7,399 | public static void preloadUidCache ( final TSDB tsdb , final ByteMap < UniqueId > uid_cache_map ) throws HBaseException { int max_results = tsdb . getConfig ( ) . getInt ( "tsd.core.preload_uid_cache.max_entries" ) ; LOG . info ( "Preloading uid cache with max_results=" + max_results ) ; if ( max_results <= 0 ) { retur... | Pre - load UID caches scanning up to tsd . core . preload_uid_cache . max_entries rows from the UID table . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.