idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
5,600
/**
 * Creates the main config file under the given TE base directory by
 * delegating to the String-based overload.
 *
 * @param tebase the TE base directory
 * @throws TEException if the underlying create call fails
 */
public void create(File tebase) throws TEException {
    // The original wrapped this call in a try/catch that only rethrew the
    // caught exception unchanged; the wrapper added nothing and is removed.
    create(tebase.toString() + File.separator);
}
Creates the main config file .
5,601
/**
 * Returns every config.xml file found (searching recursively) under a
 * directory.
 *
 * @param dir the directory to search
 * @return all files named "config.xml" below {@code dir}
 */
private List<File> getConfigFiles(File dir) {
    String[] extensions = { "xml" };
    List<File> configFiles = new ArrayList<File>();
    // Recursive listing of *.xml files via Commons IO.
    Collection<File> files = FileUtils.listFiles(dir, extensions, true);
    // Enhanced for-loop replaces the explicit Iterator with its
    // redundant (File) cast.
    for (File file : files) {
        if (file.getName().equals("config.xml")) {
            configFiles.add(file);
        }
    }
    return configFiles;
}
Returns all config files (config.xml) found under a directory .
5,602
/**
 * Checks the given Schematron phase for the XML document and returns the
 * validation status.
 *
 * @param doc the document to validate; null or root-less documents are
 *        reported as invalid
 * @param schemaFile classpath location of the Schematron schema
 * @param phase the Schematron phase to check
 * @return true if the document passes validation, false otherwise
 * @throws Exception propagated from parse()
 */
public boolean checkSchematronRules(Document doc, String schemaFile, String phase) throws Exception {
    boolean isValid = false;
    if (doc == null || doc.getDocumentElement() == null)
        return isValid;
    try {
        // Resolve the schema as a classpath resource; decode URL-escaped
        // characters so paths with spaces work.
        ClassLoader loader = this.getClass().getClassLoader();
        URL url = loader.getResource(schemaFile);
        this.schemaFile = new File(URLDecoder.decode(url.getFile(), "UTF-8"));
    } catch (Exception e) {
        // NOTE(review): this only reports when assertions are enabled (-ea);
        // with assertions disabled a failed lookup falls through silently and
        // validation proceeds with a stale/null schemaFile — confirm intent.
        assert false : "Entity body not found. " + e.toString();
    }
    this.phase = phase;
    // parse() returns the document when validation succeeds, null otherwise.
    Document returnDoc = parse(doc, null, null);
    if (returnDoc != null) {
        isValid = true;
    }
    return isValid;
}
Checks the given schematron phase for the XML file and returns the validation status .
5,603
/**
 * Runs the Schematron schema against the input source.
 *
 * @param inputDoc the XML instance to validate
 * @param schemaFile the Schematron schema file to load
 * @param phase the phase used to create the validation driver
 * @return true if the instance is valid, false otherwise
 */
public boolean executeSchematronDriver(InputSource inputDoc, File schemaFile, String phase) {
    boolean isValid = false;
    ValidationDriver driver = createSchematronDriver(phase);
    assert null != driver : "Unable to create Schematron ValidationDriver";
    FileInputStream fis = null;
    try {
        fis = new FileInputStream(schemaFile);
        InputSource is = new InputSource(fis);
        if (driver.loadSchema(is)) {
            isValid = driver.validate(inputDoc);
        } else {
            assert false : ("Failed to load Schematron schema: " + schemaFile
                    + "\nIs the schema valid? Is the phase defined?");
        }
    } catch (SAXException e) {
        assert false : e.toString();
    } catch (IOException e) {
        // Also covers a missing schema file, which the original only
        // printed before continuing into an NPE in loadSchema(null).
        assert false : e.toString();
    } finally {
        // Close the schema stream on every path; the original leaked it
        // whenever loading or validation threw.
        if (fis != null) {
            try {
                fis.close();
            } catch (IOException ignored) {
                // best-effort close
            }
        }
    }
    return isValid;
}
Runs the schematron file against the input source .
5,604
/**
 * Checks the given Document against a Schematron schema. A schema reference
 * may be conveyed by the instruction element; when instruction is null, the
 * previously configured schemaFile and phase fields are used as-is.
 *
 * @param instruction optional element naming the schema location; its type
 *        is one of "url", "file", or "resource"
 * @param logger writer recorded in outputLogger for validation output
 * @return the input document if valid, or null if invalid or doc is null
 * @throws Exception on schema resolution failures
 */
Document parse(Document doc, Element instruction, PrintWriter logger) throws Exception {
    this.outputLogger = logger;
    if (instruction != null) {
        // getFileType populates this.type and this.schemaLocation from the
        // instruction element.
        getFileType(instruction);
        if (type.equals("url")) {
            URL schemaURL = new URL(this.schemaLocation);
            this.schemaFile = new File(schemaURL.toURI());
        } else if (type.equals("file")) {
            this.schemaFile = new File(this.schemaLocation);
        } else if (type.equals("resource")) {
            // Classpath resource: decode URL-escaped characters in the path.
            URL url = this.getClass().getResource(this.schemaLocation);
            this.schemaFile = new File(URLDecoder.decode(url.getFile(), "UTF-8"));
        }
    }
    boolean isValid = false;
    if (doc != null) {
        InputSource xmlInputSource = null;
        try {
            // Serialize the DOM so the Schematron driver can read it as a
            // stream.
            InputStream inputStream = DocumentToInputStream(doc);
            xmlInputSource = new InputSource(inputStream);
        } catch (IOException e) {
            e.printStackTrace();
        }
        isValid = executeSchematronDriver(xmlInputSource, this.schemaFile, this.phase);
    }
    if (!isValid) {
        return null;
    } else {
        return doc;
    }
}
Checks the given Document against a Schematron schema . A schema reference is conveyed by a DOM Element node as indicated below .
5,605
/**
 * Creates and initializes a ValidationDriver to perform Schematron
 * validation. A schema must be loaded before an instance can be validated.
 *
 * @param configProps configuration properties for the driver
 * @return a driver wired with a Schematron schema reader
 */
ValidationDriver createDriver(PropertyMap configProps) {
    SchemaReader reader = new SchemaReaderLoader().createSchemaReader(SCHEMATRON_NS_URI);
    return new ValidationDriver(configProps, reader);
}
Creates and initializes a ValidationDriver to perform Schematron validation . A schema must be loaded before an instance can be validated .
5,606
/**
 * Determines the width in pixels of the first image in an image file.
 *
 * @param imageLoc location of the image as a URI string
 * @return the width of image index 0, or -1 if the image cannot be read
 */
public static int getImageWidth(String imageLoc) {
    InputStream is = null;
    try {
        URI imageUri = new URI(imageLoc);
        is = imageUri.toURL().openStream();
        ImageInputStream iis = ImageIO.createImageInputStream(is);
        try {
            // Typed iterator replaces the raw Iterator + cast.
            Iterator<ImageReader> iter = ImageIO.getImageReaders(iis);
            if (!iter.hasNext()) {
                return -1;
            }
            ImageReader reader = iter.next();
            reader.setInput(iis, true);
            return reader.getWidth(0);
        } finally {
            // Close the image input stream on every path; the original
            // leaked it when getWidth threw.
            if (iis != null) {
                iis.close();
            }
        }
    } catch (Exception e) {
        jlogger.log(Level.SEVERE, "getImageWidth", e);
        return -1;
    } finally {
        // Close the URL stream; the original never closed it at all.
        if (is != null) {
            try {
                is.close();
            } catch (IOException ignored) {
                // best-effort close
            }
        }
    }
}
Determines the width of the first image in an image file in pixels .
5,607
/**
 * Creates a Writer used to write test results to the log.xml file under
 * logDir/callpath.
 *
 * @param logDir root log directory; null yields a null writer
 * @param callpath slash-separated path of the test invocation
 * @return a PrintWriter over a fresh UTF-8 log.xml, or null if logDir is null
 * @throws Exception if the file cannot be created
 */
public static PrintWriter createLog(File logDir, String callpath) throws Exception {
    if (logDir != null) {
        File dir = new File(logDir, callpath);
        // NOTE(review): overwriting the system PATH variable looks
        // intentional elsewhere in TE but is surprising — confirm.
        String path = logDir.toString() + "/" + callpath.split("/")[0];
        System.setProperty("PATH", path);
        // mkdirs (not mkdir) so nested call paths like "suite/test" work;
        // the original silently failed to create intermediate directories.
        dir.mkdirs();
        File f = new File(dir, "log.xml");
        f.delete();
        BufferedWriter writer = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(f), "UTF-8"));
        return new PrintWriter(writer);
    }
    return null;
}
Creates a Writer used to write test results to the log . xml file .
5,608
/**
 * Reads a test log (log.xml) from disk into a DOM Document.
 *
 * @param logDir root log directory
 * @param callpath slash-separated path of the test invocation
 * @return the parsed log document, or null if no log.xml exists
 * @throws Exception on parser configuration or irrecoverable I/O errors
 */
public static Document readLog(File logDir, String callpath) throws Exception {
    File dir = new File(logDir, callpath);
    File f = new File(dir, "log.xml");
    if (f.exists()) {
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        dbf.setNamespaceAware(true);
        // Hardening against XXE-style entity expansion.
        dbf.setExpandEntityReferences(false);
        DocumentBuilder db = dbf.newDocumentBuilder();
        Document doc = db.newDocument();
        TransformerFactory tf = TransformerFactory.newInstance();
        tf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        Transformer t = tf.newTransformer();
        // Suppress parse errors so the fallback below can run.
        t.setErrorListener(new com.occamlab.te.NullErrorListener());
        try {
            t.transform(new StreamSource(f), new DOMResult(doc));
        } catch (Exception e) {
            // An interrupted test run can leave log.xml without its closing
            // tag. Re-read the raw bytes, append "\n</log>\n" (8 bytes, which
            // is why the buffer is length + 8), and parse again.
            RandomAccessFile raf = new RandomAccessFile(f, "r");
            int l = new Long(raf.length()).intValue();
            byte[] buf = new byte[l + 8];
            raf.read(buf);
            raf.close();
            buf[l] = '\n';
            buf[l + 1] = '<';
            buf[l + 2] = '/';
            buf[l + 3] = 'l';
            buf[l + 4] = 'o';
            buf[l + 5] = 'g';
            buf[l + 6] = '>';
            buf[l + 7] = '\n';
            doc = db.newDocument();
            tf.newTransformer().transform(new StreamSource(new ByteArrayInputStream(buf)), new DOMResult(doc));
        }
        return doc;
    } else {
        return null;
    }
}
Reads a log from disk
5,609
/**
 * Returns the context node for a test from its log document. The context is
 * read from the first starttest/context/value element; an element child is
 * rebuilt as a document, an attribute child is extracted from a serialized
 * copy of the value element.
 *
 * @param builder Saxon document builder used to produce XdmNode results
 * @param log the test's log document
 * @return the context node, or null if no context is recorded
 * @throws Exception on serialization or build failures
 */
public static XdmNode getContextFromLog(net.sf.saxon.s9api.DocumentBuilder builder, Document log) throws Exception {
    Element starttest = (Element) log.getElementsByTagName("starttest").item(0);
    NodeList nl = starttest.getElementsByTagName("context");
    if (nl == null || nl.getLength() == 0) {
        return null;
    } else {
        Element context = (Element) nl.item(0);
        Element value = (Element) context.getElementsByTagName("value").item(0);
        nl = value.getChildNodes();
        for (int i = 0; i < nl.getLength(); i++) {
            Node n = nl.item(i);
            if (n.getNodeType() == Node.ATTRIBUTE_NODE) {
                // NOTE(review): getChildNodes() normally never yields
                // ATTRIBUTE_NODE, so this branch looks unreachable — confirm
                // whether attribute contexts are stored some other way.
                String s = DomUtils.serializeNode(value);
                XdmNode xn = builder.build(new StreamSource(new CharArrayReader(s.toCharArray())));
                return (XdmNode) xn.axisIterator(Axis.ATTRIBUTE).next();
            } else if (n.getNodeType() == Node.ELEMENT_NODE) {
                // Element context: wrap it in its own document and build.
                Document doc = DomUtils.createDocument(n);
                return builder.build(new DOMSource(doc));
            }
        }
    }
    return null;
}
Returns the context node for a test from its log document
5,610
/**
 * Creates report_logs.xml in the session log directory. The file contains an
 * execution root element with one XInclude reference for every log.xml found
 * under the directory.
 *
 * @param sessionLogDir path of the session log directory
 * @throws Exception if the report cannot be built or written
 */
public static void createFullReportLog(String sessionLogDir) throws Exception {
    File reportFile = new File(sessionLogDir + File.separator + "report_logs.xml");
    // FileOutputStream truncates/creates the file itself, so the original
    // delete + createNewFile + second File construction were redundant.
    List<File> files = getFileListing(new File(sessionLogDir));
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    factory.setNamespaceAware(true);
    factory.setExpandEntityReferences(false);
    DocumentBuilder builder = factory.newDocumentBuilder();
    Document doc = builder.newDocument();
    Element execution = doc.createElement("execution");
    execution.setAttributeNS("http://www.w3.org/2000/xmlns/", "xmlns:xi",
            "http://www.w3.org/2001/XInclude");
    doc.appendChild(execution);
    for (File file : files) {
        Element include = doc.createElementNS("http://www.w3.org/2001/XInclude", "xi:include");
        include.setAttribute("href", file.getAbsolutePath());
        execution.appendChild(include);
    }
    TransformerFactory xformFactory = TransformerFactory.newInstance();
    xformFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
    Transformer idTransform = xformFactory.newTransformer();
    OutputStream reportLogs = new FileOutputStream(reportFile);
    try {
        idTransform.transform(new DOMSource(doc), new StreamResult(reportLogs));
    } finally {
        // Close on every path; the original leaked the stream when the
        // transform threw. (The dead local `result` is also removed.)
        reportLogs.close();
    }
}
Generates a file in the log directory referring to all log files . Creates a file called report_logs . xml in the log folder that includes all logs listed inside the directory .
5,611
/**
 * Recursively walks a directory tree and returns all log files found.
 *
 * @param logDir the root directory to walk
 * @return every log.xml file discovered below the root
 * @throws Exception propagated from the recursive scan
 */
private static List<File> getFileListing(File logDir) throws Exception {
    return getFileListingLogs(logDir);
}
Recursively walk a directory tree and return a List of all log files found .
5,612
/**
 * Collects log.xml files directly in the given directory, then recurses into
 * its subdirectories ordered by last-modified time (oldest first).
 *
 * @param aStartingDir directory to scan
 * @return all log.xml files found below the directory
 * @throws Exception propagated from recursive calls
 */
static private List<File> getFileListingLogs(File aStartingDir) throws Exception {
    List<File> result = new ArrayList<File>();
    File[] logfiles = aStartingDir.listFiles(new FileFilter() {
        public boolean accept(File pathname) {
            return pathname.isFile();
        }
    });
    // listFiles returns null for unreadable/nonexistent paths; the original
    // would throw a NullPointerException here.
    if (logfiles != null) {
        for (File file : logfiles) {
            if (file.getName().equals("log.xml")) {
                result.add(file);
            }
        }
    }
    File[] allDirs = aStartingDir.listFiles(new FileFilter() {
        public boolean accept(File pathname) {
            return pathname.isDirectory();
        }
    });
    if (allDirs != null) {
        // Copy into a resizable list before sorting, then recurse in
        // oldest-first order.
        List<File> allDirsList = new ArrayList<File>(Arrays.asList(allDirs));
        Collections.sort(allDirsList, new Comparator<File>() {
            public int compare(File o1, File o2) {
                // Long.compare replaces the hand-rolled three-way branch.
                return Long.compare(o1.lastModified(), o2.lastModified());
            }
        });
        for (File dir : allDirsList) {
            result.addAll(getFileListingLogs(dir));
        }
    }
    return result;
}
Get all log files and directories and make recursive call .
5,613
/**
 * Writes the residual ICS document to a file in the test session directory.
 * Does nothing if the coverage file already exists.
 */
public void writeCoverageResults() {
    File coverageFile = new File(this.testSessionDir, ICS_MAP.get(this.requestId));
    if (coverageFile.exists()) {
        return;
    }
    OutputStream fos = null;
    try {
        fos = new FileOutputStream(coverageFile, false);
        writeDocument(fos, this.coverageDoc);
    } catch (FileNotFoundException e) {
        // The original swallowed this silently; at least record the failure.
        LOGR.warning("Unable to create coverage file: " + e.getMessage());
    } finally {
        try {
            if (fos != null) {
                fos.close();
                LOGR.config("Wrote coverage results to " + coverageFile.getCanonicalPath());
            }
        } catch (IOException ioe) {
            LOGR.warning(ioe.getMessage());
        }
    }
}
Writes the residual ICS document to a file in the test session directory .
5,614
/**
 * Writes a DOM Document to the given OutputStream using the UTF-8 encoding.
 * The XML declaration is omitted and the output is pretty-printed.
 *
 * @param outStream destination stream (not closed by this method)
 * @param doc the document to serialize
 */
void writeDocument(OutputStream outStream, Document doc) {
    try {
        DOMImplementationRegistry domRegistry = DOMImplementationRegistry.newInstance();
        DOMImplementationLS impl = (DOMImplementationLS) domRegistry.getDOMImplementation("LS");
        LSSerializer writer = impl.createLSSerializer();
        writer.getDomConfig().setParameter("xml-declaration", false);
        writer.getDomConfig().setParameter("format-pretty-print", true);
        LSOutput output = impl.createLSOutput();
        output.setEncoding("UTF-8");
        output.setByteStream(outStream);
        writer.write(doc, output);
    } catch (Exception e) {
        // Log with the throwable so the stack trace is preserved; the
        // original logged only the (possibly null) message.
        LOGR.log(Level.WARNING, "Failed to serialize document", e);
    }
}
Writes a DOM Document to the given OutputStream using the UTF - 8 encoding . The XML declaration is omitted .
5,615
/**
 * Creates a namespace-aware SAXParser configured to resolve XInclude
 * references without performing schema validation.
 *
 * @param doBaseURIFixup whether xml:base attributes are added during inclusion
 * @return the configured parser
 * @throws RuntimeException wrapping any parser configuration failure
 */
public static SAXParser createXIncludeAwareSAXParser(boolean doBaseURIFixup) {
    SAXParserFactory factory = SAXParserFactory.newInstance();
    factory.setNamespaceAware(true);
    factory.setXIncludeAware(true);
    try {
        factory.setFeature(Constants.XERCES_FEATURE_PREFIX
                + Constants.XINCLUDE_FIXUP_BASE_URIS_FEATURE, doBaseURIFixup);
        return factory.newSAXParser();
    } catch (Exception x) {
        throw new RuntimeException(x);
    }
}
Creates a SAXParser that is configured to resolve XInclude references but not perform schema validation .
5,616
/**
 * Checks that passwords in the user files are not in clear text, hashing any
 * that are. If a hash value is found for some user it is assumed that all
 * user passwords have previously been hashed and no further checks are done.
 *
 * @param evt servlet context event (lifecycle trigger only)
 */
public void contextInitialized(ServletContextEvent evt) {
    File usersDir = new File(SetupOptions.getBaseConfigDirectory(), "users");
    if (!usersDir.isDirectory()) {
        return;
    }
    DocumentBuilder domBuilder = null;
    try {
        domBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    } catch (ParserConfigurationException e) {
        LOGR.warning(e.getMessage());
        return;
    }
    // Serializer writes pretty-printed UTF-8 XML without an XML declaration.
    DOMImplementationLS lsFactory = buildDOM3LoadAndSaveFactory();
    LSSerializer serializer = lsFactory.createLSSerializer();
    serializer.getDomConfig().setParameter(Constants.DOM_XMLDECL, Boolean.FALSE);
    serializer.getDomConfig().setParameter(Constants.DOM_FORMAT_PRETTY_PRINT, Boolean.TRUE);
    LSOutput output = lsFactory.createLSOutput();
    output.setEncoding("UTF-8");
    for (File userDir : usersDir.listFiles()) {
        File userFile = new File(userDir, "user.xml");
        if (!userFile.isFile()) {
            continue;
        }
        try {
            Document doc = domBuilder.parse(userFile);
            Node pwNode = doc.getElementsByTagName("password").item(0);
            if (null == pwNode) {
                continue;
            }
            String password = pwNode.getTextContent();
            // A stored hash has 5 colon-separated fields; finding one means
            // migration already ran, so the whole scan stops here (break,
            // not continue — deliberate per the class contract).
            if (password.split(":").length == 5) {
                break;
            }
            pwNode.setTextContent(PasswordStorage.createHash(password));
            FileOutputStream os = new FileOutputStream(userFile, false);
            output.setByteStream(os);
            serializer.write(doc, output);
            os.close();
        } catch (Exception e) {
            // Best-effort per user: log and move on to the next user file.
            LOGR.info(e.getMessage());
            continue;
        }
    }
}
Checks that passwords in the user files are not in clear text . If a hash value is found for some user it is assumed that all user passwords have previously been hashed and no further checks are done .
5,617
/**
 * Builds a DOMImplementationLS factory supporting the DOM Level 3 Load and
 * Save specification, which provides factory methods for loading and saving
 * DOM nodes.
 *
 * @return the factory, or null if it could not be created
 */
DOMImplementationLS buildDOM3LoadAndSaveFactory() {
    try {
        DOMImplementationRegistry registry = DOMImplementationRegistry.newInstance();
        return (DOMImplementationLS) registry.getDOMImplementation("LS 3.0");
    } catch (Exception e) {
        LOGR.log(Level.WARNING, "Failed to create DOMImplementationLS", e);
        return null;
    }
}
Builds a DOMImplementationLS factory that supports the DOM Level 3 Load and Save specification . It provides various factory methods for creating the objects required for loading and saving DOM nodes .
5,618
/**
 * Returns a random alphanumeric string of the given length.
 *
 * @param len desired length; a non-positive value yields ""
 * @param random source of randomness
 * @return a string of exactly {@code len} letters and digits
 */
public static String randomString(int len, Random random) {
    if (len < 1) {
        return "";
    }
    final int start = ' ';
    final int end = 'z' + 1;
    final int gap = end - start;
    // Rejection sampling: draw from the printable range [' ', 'z'] and keep
    // only letters/digits until the requested length is reached. The draw
    // sequence matches the original, so seeded output is unchanged.
    // StringBuilder replaces the needlessly synchronized StringBuffer.
    StringBuilder buffer = new StringBuilder(len);
    while (buffer.length() < len) {
        char ch = (char) (random.nextInt(gap) + start);
        if (Character.isLetterOrDigit(ch)) {
            buffer.append(ch);
        }
    }
    return buffer.toString();
}
Returns a random string of a certain length
5,619
/**
 * Uses MD5 to create a hash value for the given String, encoded as hex.
 * Note: MD5 is not collision-resistant; avoid for security-sensitive uses.
 *
 * @param text the input text (encoded as ISO-8859-1 before hashing)
 * @return the hex-encoded MD5 digest, or "" on failure
 */
public static String generateMD5(String text) {
    try {
        MessageDigest md = MessageDigest.getInstance("MD5");
        // Digest the full byte encoding in one step. The original
        // pre-allocated a dead 8-byte array and sliced the update by
        // character count rather than byte count.
        byte[] md5hash = md.digest(text.getBytes("iso-8859-1"));
        return convertToHex(md5hash);
    } catch (Exception e) {
        jlogger.log(Level.SEVERE, "Error generating MD5: " + e.getMessage(), e);
        System.out.println("Error generating MD5: " + e.getMessage());
        return "";
    }
}
Uses MD5 to create a hash value for the given String
5,620
/**
 * Builds a SOAP request from the request element, sends it, and returns the
 * URLConnection ready to be parsed.
 *
 * @param xml the request element carrying charset/version attributes and
 *        url, action, and body children
 * @return the open connection with the request already written
 * @throws Exception on malformed input or I/O failure
 */
static public URLConnection build_soap_request(Node xml) throws Exception {
    String sUrl = null;
    String method = "POST";
    // BUG FIX: the original ternary was inverted — an empty charset
    // attribute yielded "" and a non-empty one was discarded for "UTF-8".
    // Default to UTF-8 only when no charset attribute is supplied.
    String charset = ((Element) xml).getAttribute("charset").equals("")
            ? "UTF-8" : ((Element) xml).getAttribute("charset");
    String version = ((Element) xml).getAttribute("version");
    String action = "";
    String contentType = "";
    Element body = null;
    NodeList nl = xml.getChildNodes();
    for (int i = 0; i < nl.getLength(); i++) {
        Node n = nl.item(i);
        if (n.getNodeType() == Node.ELEMENT_NODE) {
            if (n.getLocalName().equals("url")) {
                sUrl = n.getTextContent();
            } else if (n.getLocalName().equals("action")) {
                action = n.getTextContent();
            } else if (n.getLocalName().equals("body")) {
                body = (org.w3c.dom.Element) n;
            }
        }
    }
    List<Element> headerBlocks = DomUtils.getElementsByTagNameNS(xml, CTL_NS, HEADER_BLOCKS);
    URLConnection uc = new URL(sUrl).openConnection();
    if (uc instanceof HttpURLConnection) {
        ((HttpURLConnection) uc).setRequestMethod(method);
    }
    uc.setDoOutput(true);
    byte[] bytes = SoapUtils.getSoapMessageAsByte(version, headerBlocks, body, charset);
    uc.setRequestProperty("User-Agent", "Team Engine 1.2");
    uc.setRequestProperty("Cache-Control", "no-cache");
    uc.setRequestProperty("Pragma", "no-cache");
    uc.setRequestProperty("charset", charset);
    uc.setRequestProperty("Content-Length", Integer.toString(bytes.length));
    if (version.equals(SOAP_V_1_1)) {
        // SOAP 1.1 carries the action in the SOAPAction header.
        uc.setRequestProperty("Accept", "text/xml");
        uc.setRequestProperty("SOAPAction", action);
        contentType = "text/xml";
        if (!charset.equals("")) {
            contentType = contentType + "; charset=" + charset;
        }
        uc.setRequestProperty("Content-Type", contentType);
    } else {
        // SOAP 1.2 carries the action as a Content-Type parameter.
        uc.setRequestProperty("Accept", "application/soap+xml");
        contentType = "application/soap+xml";
        if (!charset.equals("")) {
            contentType = contentType + "; charset=" + charset;
        }
        if (!action.equals("")) {
            contentType = contentType + "; action=" + action;
        }
        uc.setRequestProperty("Content-Type", contentType);
    }
    OutputStream os = uc.getOutputStream();
    try {
        os.write(bytes);
    } finally {
        // Close the request stream; the original left it open.
        os.close();
    }
    return uc;
}
Creates a SOAP request , sends it , and returns a URLConnection ready to be parsed .
5,621
/**
 * Parses the content retrieved from some URI and builds a DOM Document
 * containing information extracted from the response message, delegating to
 * the three-argument parse with a freshly created response document.
 *
 * @param uc the open connection to read from
 * @param instruction the parser instructions
 * @return the parse result element
 * @throws Throwable propagated from the delegate
 */
public Element parse(URLConnection uc, Node instruction) throws Throwable {
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    factory.setNamespaceAware(true);
    factory.setExpandEntityReferences(false);
    Document responseDoc = factory.newDocumentBuilder().newDocument();
    return parse(uc, instruction, responseDoc);
}
Parses the content retrieved from some URI and builds a DOM Document containing information extracted from the response message . Subsidiary parsers are invoked in accord with the supplied parser instructions .
5,622
/**
 * Builds a DOM Document representing a classpath resource.
 *
 * @param name the resource name relative to this class
 * @return the parsed document, or null if parsing failed
 */
public Document findXMLResource(String name) {
    URL url = this.getClass().getResource(name);
    DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
    docFactory.setNamespaceAware(true);
    docFactory.setExpandEntityReferences(false);
    try {
        return docFactory.newDocumentBuilder().parse(url.toURI().toString());
    } catch (Exception e) {
        LOGR.log(Level.WARNING, "Failed to parse classpath resource " + name, e);
        return null;
    }
}
Builds a DOM Document representing a classpath resource .
5,623
/**
 * Converts the EARL results (earl-results.rdf) in outputDir into an HTML
 * report written to outputDir/result.
 *
 * @param outputDir directory containing earl-results.rdf
 * @return the directory containing the generated HTML report
 * @throws FileNotFoundException if the HTML output directory was not produced
 * @throws RuntimeException wrapping any transformation failure
 */
public static File earlHtmlReport(String outputDir) throws FileNotFoundException {
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    String resourceDir = cl.getResource("com/occamlab/te/earl/lib").getPath();
    String earlXsl = cl.getResource("com/occamlab/te/earl_html_report.xsl").toString();
    File htmlOutput = new File(outputDir, "result");
    htmlOutput.mkdir();
    LOGR.fine("HTML output is written to directory " + htmlOutput);
    File earlResult = new File(outputDir, "earl-results.rdf");
    try {
        TransformerFactory tf = TransformerFactory.newInstance();
        Transformer transformer = tf.newTransformer(new StreamSource(earlXsl));
        transformer.setParameter("outputDir", htmlOutput);
        File indexHtml = new File(htmlOutput, "index.html");
        // try-with-resources closes the stream even when the transform
        // throws; the original leaked it on that path. createNewFile was
        // redundant — FileOutputStream creates the file.
        try (FileOutputStream outputStream = new FileOutputStream(indexHtml)) {
            transformer.transform(new StreamSource(earlResult), new StreamResult(outputStream));
        }
        FileUtils.copyDirectory(new File(resourceDir), htmlOutput);
    } catch (Exception e) {
        LOGR.log(Level.SEVERE, "Transformation of EARL to HTML failed.", e);
        throw new RuntimeException(e);
    }
    if (!htmlOutput.exists()) {
        throw new FileNotFoundException("HTML results not found at " + htmlOutput.getAbsolutePath());
    }
    return htmlOutput;
}
Convert EARL result into HTML report .
5,624
/**
 * Recursively adds the contents of a directory to a zip output stream.
 * NOTE(review): entries are named by absolute path, which yields
 * non-portable archives with absolute entry names; preserved here for
 * backward compatibility — confirm whether relative names were intended.
 *
 * @param dirObj the directory to add
 * @param out the zip stream to write entries to
 * @throws IOException on read or write failure
 */
private static void addDir(File dirObj, ZipOutputStream out) throws IOException {
    File[] dirList = dirObj.listFiles();
    if (dirList == null) {
        // Unreadable/nonexistent directory; the original threw an NPE here.
        return;
    }
    byte[] tmpBuf = new byte[1024];
    for (File entry : dirList) {
        if (entry.isDirectory()) {
            addDir(entry, out);
            continue;
        }
        FileInputStream in = new FileInputStream(entry.getAbsolutePath());
        try {
            System.out.println(" Adding: " + entry.getAbsolutePath());
            out.putNextEntry(new ZipEntry(entry.getAbsolutePath()));
            int len;
            while ((len = in.read(tmpBuf)) > 0) {
                out.write(tmpBuf, 0, len);
            }
            out.closeEntry();
        } finally {
            // Close on every path; the original leaked the stream when a
            // write threw.
            in.close();
        }
    }
}
Add directory to zip file
5,625
/**
 * Builds the refresh fallback sequence from the node list: each valid
 * carrier node is tried in turn, moving to the next only when the previous
 * attempt errored. Returns an empty observable when no node qualifies.
 *
 * @param nodeInfos candidate nodes
 * @param bucketName bucket to refresh
 * @return the chained refresh sequence, or Observable.empty() if none
 */
private Observable<ProposedBucketConfigContext> buildRefreshFallbackSequence(List<NodeInfo> nodeInfos, String bucketName) {
    Observable<ProposedBucketConfigContext> sequence = null;
    for (final NodeInfo node : nodeInfos) {
        if (!isValidCarrierNode(environment.sslEnabled(), node)) {
            continue;
        }
        Observable<ProposedBucketConfigContext> attempt =
                refreshAgainstNode(bucketName, node.hostname());
        sequence = (sequence == null) ? attempt : sequence.onErrorResumeNext(attempt);
    }
    if (sequence == null) {
        LOGGER.debug("Could not build refresh sequence, node list is empty - ignoring attempt.");
        return Observable.empty();
    }
    return sequence;
}
Helper method which builds the refresh fallback sequence based on the node list .
5,626
/**
 * Rotates the node list left (in place) by the current global offset so
 * repeated refresh attempts spread load across nodes.
 *
 * @param nodeList the list to rearrange in place
 * @param <T> element type
 */
<T> void shiftNodeList(List<T> nodeList) {
    // Advance the global offset first so the rotation cadence is preserved
    // even for empty lists.
    long offset = nodeOffset++;
    if (nodeList.isEmpty()) {
        // Guard: the original threw ArithmeticException (mod by zero) here.
        return;
    }
    int shiftBy = (int) (offset % nodeList.size());
    for (int i = 0; i < shiftBy; i++) {
        nodeList.add(nodeList.remove(0));
    }
}
Helper method to transparently rearrange the node list based on the current global offset .
5,627
/**
 * Returns true if the given node exposes the binary (KV) service — over SSL
 * when SSL is enabled, or plainly otherwise — and can therefore perform a
 * carrier refresh.
 *
 * @param sslEnabled whether SSL transport is configured
 * @param nodeInfo the node to inspect
 * @return true if the node can serve carrier refreshes
 */
private static boolean isValidCarrierNode(final boolean sslEnabled, final NodeInfo nodeInfo) {
    boolean sslBinary = sslEnabled && nodeInfo.sslServices().containsKey(ServiceType.BINARY);
    return sslBinary || nodeInfo.services().containsKey(ServiceType.BINARY);
}
Helper method to detect if the given node can actually perform carrier refresh .
5,628
/**
 * Returns true if polling the bucket is allowed — i.e. it was never polled,
 * or the configured floor interval has elapsed since the last poll.
 *
 * @param bucket the bucket name
 * @return true when a poll may proceed, false while below the floor interval
 */
private boolean allowedToPoll(final String bucket) {
    Long lastPoll = lastPollTimestamps.get(bucket);
    if (lastPoll == null) {
        return true;
    }
    return (System.nanoTime() - lastPoll) >= pollFloorNs;
}
Returns true if polling is allowed false if we are below the configured floor poll interval .
5,629
/**
 * Accepts any byte that may appear in a JSON number literal — digits,
 * exponent markers (e/E), signs, and the decimal point — without verifying
 * that the overall sequence forms a valid number.
 *
 * @param value the byte to inspect
 * @return true if the byte can belong to a number literal
 * @throws Exception declared for interface compatibility; never thrown here
 */
public boolean process(byte value) throws Exception {
    switch (value) {
        case (byte) 'e':
        case (byte) 'E':
        case (byte) '.':
            return true;
        default:
            return (value >= (byte) '0' && value <= (byte) '9')
                    || value == JSON_MINUS || value == JSON_PLUS;
    }
}
Accepts bytes that may appear in a JSON number literal without verifying that the overall sequence is a valid number .
5,630
/**
 * Returns true if the given bucket's configuration contains a fast-forward
 * map.
 *
 * @param bucketName the bucket to check; null yields false
 * @param clusterConfig the cluster configuration to consult
 * @return true if a fast-forward map is present
 */
private static boolean bucketHasFastForwardMap(String bucketName, ClusterConfig clusterConfig) {
    if (bucketName == null) {
        return false;
    }
    BucketConfig config = clusterConfig.bucketConfig(bucketName);
    if (config == null) {
        return false;
    }
    return config.hasFastForwardMap();
}
Helper method to check if the current given bucket contains a fast forward map .
5,631
/**
 * Converts a binary (KV) protocol status code into the typesafe
 * ResponseStatus enum so it can be acted upon later. Any code without an
 * explicit mapping falls through to FAILURE.
 *
 * @param code the raw status code from the wire
 * @return the corresponding ResponseStatus
 */
public static ResponseStatus fromBinary(final short code) {
    KeyValueStatus status = KeyValueStatus.valueOf(code);
    switch (status) {
        // Core KV statuses.
        case SUCCESS: return ResponseStatus.SUCCESS;
        case ERR_EXISTS: return ResponseStatus.EXISTS;
        case ERR_NOT_FOUND: return ResponseStatus.NOT_EXISTS;
        case ERR_NOT_MY_VBUCKET: return ResponseStatus.RETRY;
        case ERR_NOT_STORED: return ResponseStatus.NOT_STORED;
        case ERR_TOO_BIG: return ResponseStatus.TOO_BIG;
        case ERR_TEMP_FAIL: return ResponseStatus.TEMPORARY_FAILURE;
        case ERR_BUSY: return ResponseStatus.SERVER_BUSY;
        case ERR_NO_MEM: return ResponseStatus.OUT_OF_MEMORY;
        // Both unknown and unsupported commands map to the same status.
        case ERR_UNKNOWN_COMMAND: return ResponseStatus.COMMAND_UNAVAILABLE;
        case ERR_NOT_SUPPORTED: return ResponseStatus.COMMAND_UNAVAILABLE;
        case ERR_ACCESS: return ResponseStatus.ACCESS_ERROR;
        case ERR_INTERNAL: return ResponseStatus.INTERNAL_ERROR;
        case ERR_INVALID: return ResponseStatus.INVALID_ARGUMENTS;
        case ERR_DELTA_BADVAL: return ResponseStatus.INVALID_ARGUMENTS;
        case ERR_RANGE: return ResponseStatus.RANGE_ERROR;
        case ERR_ROLLBACK: return ResponseStatus.ROLLBACK;
        // Sub-document statuses.
        case ERR_SUBDOC_PATH_NOT_FOUND: return ResponseStatus.SUBDOC_PATH_NOT_FOUND;
        case ERR_SUBDOC_PATH_MISMATCH: return ResponseStatus.SUBDOC_PATH_MISMATCH;
        case ERR_SUBDOC_PATH_INVALID: return ResponseStatus.SUBDOC_PATH_INVALID;
        case ERR_SUBDOC_PATH_TOO_BIG: return ResponseStatus.SUBDOC_PATH_TOO_BIG;
        case ERR_SUBDOC_DOC_TOO_DEEP: return ResponseStatus.SUBDOC_DOC_TOO_DEEP;
        case ERR_SUBDOC_VALUE_CANTINSERT: return ResponseStatus.SUBDOC_VALUE_CANTINSERT;
        case ERR_SUBDOC_DOC_NOT_JSON: return ResponseStatus.SUBDOC_DOC_NOT_JSON;
        case ERR_SUBDOC_NUM_RANGE: return ResponseStatus.SUBDOC_NUM_RANGE;
        case ERR_SUBDOC_DELTA_RANGE: return ResponseStatus.SUBDOC_DELTA_RANGE;
        case ERR_SUBDOC_PATH_EXISTS: return ResponseStatus.SUBDOC_PATH_EXISTS;
        case ERR_SUBDOC_VALUE_TOO_DEEP: return ResponseStatus.SUBDOC_VALUE_TOO_DEEP;
        case ERR_SUBDOC_INVALID_COMBO: return ResponseStatus.SUBDOC_INVALID_COMBO;
        case ERR_SUBDOC_MULTI_PATH_FAILURE: return ResponseStatus.SUBDOC_MULTI_PATH_FAILURE;
        // NOTE(review): an invalid xattr flag combo is reported as
        // INTERNAL_ERROR rather than a dedicated status — confirm this
        // asymmetry with the other xattr cases is intentional.
        case ERR_SUBDOC_XATTR_INVALID_FLAG_COMBO: return ResponseStatus.INTERNAL_ERROR;
        case ERR_SUBDOC_XATTR_UNKNOWN_MACRO: return ResponseStatus.SUBDOC_XATTR_UNKNOWN_MACRO;
        case ERR_SUBDOC_XATTR_INVALID_KEY_COMBO: return ResponseStatus.SUBDOC_XATTR_INVALID_KEY_COMBO;
    }
    return ResponseStatus.FAILURE;
}
Convert the binary protocol status in a typesafe enum that can be acted upon later .
5,632
/**
 * Looks up the error code for the given binary status in the current
 * Key/Value error map.
 *
 * @param code the raw status code from the wire
 * @return the mapped error code, or null if no error map has been loaded
 */
public static ErrorMap.ErrorCode readErrorCodeFromErrorMap(final short code) {
    if (BINARY_ERROR_MAP == null) {
        LOGGER.trace("Binary error map unavailable");
        return null;
    }
    return BINARY_ERROR_MAP.errors().get(code);
}
Get the error code from Key Value error map
5,633
/**
 * Converts an HTTP status code into the typesafe ResponseStatus enum so it
 * can be acted upon later. Unknown codes are logged and map to FAILURE.
 *
 * @param code the HTTP status code
 * @return the corresponding ResponseStatus
 */
public static ResponseStatus fromHttp(final int code) {
    switch (code) {
        case HTTP_OK:
        case HTTP_CREATED:
        case HTTP_ACCEPTED:
            return ResponseStatus.SUCCESS;
        case HTTP_NOT_FOUND:
            return ResponseStatus.NOT_EXISTS;
        case HTTP_BAD_REQUEST:
            return ResponseStatus.INVALID_ARGUMENTS;
        case HTTP_INTERNAL_ERROR:
            return ResponseStatus.INTERNAL_ERROR;
        case HTTP_UNAUTHORIZED:
            return ResponseStatus.ACCESS_ERROR;
        case HTTP_TOO_MANY_REQUESTS:
            return ResponseStatus.FAILURE;
        default:
            LOGGER.warn("Unknown ResponseStatus with Protocol HTTP: {}", code);
            return ResponseStatus.FAILURE;
    }
}
Convert the http protocol status in a typesafe enum that can be acted upon later .
5,634
/**
 * Updates the error map used by the response status converter, but only if
 * the supplied map is newer than (compares greater than) the current one.
 *
 * @param map the candidate error map; null is ignored
 */
public static void updateBinaryErrorMap(final ErrorMap map) {
    if (map == null) {
        return;
    }
    boolean isNewer = BINARY_ERROR_MAP == null || map.compareTo(BINARY_ERROR_MAP) > 0;
    if (isNewer) {
        BINARY_ERROR_MAP = map;
    }
}
Updates the current error map in use for all uses of the response status converter .
5,635
/**
 * Creates a tuple holding two values.
 *
 * @param v1 the first value
 * @param v2 the second value
 * @return a new Tuple2 wrapping both values
 */
public static <T1, T2> Tuple2<T1, T2> create(final T1 v1, final T2 v2) {
    return new Tuple2<T1, T2>(v1, v2);
}
Creates a tuple with two values .
5,636
/**
 * Creates a tuple holding three values.
 *
 * @param v1 the first value
 * @param v2 the second value
 * @param v3 the third value
 * @return a new Tuple3 wrapping all three values
 */
public static <T1, T2, T3> Tuple3<T1, T2, T3> create(final T1 v1, final T2 v2, final T3 v3) {
    return new Tuple3<T1, T2, T3>(v1, v2, v3);
}
Creates a tuple with three values .
5,637
/**
 * Creates a tuple holding four values.
 *
 * @param v1 the first value
 * @param v2 the second value
 * @param v3 the third value
 * @param v4 the fourth value
 * @return a new Tuple4 wrapping all four values
 */
public static <T1, T2, T3, T4> Tuple4<T1, T2, T3, T4> create(final T1 v1, final T2 v2, final T3 v3, final T4 v4) {
    return new Tuple4<T1, T2, T3, T4>(v1, v2, v3, v4);
}
Creates a tuple with four values .
5,638
/**
 * Creates a tuple holding five values.
 *
 * @param v1 the first value
 * @param v2 the second value
 * @param v3 the third value
 * @param v4 the fourth value
 * @param v5 the fifth value
 * @return a new Tuple5 wrapping all five values
 */
public static <T1, T2, T3, T4, T5> Tuple5<T1, T2, T3, T4, T5> create(final T1 v1, final T2 v2, final T3 v3, final T4 v4, final T5 v5) {
    return new Tuple5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
}
Creates a tuple with five values .
5,639
/**
 * Exports this report into the standard JSON format which is consistent
 * across the different language SDKs.
 *
 * @param pretty whether to pretty-print the JSON output
 * @return the encoded JSON string
 * @throws IllegalStateException if the report cannot be encoded
 */
public String exportToJson(boolean pretty) {
    Map<String, Object> result = new HashMap<String, Object>();
    Map<String, List<Map<String, Object>>> services =
            new HashMap<String, List<Map<String, Object>>>();
    // Group endpoint health entries by service type.
    for (EndpointHealth h : endpoints) {
        String type = serviceTypeFromEnum(h.type());
        // Single map lookup instead of containsKey followed by get.
        List<Map<String, Object>> eps = services.get(type);
        if (eps == null) {
            eps = new ArrayList<Map<String, Object>>();
            services.put(type, eps);
        }
        eps.add(h.toMap());
    }
    result.put("version", version);
    result.put("services", services);
    result.put("sdk", sdk);
    result.put("id", id);
    try {
        if (pretty) {
            return DefaultObjectMapper.prettyWriter().writeValueAsString(result);
        }
        return DefaultObjectMapper.writeValueAsString(result);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException("Could not encode as JSON string.", e);
    }
}
Exports this report into the standard JSON format which is consistent across different language SDKs .
5,640
private void upsertBucketConfig ( final BucketConfig newConfig ) { ClusterConfig cluster = currentConfig ; BucketConfig oldConfig = cluster . bucketConfig ( newConfig . name ( ) ) ; if ( newConfig . rev ( ) > 0 && oldConfig != null && newConfig . rev ( ) <= oldConfig . rev ( ) ) { LOGGER . trace ( "Not applying new configuration, older or same rev ID." ) ; return ; } if ( newConfig . password ( ) == null && oldConfig != null ) { newConfig . password ( oldConfig . password ( ) ) ; } if ( oldConfig != null ) { newConfig . username ( oldConfig . username ( ) ) ; } if ( oldConfig == null ) { externalNetwork = determineNetworkResolution ( newConfig , environment . networkResolution ( ) , seedHosts ) ; LOGGER . info ( "Selected network configuration: {}" , externalNetwork != null ? externalNetwork : "default" ) ; } if ( externalNetwork != null ) { newConfig . useAlternateNetwork ( externalNetwork ) ; } cluster . setBucketConfig ( newConfig . name ( ) , newConfig ) ; LOGGER . debug ( "Applying new configuration {}" , newConfig ) ; currentConfig = cluster ; boolean tainted = newConfig . tainted ( ) ; for ( Refresher refresher : refreshers . values ( ) ) { if ( tainted ) { refresher . markTainted ( newConfig ) ; } else { refresher . markUntainted ( newConfig ) ; } } configObservable . onNext ( currentConfig ) ; }
Helper method which takes the given bucket config and applies it to the cluster config .
5,641
public static String determineNetworkResolution ( final BucketConfig config , final NetworkResolution nr , final Set < NetworkAddress > seedHosts ) { if ( nr . equals ( NetworkResolution . DEFAULT ) ) { return null ; } else if ( nr . equals ( NetworkResolution . AUTO ) ) { for ( NodeInfo info : config . nodes ( ) ) { if ( seedHosts . contains ( info . hostname ( ) ) ) { return null ; } Map < String , AlternateAddress > aa = info . alternateAddresses ( ) ; if ( aa != null && ! aa . isEmpty ( ) ) { for ( Map . Entry < String , AlternateAddress > entry : aa . entrySet ( ) ) { AlternateAddress alternateAddress = entry . getValue ( ) ; if ( alternateAddress != null && seedHosts . contains ( alternateAddress . hostname ( ) ) ) { return entry . getKey ( ) ; } } } } return null ; } else { return nr . name ( ) ; } }
Helper method to figure out which network resolution should be used .
5,642
private static Observable < PingServiceHealth > mapToServiceHealth ( final String scope , final ServiceType type , final Observable < ? extends CouchbaseResponse > input , final AtomicReference < CouchbaseRequest > request , final long timeout , final TimeUnit timeUnit ) { return input . map ( new Func1 < CouchbaseResponse , PingServiceHealth > ( ) { public PingServiceHealth call ( CouchbaseResponse response ) { DiagnosticRequest request = ( DiagnosticRequest ) response . request ( ) ; String id = "0x" + Integer . toHexString ( request . localSocket ( ) . hashCode ( ) ) ; return new PingServiceHealth ( type , PingServiceHealth . PingState . OK , id , TimeUnit . NANOSECONDS . toMicros ( System . nanoTime ( ) - response . request ( ) . creationTime ( ) ) , request . localSocket ( ) , request . remoteSocket ( ) , scope ) ; } } ) . onErrorReturn ( new Func1 < Throwable , PingServiceHealth > ( ) { public PingServiceHealth call ( Throwable throwable ) { SocketAddress local = ( ( DiagnosticRequest ) request . get ( ) ) . localSocket ( ) ; SocketAddress remote = ( ( DiagnosticRequest ) request . get ( ) ) . remoteSocket ( ) ; String id = local == null ? "0x0000" : "0x" + Integer . toHexString ( local . hashCode ( ) ) ; if ( throwable instanceof TimeoutException ) { return new PingServiceHealth ( type , PingServiceHealth . PingState . TIMEOUT , id , timeUnit . toMicros ( timeout ) , local , remote , scope ) ; } else { LOGGER . warn ( "Error while running PingService for {}" , type , throwable ) ; return new PingServiceHealth ( type , PingServiceHealth . PingState . ERROR , id , TimeUnit . NANOSECONDS . toMicros ( System . nanoTime ( ) - request . get ( ) . creationTime ( ) ) , local , remote , scope ) ; } } } ) ; }
Helper method to perform the proper health conversion .
5,643
private FullBinaryMemcacheRequest helloRequest ( int connId ) throws Exception { byte [ ] key = generateAgentJson ( ctx . environment ( ) . userAgent ( ) , ctx . coreId ( ) , connId ) ; short keyLength = ( short ) key . length ; ByteBuf wanted = Unpooled . buffer ( features . size ( ) * 2 ) ; for ( ServerFeatures feature : features ) { wanted . writeShort ( feature . value ( ) ) ; } LOGGER . debug ( "Requesting supported features: {}" , features ) ; FullBinaryMemcacheRequest request = new DefaultFullBinaryMemcacheRequest ( key , Unpooled . EMPTY_BUFFER , wanted ) ; request . setOpcode ( HELLO_CMD ) ; request . setKeyLength ( keyLength ) ; request . setTotalBodyLength ( keyLength + wanted . readableBytes ( ) ) ; return request ; }
Creates the HELLO request to ask for certain supported features .
5,644
static byte [ ] generateAgentJson ( String agent , long coreId , long channelId ) throws Exception { String id = paddedHex ( coreId ) + "/" + paddedHex ( channelId ) ; if ( agent . length ( ) > 200 ) { agent = agent . substring ( 0 , 200 ) ; } HashMap < String , String > result = new HashMap < String , String > ( ) ; result . put ( "a" , agent ) ; result . put ( "i" , id ) ; return DefaultObjectMapper . writeValueAsBytes ( result ) ; }
Helper method to generate the user agent JSON .
5,645
public void channelActive ( final ChannelHandlerContext ctx ) throws Exception { this . ctx = ctx ; ctx . writeAndFlush ( new DefaultBinaryMemcacheRequest ( ) . setOpcode ( SASL_LIST_MECHS_OPCODE ) ) ; }
Once the channel is marked as active the SASL negotiation is started .
5,646
protected void channelRead0 ( ChannelHandlerContext ctx , FullBinaryMemcacheResponse msg ) throws Exception { if ( msg . getOpcode ( ) == SASL_LIST_MECHS_OPCODE ) { handleListMechsResponse ( ctx , msg ) ; } else if ( msg . getOpcode ( ) == SASL_AUTH_OPCODE ) { handleAuthResponse ( ctx , msg ) ; } else if ( msg . getOpcode ( ) == SASL_STEP_OPCODE ) { checkIsAuthed ( msg ) ; } }
Dispatches incoming SASL responses to the appropriate handler methods .
5,647
private void handleListMechsResponse ( ChannelHandlerContext ctx , FullBinaryMemcacheResponse msg ) throws Exception { String remote = ctx . channel ( ) . remoteAddress ( ) . toString ( ) ; String [ ] supportedMechanisms = msg . content ( ) . toString ( CharsetUtil . UTF_8 ) . split ( " " ) ; if ( supportedMechanisms . length == 0 ) { throw new AuthenticationException ( "Received empty SASL mechanisms list from server: " + remote ) ; } if ( forceSaslPlain ) { LOGGER . trace ( "Got SASL Mechs {} but forcing PLAIN due to config setting." , Arrays . asList ( supportedMechanisms ) ) ; supportedMechanisms = new String [ ] { "PLAIN" } ; } saslClient = Sasl . createSaslClient ( supportedMechanisms , null , "couchbase" , remote , null , this ) ; selectedMechanism = saslClient . getMechanismName ( ) ; int mechanismLength = selectedMechanism . length ( ) ; byte [ ] bytePayload = saslClient . hasInitialResponse ( ) ? saslClient . evaluateChallenge ( new byte [ ] { } ) : null ; ByteBuf payload = bytePayload != null ? ctx . alloc ( ) . buffer ( ) . writeBytes ( bytePayload ) : Unpooled . EMPTY_BUFFER ; FullBinaryMemcacheRequest initialRequest = new DefaultFullBinaryMemcacheRequest ( selectedMechanism . getBytes ( CharsetUtil . UTF_8 ) , Unpooled . EMPTY_BUFFER , payload ) ; initialRequest . setOpcode ( SASL_AUTH_OPCODE ) . setKeyLength ( ( short ) mechanismLength ) . setTotalBodyLength ( mechanismLength + payload . readableBytes ( ) ) ; ChannelFuture future = ctx . writeAndFlush ( initialRequest ) ; future . addListener ( new GenericFutureListener < Future < Void > > ( ) { public void operationComplete ( Future < Void > future ) throws Exception { if ( ! future . isSuccess ( ) ) { LOGGER . warn ( "Error during SASL Auth negotiation phase." , future ) ; } } } ) ; }
Handles an incoming SASL list mechanisms response and dispatches the next SASL AUTH step .
5,648
private void handleAuthResponse ( ChannelHandlerContext ctx , FullBinaryMemcacheResponse msg ) throws Exception { if ( saslClient . isComplete ( ) ) { checkIsAuthed ( msg ) ; return ; } byte [ ] response = new byte [ msg . content ( ) . readableBytes ( ) ] ; msg . content ( ) . readBytes ( response ) ; byte [ ] evaluatedBytes = saslClient . evaluateChallenge ( response ) ; if ( evaluatedBytes != null ) { ByteBuf content ; if ( selectedMechanism . equals ( "CRAM-MD5" ) || selectedMechanism . equals ( "PLAIN" ) ) { String [ ] evaluated = new String ( evaluatedBytes ) . split ( " " ) ; content = Unpooled . copiedBuffer ( username + "\0" + evaluated [ 1 ] , CharsetUtil . UTF_8 ) ; } else { content = Unpooled . wrappedBuffer ( evaluatedBytes ) ; } FullBinaryMemcacheRequest stepRequest = new DefaultFullBinaryMemcacheRequest ( selectedMechanism . getBytes ( CharsetUtil . UTF_8 ) , Unpooled . EMPTY_BUFFER , content ) ; stepRequest . setOpcode ( SASL_STEP_OPCODE ) . setKeyLength ( ( short ) selectedMechanism . length ( ) ) . setTotalBodyLength ( content . readableBytes ( ) + selectedMechanism . length ( ) ) ; ChannelFuture future = ctx . writeAndFlush ( stepRequest ) ; future . addListener ( new GenericFutureListener < Future < Void > > ( ) { public void operationComplete ( Future < Void > future ) throws Exception { if ( ! future . isSuccess ( ) ) { LOGGER . warn ( "Error during SASL Auth negotiation phase." , future ) ; } } } ) ; } else { throw new AuthenticationException ( "SASL Challenge evaluation returned null." ) ; } }
Handles an incoming SASL AUTH response and - if needed - dispatches the SASL STEPs .
5,649
private void checkIsAuthed ( final FullBinaryMemcacheResponse msg ) { switch ( msg . getStatus ( ) ) { case SASL_AUTH_SUCCESS : originalPromise . setSuccess ( ) ; ctx . pipeline ( ) . remove ( this ) ; ctx . fireChannelActive ( ) ; break ; case SASL_AUTH_FAILURE : originalPromise . setFailure ( new AuthenticationException ( "Authentication Failure" ) ) ; break ; default : originalPromise . setFailure ( new AuthenticationException ( "Unhandled SASL auth status: " + msg . getStatus ( ) ) ) ; } }
Once authentication is completed check the response and react appropriately to the upper layers .
5,650
protected long calculatePowerOfTwo ( long attempt ) { long step ; if ( attempt >= 64 ) { step = Long . MAX_VALUE ; } else { step = ( 1L << ( attempt - 1 ) ) ; } return Math . round ( step * growBy ) ; }
Fast path for the power - of - two calculation using a bitwise shift operator .
5,651
public static void retryOrCancel ( final CoreEnvironment environment , final CouchbaseRequest request , final EventSink < ResponseEvent > responseBuffer ) { if ( ! request . isActive ( ) ) { return ; } if ( environment . retryStrategy ( ) . shouldRetry ( request , environment ) ) { retry ( request , responseBuffer ) ; } else { request . observable ( ) . onError ( new RequestCancelledException ( "Could not dispatch request, cancelling " + "instead of retrying." ) ) ; } }
Either retry or cancel a request based on the strategy used .
5,652
public static void retry ( final CouchbaseRequest request , final EventSink < ResponseEvent > responseBuffer ) { if ( ! responseBuffer . tryPublishEvent ( ResponseHandler . RESPONSE_TRANSLATOR , request , request . observable ( ) ) ) { request . observable ( ) . onError ( CouchbaseCore . BACKPRESSURE_EXCEPTION ) ; } }
Always retry the request and send it into the response buffer .
5,653
public GenericAnalyticsResponse parse ( ) throws Exception { try { parser . parse ( ) ; responseContent . discardReadBytes ( ) ; LOGGER . trace ( "Received last chunk and completed parsing for requestId {}" , requestID ) ; } catch ( EOFException ex ) { LOGGER . trace ( "Still expecting more data for requestId {}" , requestID ) ; } if ( ! this . sentResponse && this . response != null ) { this . sentResponse = true ; return this . response ; } return null ; }
Instruct the parser to run a new parsing cycle on the current response content .
5,654
public void finishParsingAndReset ( ) { if ( queryRowObservable != null ) { queryRowObservable . onCompleted ( ) ; } if ( queryInfoObservable != null ) { queryInfoObservable . onCompleted ( ) ; } if ( queryErrorObservable != null ) { queryErrorObservable . onCompleted ( ) ; } if ( queryStatusObservable != null ) { queryStatusObservable . onCompleted ( ) ; } if ( querySignatureObservable != null ) { querySignatureObservable . onCompleted ( ) ; } if ( queryProfileInfoObservable != null ) { queryProfileInfoObservable . onCompleted ( ) ; } queryInfoObservable = null ; queryRowObservable = null ; queryErrorObservable = null ; queryStatusObservable = null ; querySignatureObservable = null ; queryProfileInfoObservable = null ; this . initialized = false ; }
Instruct the parser to finish the parsing and reset its internal state turning it back to uninitialized as well .
5,655
private void maybePushConfigChunk ( ) { String currentChunk = responseContent . toString ( CHARSET ) ; int separatorIndex = currentChunk . indexOf ( "\n\n\n\n" ) ; if ( separatorIndex > 0 ) { String content = currentChunk . substring ( 0 , separatorIndex ) ; streamingConfigObservable . onNext ( content . trim ( ) ) ; responseContent . clear ( ) ; responseContent . writeBytes ( currentChunk . substring ( separatorIndex + 4 ) . getBytes ( CHARSET ) ) ; } }
Push a config chunk into the streaming observable .
5,656
private void writeMetrics ( final CouchbaseResponse response ) { if ( currentRequest != null && currentOpTime >= 0 && env ( ) != null && env ( ) . networkLatencyMetricsCollector ( ) . isEnabled ( ) ) { try { Class < ? extends CouchbaseRequest > requestClass = currentRequest . getClass ( ) ; String simpleName = classNameCache . get ( requestClass ) ; if ( simpleName == null ) { simpleName = requestClass . getSimpleName ( ) ; classNameCache . put ( requestClass , simpleName ) ; } NetworkLatencyMetricsIdentifier identifier = new NetworkLatencyMetricsIdentifier ( remoteHostname , serviceType ( ) . toString ( ) , simpleName , response . status ( ) . toString ( ) ) ; env ( ) . networkLatencyMetricsCollector ( ) . record ( identifier , currentOpTime ) ; } catch ( Throwable e ) { LOGGER . warn ( "Could not collect latency metric for request {} ({})" , user ( currentRequest . toString ( ) ) , currentOpTime , e ) ; } } }
Helper method which creates the metrics for the current response and publishes them if enabled .
5,657
private void resetStatesAfterDecode ( final ChannelHandlerContext ctx ) { if ( traceEnabled ) { LOGGER . trace ( "{}Finished decoding of {}" , logIdent ( ctx , endpoint ) , currentRequest ) ; } currentRequest = null ; currentDecodingState = DecodingState . INITIAL ; }
Helper method which performs the final tasks in the decoding process .
5,658
private void initialDecodeTasks ( final ChannelHandlerContext ctx ) { currentRequest = sentRequestQueue . poll ( ) ; currentDecodingState = DecodingState . STARTED ; if ( currentRequest != null ) { Long st = sentRequestTimings . poll ( ) ; if ( st != null ) { currentOpTime = System . nanoTime ( ) - st ; } else { currentOpTime = - 1 ; } } if ( env ( ) . operationTracingEnabled ( ) ) { Span dispatchSpan = dispatchSpans . poll ( ) ; if ( dispatchSpan != null ) { currentDispatchSpan = dispatchSpan ; } } if ( traceEnabled ) { LOGGER . trace ( "{}Started decoding of {}" , logIdent ( ctx , endpoint ) , currentRequest ) ; } }
Helper method which performs the initial decoding process .
5,659
protected void publishResponse ( final CouchbaseResponse response , final Subject < CouchbaseResponse , CouchbaseResponse > observable ) { if ( response . status ( ) != ResponseStatus . RETRY && observable != null ) { if ( moveResponseOut ) { Scheduler scheduler = env ( ) . scheduler ( ) ; if ( scheduler instanceof CoreScheduler ) { scheduleDirect ( ( CoreScheduler ) scheduler , response , observable ) ; } else { scheduleWorker ( scheduler , response , observable ) ; } } else { completeResponse ( response , observable ) ; } } else { responseBuffer . publishEvent ( ResponseHandler . RESPONSE_TRANSLATOR , response , observable ) ; } }
Publishes a response with the attached observable .
5,660
private void completeResponse ( final CouchbaseResponse response , final Subject < CouchbaseResponse , CouchbaseResponse > observable ) { CouchbaseRequest request = response . request ( ) ; if ( request != null && ! request . isActive ( ) ) { if ( env ( ) . operationTracingEnabled ( ) && request . span ( ) != null ) { Scope scope = env ( ) . tracer ( ) . scopeManager ( ) . activate ( request . span ( ) , true ) ; scope . span ( ) . setBaggageItem ( "couchbase.orphan" , "true" ) ; scope . close ( ) ; } if ( env ( ) . orphanResponseReportingEnabled ( ) ) { env ( ) . orphanResponseReporter ( ) . report ( response ) ; } } try { observable . onNext ( response ) ; observable . onCompleted ( ) ; } catch ( Exception ex ) { LOGGER . warn ( "Caught exception while onNext on observable" , ex ) ; observable . onError ( ex ) ; } }
Fulfill and complete the response observable .
5,661
private void scheduleDirect ( CoreScheduler scheduler , final CouchbaseResponse response , final Subject < CouchbaseResponse , CouchbaseResponse > observable ) { scheduler . scheduleDirect ( new Action0 ( ) { public void call ( ) { completeResponse ( response , observable ) ; } } ) ; }
Optimized version of dispatching onto the core scheduler through direct scheduling .
5,662
private void scheduleWorker ( Scheduler scheduler , final CouchbaseResponse response , final Subject < CouchbaseResponse , CouchbaseResponse > observable ) { final Scheduler . Worker worker = scheduler . createWorker ( ) ; worker . schedule ( new Action0 ( ) { public void call ( ) { completeResponse ( response , observable ) ; worker . unsubscribe ( ) ; } } ) ; }
Dispatches the response on a generic scheduler through creating a worker .
5,663
private void channelActiveSideEffects ( final ChannelHandlerContext ctx ) { long interval = env ( ) . keepAliveInterval ( ) ; if ( env ( ) . continuousKeepAliveEnabled ( ) ) { continuousKeepAliveFuture = ctx . executor ( ) . scheduleAtFixedRate ( new Runnable ( ) { public void run ( ) { if ( shouldSendKeepAlive ( ) ) { createAndWriteKeepAlive ( ctx ) ; } } } , interval , interval , TimeUnit . MILLISECONDS ) ; } }
Helper method to perform certain side effects when the channel is connected .
5,664
private void handleOutstandingOperations ( final ChannelHandlerContext ctx ) { if ( sentRequestQueue . isEmpty ( ) ) { LOGGER . trace ( logIdent ( ctx , endpoint ) + "Not cancelling operations - sent queue is empty." ) ; return ; } LOGGER . debug ( logIdent ( ctx , endpoint ) + "Cancelling " + sentRequestQueue . size ( ) + " outstanding requests." ) ; while ( ! sentRequestQueue . isEmpty ( ) ) { REQUEST req = sentRequestQueue . poll ( ) ; try { sideEffectRequestToCancel ( req ) ; failSafe ( env ( ) . scheduler ( ) , moveResponseOut , req . observable ( ) , new RequestCancelledException ( "Request cancelled in-flight." ) ) ; } catch ( Exception ex ) { LOGGER . info ( "Exception thrown while cancelling outstanding operation: {}" , user ( req . toString ( ) ) , ex ) ; } } sentRequestTimings . clear ( ) ; }
Cancels any outstanding operations which are currently on the wire .
5,665
private void createAndWriteKeepAlive ( final ChannelHandlerContext ctx ) { final CouchbaseRequest keepAlive = createKeepAliveRequest ( ) ; if ( keepAlive != null ) { Subscriber < CouchbaseResponse > subscriber = new KeepAliveResponseAction ( ctx ) ; keepAlive . subscriber ( subscriber ) ; keepAlive . observable ( ) . timeout ( env ( ) . keepAliveTimeout ( ) , TimeUnit . MILLISECONDS ) . subscribe ( subscriber ) ; onKeepAliveFired ( ctx , keepAlive ) ; Channel channel = ctx . channel ( ) ; if ( channel . isActive ( ) && channel . isWritable ( ) ) { ctx . pipeline ( ) . writeAndFlush ( keepAlive ) ; } } }
Helper method to create write and flush the keepalive message .
5,666
protected void onKeepAliveFired ( ChannelHandlerContext ctx , CouchbaseRequest keepAliveRequest ) { if ( env ( ) . continuousKeepAliveEnabled ( ) && LOGGER . isTraceEnabled ( ) ) { LOGGER . trace ( logIdent ( ctx , endpoint ) + "Continuous KeepAlive fired" ) ; } else if ( LOGGER . isDebugEnabled ( ) ) { LOGGER . debug ( logIdent ( ctx , endpoint ) + "KeepAlive fired" ) ; } }
Override to customize the behavior when a keep alive has been triggered and a keep alive request sent .
5,667
protected void onKeepAliveResponse ( ChannelHandlerContext ctx , CouchbaseResponse keepAliveResponse ) { if ( traceEnabled ) { LOGGER . trace ( logIdent ( ctx , endpoint ) + "keepAlive was answered, status " + keepAliveResponse . status ( ) ) ; } }
Override to customize the behavior when a keep alive has been responded to .
5,668
protected void completeRequestSpan ( final CouchbaseRequest request ) { if ( request != null && request . span ( ) != null ) { if ( env ( ) . operationTracingEnabled ( ) ) { env ( ) . tracer ( ) . scopeManager ( ) . activate ( request . span ( ) , true ) . close ( ) ; } } }
Helper method to complete the request span called from child instances .
5,669
protected String remoteHttpHost ( ChannelHandlerContext ctx ) { if ( remoteHttpHost == null ) { SocketAddress addr = ctx . channel ( ) . remoteAddress ( ) ; if ( addr instanceof InetSocketAddress ) { InetSocketAddress inetAddr = ( InetSocketAddress ) addr ; remoteHttpHost = inetAddr . getAddress ( ) . getHostAddress ( ) + ":" + inetAddr . getPort ( ) ; } else { remoteHttpHost = addr . toString ( ) ; } } return remoteHttpHost ; }
Helper method to return the remote http host cached .
5,670
public Subscription scheduleDirect ( Action0 action ) { PoolWorker pw = pool . get ( ) . getEventLoop ( ) ; return pw . scheduleActual ( action , - 1 , TimeUnit . NANOSECONDS ) ; }
Schedules the action directly on one of the event loop workers without the additional infrastructure and checking .
5,671
public Observable < Service > addService ( final AddServiceRequest request ) { LOGGER . debug ( "Got instructed to add Service {}, to Node {}" , request . type ( ) , request . hostname ( ) ) ; return nodeBy ( request . hostname ( ) ) . addService ( request ) ; }
Add the service to the node .
5,672
public Observable < Service > removeService ( final RemoveServiceRequest request ) { LOGGER . debug ( "Got instructed to remove Service {}, from Node {}" , request . type ( ) , request . hostname ( ) ) ; return nodeBy ( request . hostname ( ) ) . removeService ( request ) ; }
Remove a service from a node .
5,673
public Node nodeBy ( final NetworkAddress hostname ) { if ( hostname == null ) { return null ; } for ( Node node : nodes ) { if ( node . hostname ( ) . equals ( hostname ) ) { return node ; } } return null ; }
Returns the node by its hostname .
5,674
protected Locator locator ( final CouchbaseRequest request ) { if ( request instanceof BinaryRequest ) { return binaryLocator ; } else if ( request instanceof ViewRequest ) { return viewLocator ; } else if ( request instanceof QueryRequest ) { return queryLocator ; } else if ( request instanceof ConfigRequest ) { return configLocator ; } else if ( request instanceof SearchRequest ) { return searchLocator ; } else if ( request instanceof AnalyticsRequest ) { return analyticsLocator ; } else { throw new IllegalArgumentException ( "Unknown Request Type: " + request ) ; } }
Helper method to detect the correct locator for the given request type .
5,675
public Observable < DiagnosticsResponse > diagnostics ( final String id ) { List < Observable < EndpointHealth > > diags = new ArrayList < Observable < EndpointHealth > > ( nodes . size ( ) ) ; for ( Node node : nodes ) { diags . add ( node . diagnostics ( ) ) ; } final RingBufferDiagnostics ringBufferDiagnostics = RingBufferMonitor . instance ( ) . diagnostics ( ) ; return Observable . merge ( diags ) . toList ( ) . map ( new Func1 < List < EndpointHealth > , DiagnosticsResponse > ( ) { public DiagnosticsResponse call ( List < EndpointHealth > checks ) { return new DiagnosticsResponse ( new DiagnosticsReport ( checks , environment . userAgent ( ) , id , ringBufferDiagnostics ) ) ; } } ) ; }
Performs the logistics of collecting and assembling the individual health check information on a per - service basis .
5,676
public Observable < ClusterConfig > reconfigure ( final ClusterConfig config ) { LOGGER . debug ( "Starting reconfiguration." ) ; if ( config . bucketConfigs ( ) . values ( ) . isEmpty ( ) ) { LOGGER . debug ( "No open bucket found in config, disconnecting all nodes." ) ; List < Node > snapshotNodes ; synchronized ( nodes ) { snapshotNodes = new ArrayList < Node > ( nodes ) ; } if ( snapshotNodes . isEmpty ( ) ) { return Observable . just ( config ) ; } return Observable . from ( snapshotNodes ) . doOnNext ( new Action1 < Node > ( ) { public void call ( Node node ) { removeNode ( node ) ; node . disconnect ( ) . subscribe ( new Subscriber < LifecycleState > ( ) { public void onCompleted ( ) { } public void onError ( Throwable e ) { LOGGER . warn ( "Got error during node disconnect." , e ) ; } public void onNext ( LifecycleState lifecycleState ) { } } ) ; } } ) . last ( ) . map ( new Func1 < Node , ClusterConfig > ( ) { public ClusterConfig call ( Node node ) { return config ; } } ) ; } return Observable . just ( config ) . flatMap ( new Func1 < ClusterConfig , Observable < BucketConfig > > ( ) { public Observable < BucketConfig > call ( final ClusterConfig clusterConfig ) { return Observable . from ( clusterConfig . bucketConfigs ( ) . values ( ) ) ; } } ) . flatMap ( new Func1 < BucketConfig , Observable < Boolean > > ( ) { public Observable < Boolean > call ( BucketConfig bucketConfig ) { return reconfigureBucket ( bucketConfig ) ; } } ) . last ( ) . doOnNext ( new Action1 < Boolean > ( ) { public void call ( Boolean aBoolean ) { Set < NetworkAddress > configNodes = config . allNodeAddresses ( ) ; for ( Node node : nodes ) { if ( ! configNodes . contains ( node . hostname ( ) ) ) { LOGGER . debug ( "Removing and disconnecting node {}." , node . hostname ( ) ) ; removeNode ( node ) ; node . disconnect ( ) . subscribe ( new Subscriber < LifecycleState > ( ) { public void onCompleted ( ) { } public void onError ( Throwable e ) { LOGGER . 
warn ( "Got error during node disconnect." , e ) ; } public void onNext ( LifecycleState lifecycleState ) { } } ) ; } } } } ) . map ( new Func1 < Boolean , ClusterConfig > ( ) { public ClusterConfig call ( Boolean aBoolean ) { return config ; } } ) ; }
Helper method which grabs the current configuration and checks if the node setup is out of sync .
5,677
private Observable < Boolean > reconfigureBucket ( final BucketConfig config ) { LOGGER . debug ( "Starting reconfiguration for bucket {}" , config . name ( ) ) ; List < Observable < Boolean > > observables = new ArrayList < Observable < Boolean > > ( ) ; for ( final NodeInfo nodeInfo : config . nodes ( ) ) { final String alternate = nodeInfo . useAlternateNetwork ( ) ; final NetworkAddress altHost = alternate != null ? nodeInfo . alternateAddresses ( ) . get ( alternate ) . hostname ( ) : null ; Observable < Boolean > obs = addNode ( nodeInfo . hostname ( ) , altHost ) . flatMap ( new Func1 < LifecycleState , Observable < Map < ServiceType , Integer > > > ( ) { public Observable < Map < ServiceType , Integer > > call ( final LifecycleState lifecycleState ) { Map < ServiceType , Integer > services ; if ( alternate != null ) { AlternateAddress aa = nodeInfo . alternateAddresses ( ) . get ( alternate ) ; services = environment . sslEnabled ( ) ? aa . sslServices ( ) : aa . services ( ) ; } else { services = environment . sslEnabled ( ) ? nodeInfo . sslServices ( ) : nodeInfo . services ( ) ; } return Observable . just ( services ) ; } } ) . flatMap ( new Func1 < Map < ServiceType , Integer > , Observable < AddServiceRequest > > ( ) { public Observable < AddServiceRequest > call ( final Map < ServiceType , Integer > services ) { List < AddServiceRequest > requests = new ArrayList < AddServiceRequest > ( services . size ( ) ) ; for ( Map . Entry < ServiceType , Integer > service : services . entrySet ( ) ) { requests . add ( new AddServiceRequest ( service . getKey ( ) , config . name ( ) , config . username ( ) , config . password ( ) , service . getValue ( ) , nodeInfo . hostname ( ) ) ) ; } return Observable . from ( requests ) ; } } ) . flatMap ( new Func1 < AddServiceRequest , Observable < Service > > ( ) { public Observable < Service > call ( AddServiceRequest request ) { return addService ( request ) ; } } ) . last ( ) . 
map ( new Func1 < Service , Boolean > ( ) { public Boolean call ( Service service ) { return true ; } } ) ; observables . add ( obs ) ; } return Observable . merge ( observables ) . last ( ) ; }
For every bucket that is open apply the reconfiguration .
5,678
private static Endpoint selectByPartition ( final List < Endpoint > endpoints , final short partition ) { if ( partition >= 0 ) { int numEndpoints = endpoints . size ( ) ; Endpoint endpoint = numEndpoints == 1 ? endpoints . get ( 0 ) : endpoints . get ( partition % numEndpoints ) ; if ( endpoint != null && endpoint . isState ( LifecycleState . CONNECTED ) && endpoint . isFree ( ) ) { return endpoint ; } return null ; } else { return selectFirstConnected ( endpoints ) ; } }
Helper method to select the proper target endpoint by partition .
5,679
private static Endpoint selectFirstConnected ( final List < Endpoint > endpoints ) { for ( Endpoint endpoint : endpoints ) { if ( endpoint . isState ( LifecycleState . CONNECTED ) && endpoint . isFree ( ) ) { return endpoint ; } } return null ; }
Helper method to select the first connected endpoint if no particular pinning is needed .
5,680
public int totalCount ( ) { int total = countNonService ; for ( Map . Entry < ServiceType , Integer > entry : counts . entrySet ( ) ) { total += entry . getValue ( ) ; } return total ; }
Returns the count of all requests in the ringbuffer
5,681
private String encodeKeysGet ( String keys ) { try { return URLEncoder . encode ( keys , "UTF-8" ) ; } catch ( Exception ex ) { throw new RuntimeException ( "Could not prepare view argument: " + ex ) ; } }
Encodes the keys JSON array into an URL - encoded form suitable for a GET on query service .
5,682
private void parseQueryResponse ( boolean last ) { if ( viewParsingState == QUERY_STATE_INITIAL ) { parseViewInitial ( ) ; } if ( viewParsingState == QUERY_STATE_INFO ) { parseViewInfo ( ) ; } if ( viewParsingState == QUERY_STATE_ROWS ) { parseViewRows ( last ) ; } if ( viewParsingState == QUERY_STATE_ERROR ) { parseViewError ( last ) ; } if ( viewParsingState == QUERY_STATE_DONE ) { cleanupViewStates ( ) ; } }
Main dispatch method for a query parse cycle .
5,683
private void parseViewInitial ( ) { switch ( responseHeader . getStatus ( ) . code ( ) ) { case 200 : viewParsingState = QUERY_STATE_INFO ; break ; default : viewInfoObservable . onCompleted ( ) ; viewRowObservable . onCompleted ( ) ; viewParsingState = QUERY_STATE_ERROR ; } }
Parse the initial view query state .
5,684
private void parseViewError ( boolean last ) { if ( ! last ) { return ; } if ( responseHeader . getStatus ( ) . code ( ) == 200 ) { int openBracketPos = responseContent . bytesBefore ( ( byte ) '[' ) + responseContent . readerIndex ( ) ; int closeBracketLength = findSectionClosingPosition ( responseContent , '[' , ']' ) - openBracketPos + 1 ; ByteBuf slice = responseContent . slice ( openBracketPos , closeBracketLength ) ; viewErrorObservable . onNext ( "{\"errors\":" + slice . toString ( CharsetUtil . UTF_8 ) + "}" ) ; } else { viewErrorObservable . onNext ( "{\"errors\":[" + responseContent . toString ( CharsetUtil . UTF_8 ) + "]}" ) ; } viewErrorObservable . onCompleted ( ) ; viewParsingState = QUERY_STATE_DONE ; responseContent . discardReadBytes ( ) ; }
The query response is an error ; parse it and attach it to the observable .
5,685
/**
 * Parse out the info portion from the header part of the query response.
 *
 * <p>Scans for the start of the "rows" key; everything before it is the info/meta
 * section. The trailing ',' that separated the info from "rows" is patched into a
 * '}' so the emitted info fragment is a complete JSON object on its own.</p>
 */
private void parseViewInfo() {
    int rowsStart = -1;
    // Look for the byte sequence "ro — the opening of the "rows" key — to find where the meta ends.
    for (int i = responseContent.readerIndex(); i < responseContent.writerIndex() - 2; i++) {
        byte curr = responseContent.getByte(i);
        byte f1 = responseContent.getByte(i + 1);
        byte f2 = responseContent.getByte(i + 2);
        if (curr == '"' && f1 == 'r' && f2 == 'o') {
            rowsStart = i;
            break;
        }
    }
    if (rowsStart == -1) {
        // Not enough data buffered yet; retry on the next chunk.
        return;
    }
    ByteBuf info = responseContent.readBytes(rowsStart - responseContent.readerIndex());
    // Search backwards for the trailing ',' before the "rows" key.
    int closingPointer = info.forEachByteDesc(new ByteBufProcessor() {
        public boolean process(byte value) throws Exception {
            return value != ',';
        }
    });
    if (closingPointer > 0) {
        // Replace the ',' with '}' so the info slice parses as a standalone object.
        info.setByte(closingPointer, '}');
        viewInfoObservable.onNext(info);
    } else {
        // No separator found: nothing usable, release the buffer and emit an empty one.
        info.release();
        viewInfoObservable.onNext(Unpooled.EMPTY_BUFFER);
    }
    viewInfoObservable.onCompleted();
    viewParsingState = QUERY_STATE_ROWS;
}
Parse out the info portion from the header part of the query response .
5,686
/**
 * Stream-parse the actual rows from the response and pass them to the underlying observable.
 *
 * <p>Emits one copied slice per complete JSON row object found in the buffer. If an
 * error block starts before the next row object, parsing switches to the error state.</p>
 *
 * @param last true if this is the final chunk of the response.
 */
private void parseViewRows(boolean last) {
    while (true) {
        int openBracketPos = responseContent.bytesBefore((byte) '{');
        int errorBlockPosition = findErrorBlockPosition(openBracketPos);
        if (errorBlockPosition > 0 && errorBlockPosition < openBracketPos) {
            // An error section begins before the next row: hand over to error parsing.
            responseContent.readerIndex(errorBlockPosition + responseContent.readerIndex());
            viewRowObservable.onCompleted();
            viewParsingState = QUERY_STATE_ERROR;
            return;
        }
        int closeBracketPos = findSectionClosingPosition(responseContent, '{', '}');
        if (closeBracketPos == -1) {
            // Row object not fully buffered yet; wait for more data.
            break;
        }
        int from = responseContent.readerIndex() + openBracketPos;
        int to = closeBracketPos - openBracketPos - responseContent.readerIndex() + 1;
        // Copy the slice since the shared response buffer is compacted below.
        viewRowObservable.onNext(responseContent.slice(from, to).copy());
        responseContent.readerIndex(closeBracketPos);
        responseContent.discardReadBytes();
    }
    if (last) {
        viewRowObservable.onCompleted();
        viewErrorObservable.onCompleted();
        viewParsingState = QUERY_STATE_DONE;
    }
}
Stream-parse the actual rows from the response and pass them to the underlying observable.
5,687
/**
 * Calculates the ketama hash for the given key.
 *
 * <p>Takes the MD5 digest of the key and folds its first four bytes — least
 * significant byte first — into an unsigned 32-bit value carried in a long.</p>
 *
 * @param key the raw key bytes to hash.
 * @return the ketama hash as an unsigned 32-bit value.
 * @throws IllegalStateException if the runtime has no MD5 provider (never on a compliant JVM).
 */
private static long calculateKetamaHash(final byte[] key) {
    try {
        final MessageDigest md5 = MessageDigest.getInstance("MD5");
        final byte[] digest = md5.digest(key);
        long hash = 0;
        for (int i = 3; i >= 0; i--) {
            hash = (hash << 8) | (digest[i] & 0xFF);
        }
        return hash;
    } catch (NoSuchAlgorithmException e) {
        throw new IllegalStateException("Could not encode ketama hash.", e);
    }
}
Calculates the ketama hash for the given key .
5,688
/**
 * Generic dispatch method to parse the N1QL query response chunks.
 *
 * <p>Walks the section state machine. The first guard also fires when the previous
 * section finished ({@code sectionDone}), so one invocation can cross a section
 * boundary. Note the deliberate mix of plain {@code if} (states reachable within the
 * same pass) and {@code else if} (mutually exclusive row / info variants).</p>
 *
 * @param lastChunk true if this chunk is the final one of the response.
 */
private void parseQueryResponse(boolean lastChunk) {
    if (sectionDone || queryParsingState == QUERY_STATE_INITIAL) {
        queryParsingState = transitionToNextToken(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_SIGNATURE) {
        parseQuerySignature(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_ROWS_DECIDE) {
        decideBetweenRawAndObjects(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_ROWS) {
        parseQueryRows(lastChunk);
    } else if (queryParsingState == QUERY_STATE_ROWS_RAW) {
        parseQueryRowsRaw(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_ERROR) {
        parseQueryError(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_WARNING) {
        // Warnings reuse the error parser — presumably the same JSON layout; confirm upstream.
        parseQueryError(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_STATUS) {
        parseQueryStatus(lastChunk);
    }
    if (queryParsingState == QUERY_STATE_INFO) {
        parseQueryInfo(lastChunk);
    } else if (queryParsingState == QUERY_STATE_NO_INFO) {
        finishInfo();
    }
    if (queryParsingState == QUERY_STATE_DONE) {
        // Only mark the section done (and clean up) when this really was the last chunk.
        sectionDone = lastChunk;
        queryProfileInfoObservable.onCompleted();
        if (sectionDone) {
            cleanupQueryStates();
        }
    }
}
Generic dispatch method to parse the query response chunks .
5,689
/**
 * Parse the signature section in the N1QL response.
 *
 * <p>The signature value may be a JSON object, array, string, or a bare scalar; a
 * matching closing-position processor is chosen by its first non-whitespace
 * character. The method returns without consuming anything when the value is not
 * yet fully buffered.</p>
 *
 * @param lastChunk true if this chunk is the final one of the response.
 */
private void parseQuerySignature(boolean lastChunk) {
    ByteBufProcessor processor = null;
    // Relative offset of the first non-whitespace byte (negative if none found).
    int openPos = responseContent.forEachByte(new WhitespaceSkipper()) - responseContent.readerIndex();
    if (openPos < 0) {
        return;
    }
    char openChar = (char) responseContent.getByte(responseContent.readerIndex() + openPos);
    if (openChar == '{') {
        processor = new ClosingPositionBufProcessor('{', '}', true);
    } else if (openChar == '[') {
        processor = new ClosingPositionBufProcessor('[', ']', true);
    } else if (openChar == '"') {
        processor = new StringClosingPositionBufProcessor();
    }
    int closePos;
    if (processor != null) {
        closePos = responseContent.forEachByte(processor) - responseContent.readerIndex();
    } else {
        // Bare scalar: it ends right before the next ','.
        closePos = findNextChar(responseContent, ',') - 1;
    }
    if (closePos > 0) {
        responseContent.skipBytes(openPos);
        int length = closePos - openPos + 1;
        ByteBuf signature = responseContent.readSlice(length);
        // Copy: the slice refers to the shared response buffer.
        querySignatureObservable.onNext(signature.copy());
    } else {
        // Closing delimiter not buffered yet; retry on the next chunk.
        return;
    }
    sectionDone();
    queryParsingState = transitionToNextToken(lastChunk);
}
Parse the signature section in the N1QL response .
5,690
/**
 * Parses the query raw results from the content stream as long as there is data to be found.
 *
 * <p>Raw rows are comma-separated values: each value up to the next ',' — or the
 * closing ']' of the results array — is emitted as one row. Hitting the array end
 * finishes the section and advances the state machine.</p>
 *
 * @param lastChunk true if this chunk is the final one of the response.
 */
private void parseQueryRowsRaw(boolean lastChunk) {
    while (responseContent.isReadable()) {
        int splitPos = findSplitPosition(responseContent, ',');
        int arrayEndPos = findSplitPosition(responseContent, ']');
        boolean doSectionDone = false;
        if (splitPos == -1 && arrayEndPos == -1) {
            // Neither a separator nor the array end is buffered yet; wait for more data.
            break;
        } else if (arrayEndPos > 0 && (arrayEndPos < splitPos || splitPos == -1)) {
            // The array end comes first: this is the last raw row of the section.
            splitPos = arrayEndPos;
            doSectionDone = true;
        }
        int length = splitPos - responseContent.readerIndex();
        ByteBuf resultSlice = responseContent.readSlice(length);
        // Copy: the slice refers to the shared response buffer which is compacted below.
        queryRowObservable.onNext(resultSlice.copy());
        // Consume the delimiter itself (',' or ']').
        responseContent.skipBytes(1);
        responseContent.discardReadBytes();
        if (doSectionDone) {
            sectionDone();
            queryParsingState = transitionToNextToken(lastChunk);
            break;
        }
    }
}
Parses the query raw results from the content stream as long as there is data to be found .
5,691
/**
 * Parses the errors and warnings from the content stream as long as there are some to be found.
 *
 * <p>Each JSON object in the errors/warnings array is emitted individually. The
 * section ends when it is empty or, on the last chunk, when no further object start
 * is found.</p>
 *
 * @param lastChunk true if this chunk is the final one of the response.
 */
private void parseQueryError(boolean lastChunk) {
    while (true) {
        int openBracketPos = findNextChar(responseContent, '{');
        if (isEmptySection(openBracketPos) || (lastChunk && openBracketPos < 0)) {
            sectionDone();
            queryParsingState = transitionToNextToken(lastChunk);
            break;
        }
        int closeBracketPos = findSectionClosingPosition(responseContent, '{', '}');
        if (closeBracketPos == -1) {
            // Error object not fully buffered yet; wait for the next chunk.
            break;
        }
        int length = closeBracketPos - openBracketPos - responseContent.readerIndex() + 1;
        responseContent.skipBytes(openBracketPos);
        ByteBuf resultSlice = responseContent.readSlice(length);
        // Copy: the slice refers to the shared response buffer.
        queryErrorObservable.onNext(resultSlice.copy());
    }
}
Parses the errors and warnings from the content stream as long as there are some to be found .
5,692
/**
 * Helper method to remove an item from the stored metrics.
 *
 * <p>If stats were tracked for the identifier they are stopped; any exception
 * raised while stopping is logged and swallowed so removal always proceeds.</p>
 *
 * @param identifier the identifier whose latency stats should be removed.
 */
protected void remove(I identifier) {
    final LatencyStats stats = latencyMetrics.remove(identifier);
    if (stats == null) {
        return;
    }
    try {
        stats.stop();
    } catch (Exception ex) {
        LOGGER.warn("Caught exception while removing LatencyStats, moving on.", ex);
    }
}
Helper method to remove an item from the stored metrics.
5,693
/**
 * Generate the HMAC of the given data using the configured algorithm.
 *
 * @param key the HMAC key (an empty key is rejected by some JVMs, see below).
 * @param data the payload to authenticate.
 * @return the computed HMAC bytes.
 * @throws UnsupportedOperationException if the JVM rejects an empty key.
 * @throws RuntimeException on any other failure while computing the HMAC.
 */
private byte[] hmac(byte[] key, byte[] data) {
    try {
        final Mac hmacInstance = Mac.getInstance(hmacAlgorithm);
        hmacInstance.init(new SecretKeySpec(key, hmacInstance.getAlgorithm()));
        return hmacInstance.doFinal(data);
    } catch (InvalidKeyException e) {
        // Some JVMs refuse zero-length keys; give a targeted hint in that case.
        if (key.length != 0) {
            throw new RuntimeException("Failed to generate HMAC hash for password", e);
        }
        throw new UnsupportedOperationException("This JVM does not support empty HMAC keys (empty passwords). "
            + "Please set a bucket password or upgrade your JVM.");
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}
Generate the HMAC with the given SHA algorithm.
5,694
/**
 * XOR the two arrays and store the result in the first one.
 *
 * <p>Mutates {@code result} in place; {@code other} must be at least as long as
 * {@code result}, otherwise an {@link ArrayIndexOutOfBoundsException} is raised.</p>
 *
 * @param result the array that is XORed in place.
 * @param other the array providing the second operand for each position.
 */
private static void xor(byte[] result, byte[] other) {
    for (int index = 0; index < result.length; index++) {
        result[index] ^= other[index];
    }
}
XOR the two arrays and store the result in the first one .
5,695
/**
 * Pre-computes the set of nodes that have primary partitions active.
 *
 * <p>A partition with a non-negative master index maps to the hostname of the
 * node at that index; partitions without a master (index &lt; 0) are skipped.</p>
 *
 * @param nodeInfos the list of nodes, indexed by partition master index.
 * @param partitions the partitions to inspect.
 * @return the distinct hostnames that own at least one primary partition.
 */
private static Set<NetworkAddress> buildNodesWithPrimaryPartitions(final List<NodeInfo> nodeInfos,
    final List<Partition> partitions) {
    final Set<NetworkAddress> nodesWithPrimaries = new HashSet<NetworkAddress>(nodeInfos.size());
    for (Partition partition : partitions) {
        final int masterIndex = partition.master();
        if (masterIndex < 0) {
            // No active master for this partition.
            continue;
        }
        nodesWithPrimaries.add(nodeInfos.get(masterIndex).hostname());
    }
    return nodesWithPrimaries;
}
Pre - computes a set of nodes that have primary partitions active .
5,696
/**
 * Serializes the given orphan records to JSON and writes them to the warn log.
 *
 * <p>This method is intended to be overridden in test implementations to assert
 * against the output. Serialization failures are logged and swallowed.</p>
 *
 * @param toLog the orphan entries to serialize and log.
 */
void logOrphans(final List<Map<String, Object>> toLog) {
    try {
        final String serialized;
        if (pretty) {
            serialized = prettyWriter().writeValueAsString(toLog);
        } else {
            serialized = writer().writeValueAsString(toLog);
        }
        LOGGER.warn("Orphan responses observed: {}", serialized);
    } catch (Exception ex) {
        LOGGER.warn("Could not write orphan log.", ex);
    }
}
This method is intended to be overridden in test implementations to assert against the output .
5,697
/**
 * Collects all available infos in one map.
 *
 * <p>Aggregates system, memory, thread, GC and runtime diagnostics; a TreeMap is
 * used so the keys come out sorted.</p>
 *
 * @return a sorted map with all collected diagnostic entries.
 */
public static Map<String, Object> collect() {
    final Map<String, Object> diagnostics = new TreeMap<String, Object>();
    systemInfo(diagnostics);
    memInfo(diagnostics);
    threadInfo(diagnostics);
    gcInfo(diagnostics);
    runtimeInfo(diagnostics);
    return diagnostics;
}
Collects all available infos in one map .
5,698
/**
 * Collects all available infos and formats them in a more readable way.
 *
 * @return the formatted diagnostics block, one {@code key=value} entry per line.
 */
public static String collectAndFormat() {
    final Map<String, Object> infos = collect();
    final StringBuilder out = new StringBuilder("Diagnostics {\n");
    // Separator trick: empty before the first entry, ",\n" before every following one.
    String separator = "";
    for (Map.Entry<String, Object> entry : infos.entrySet()) {
        out.append(separator).append(" ").append(entry.getKey()).append("=").append(entry.getValue());
        separator = ",\n";
    }
    return out.append("\n}").toString();
}
Collects all available infos and formats it in a better readable way .
5,699
/**
 * Once the channel is marked as active, a select-bucket command is sent if the
 * SELECT_BUCKET feature was enabled in the HELLO negotiation.
 *
 * <p>When select-bucket is disabled the handler completes its promise immediately,
 * removes itself from the pipeline and propagates the active event — order matters
 * here, so the next handler sees an already-completed connect phase.</p>
 *
 * @param ctx the channel handler context.
 * @throws Exception if something goes wrong while writing the request.
 */
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
    this.ctx = ctx;
    if (selectBucketEnabled) {
        // NOTE(review): bucket.getBytes() uses the platform default charset — assumed
        // the bucket name is ASCII; confirm against the server's expectations.
        byte[] key = bucket.getBytes();
        short keyLength = (short) bucket.length();
        BinaryMemcacheRequest request = new DefaultBinaryMemcacheRequest(key);
        request.setOpcode(SELECT_BUCKET_OPCODE);
        request.setKeyLength(keyLength);
        // Key-only request: total body length equals the key length.
        request.setTotalBodyLength(keyLength);
        this.ctx.writeAndFlush(request);
        // The handler stays in the pipeline; presumably the select-bucket response
        // completes originalPromise elsewhere — confirm in the response handler.
    } else {
        // No select-bucket needed: succeed, detach, and propagate the active event.
        originalPromise.setSuccess();
        this.ctx.pipeline().remove(this);
        this.ctx.fireChannelActive();
    }
}
Once the channel is marked as active select bucket command is sent if the HELLO request has SELECT_BUCKET feature enabled .