Dataset schema (one record per source file; column name, type, and value/length range):
  index               int64    (0 to 0)
  repo_id             string   (length 26 to 205)
  file_path           string   (length 51 to 246)
  content             string   (length 8 to 433k)
  __index_level_0__   int64    (0 to 10k)
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTAND.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTAND.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTAND extends SimpleNode { public ASTAND(int id) { super(id); } public ASTAND(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=47624dd380cfe00f384e5c8af03d69b3 (do not edit this line) */
9,900
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTNUM.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTNUM.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTNUM extends SimpleNode { public ASTNUM(int id) { super(id); } public ASTNUM(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=8c978f5115fa4de48e54d30148a58bf1 (do not edit this line) */
9,901
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTOR.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTOR.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTOR extends SimpleNode { public ASTOR(int id) { super(id); } public ASTOR(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=b1f74c0c73a8c4b265e886c9e24da36a (do not edit this line) */
9,902
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTNEQ.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTNEQ.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTNEQ extends SimpleNode { public ASTNEQ(int id) { super(id); } public ASTNEQ(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=4ed9b560c660a3074aeba567abf5c70b (do not edit this line) */
9,903
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/PartitionParserVisitor.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. PartitionParserVisitor.java Version 6.1_2 */ package com.netflix.metacat.common.server.partition.parser; public interface PartitionParserVisitor { public Object visit(SimpleNode node, Object data); public Object visit(ASTFILTER node, Object data); public Object visit(ASTAND node, Object data); public Object visit(ASTOR node, Object data); public Object visit(ASTNOT node, Object data); public Object visit(ASTBETWEEN node, Object data); public Object visit(ASTIN node, Object data); public Object visit(ASTLIKE node, Object data); public Object visit(ASTNULL node, Object data); public Object visit(ASTCOMPARE node, Object data); public Object visit(ASTGT node, Object data); public Object visit(ASTLT node, Object data); public Object visit(ASTLTE node, Object data); public Object visit(ASTGTE node, Object data); public Object visit(ASTEQ node, Object data); public Object visit(ASTNEQ node, Object data); public Object visit(ASTMATCHES node, Object data); public Object visit(ASTNUM node, Object data); public Object visit(ASTSTRING node, Object data); public Object visit(ASTBOOLEAN node, Object data); public Object visit(ASTVAR node, Object data); } /* JavaCC - OriginalChecksum=c4e323f2dccde5ac3f4e73a7741c2619 (do not edit this line) */
9,904
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/TokenMgrError.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 6.1 */ /* JavaCCOptions: */ package com.netflix.metacat.common.server.partition.parser; /** Token Manager Error. */ public class TokenMgrError extends Error { /** * The version identifier for this Serializable class. * Increment only if the <i>serialized</i> form of the * class changes. */ private static final long serialVersionUID = 1L; /* * Ordinals for various reasons why an Error of this type can be thrown. */ /** * Lexical error occurred. */ public static final int LEXICAL_ERROR = 0; /** * An attempt was made to create a second instance of a static token manager. */ public static final int STATIC_LEXER_ERROR = 1; /** * Tried to change to an invalid lexical state. */ public static final int INVALID_LEXICAL_STATE = 2; /** * Detected (and bailed out of) an infinite loop in the token manager. */ public static final int LOOP_DETECTED = 3; /** * Indicates the reason why the exception is thrown. It will have * one of the above 4 values. */ int errorCode; /** * Replaces unprintable characters by their escaped (or unicode escaped) * equivalents in the given string */ protected static final String addEscapes(String str) { StringBuffer retval = new StringBuffer(); char ch; for (int i = 0; i < str.length(); i++) { switch (str.charAt(i)) { case '\b': retval.append("\\b"); continue; case '\t': retval.append("\\t"); continue; case '\n': retval.append("\\n"); continue; case '\f': retval.append("\\f"); continue; case '\r': retval.append("\\r"); continue; case '\"': retval.append("\\\""); continue; case '\'': retval.append("\\\'"); continue; case '\\': retval.append("\\\\"); continue; default: if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { String s = "0000" + Integer.toString(ch, 16); retval.append("\\u" + s.substring(s.length() - 4, s.length())); } else { retval.append(ch); } continue; } } return retval.toString(); } /** * Returns a detailed message for the Error when it is thrown by the * token manager to indicate a lexical error. * Parameters : * EOFSeen : indicates if EOF caused the lexical error * curLexState : lexical state in which this error occurred * errorLine : line number when the error occurred * errorColumn : column number when the error occurred * errorAfter : prefix that was seen before this error occurred * curchar : the offending character * Note: You can customize the lexical error message by modifying this method. */ protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) { char curChar1 = (char)curChar; return("Lexical error at line " + errorLine + ", column " + errorColumn + ". Encountered: " + (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + (int)curChar + "), ") + "after : \"" + addEscapes(errorAfter) + "\""); } /** * You can also modify the body of this method to customize your error messages. 
* For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not * of end-users concern, so you can return something like : * * "Internal Error : Please file a bug report .... " * * from this method for such cases in the release version of your parser. */ public String getMessage() { return super.getMessage(); } /* * Constructors of various flavors follow. */ /** No arg constructor. */ public TokenMgrError() { } /** Constructor with message and reason. */ public TokenMgrError(String message, int reason) { super(message); errorCode = reason; } /** Full Constructor. */ public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) { this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason); } } /* JavaCC - OriginalChecksum=39aae34c59a71eabaec2e72513b570b1 (do not edit this line) */
9,905
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTEQ.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTEQ.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTEQ extends SimpleNode { public ASTEQ(int id) { super(id); } public ASTEQ(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=bbf13f81c94ea7197914ce9f46cc3526 (do not edit this line) */
9,906
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTFILTER.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTFILTER.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTFILTER extends SimpleNode { public ASTFILTER(int id) { super(id); } public ASTFILTER(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=502a3e691142a2ee92a5d005f0a1bb28 (do not edit this line) */
9,907
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/PartitionParser.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* PartitionParser.java */ /* Generated By:JJTree&JavaCC: Do not edit this line. PartitionParser.java */ package com.netflix.metacat.common.server.partition.parser; public class PartitionParser/*@bgen(jjtree)*/implements PartitionParserTreeConstants, PartitionParserConstants {/*@bgen(jjtree)*/ protected JJTPartitionParserState jjtree = new JJTPartitionParserState();public static void main (String args []) throws ParseException { PartitionParser parser = new PartitionParser(new java.io.StringReader(args[0])); SimpleNode root = parser.filter(); root.dump(""); System.out.println(root.jjtAccept(new com.netflix.metacat.common.server.partition.visitor.PartitionParserEval(), null)); } final public SimpleNode filter() throws ParseException {/*@bgen(jjtree) FILTER */ ASTFILTER jjtn000 = new ASTFILTER(JJTFILTER); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { expr(); jj_consume_token(0); jjtree.closeNodeScope(jjtn000, true); jjtc000 = false; {if ("" != null) return jjtn000;} } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } throw new Error("Missing return statement in function"); } final public void expr() throws ParseException { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case LPAREN:{ jj_consume_token(LPAREN); expr(); jj_consume_token(RPAREN); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case AND: case OR:{ switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case AND:{ jj_consume_token(AND); ASTAND jjtn001 = new ASTAND(JJTAND); boolean jjtc001 = true; jjtree.openNodeScope(jjtn001); try { expr(); } catch (Throwable jjte001) { if (jjtc001) { jjtree.clearNodeScope(jjtn001); jjtc001 = false; } else { jjtree.popNode(); } if (jjte001 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte001;} } if (jjte001 instanceof ParseException) { {if (true) throw (ParseException)jjte001;} } {if (true) throw (Error)jjte001;} } finally { if (jjtc001) { jjtree.closeNodeScope(jjtn001, 2); } } break; } case OR:{ jj_consume_token(OR); ASTOR jjtn002 = new ASTOR(JJTOR); boolean jjtc002 = true; jjtree.openNodeScope(jjtn002); try { expr(); } catch (Throwable jjte002) { if (jjtc002) { jjtree.clearNodeScope(jjtn002); jjtc002 = false; } else { jjtree.popNode(); } if (jjte002 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte002;} } if (jjte002 instanceof ParseException) { {if (true) throw (ParseException)jjte002;} } {if (true) throw (Error)jjte002;} } finally { if (jjtc002) { jjtree.closeNodeScope(jjtn002, 2); } } break; } default: jj_la1[0] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; } default: jj_la1[1] = jj_gen; ; } break; } default: jj_la1[4] = jj_gen; if 
(jj_2_1(1)) { EvalExpr(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case AND: case OR:{ switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case AND:{ jj_consume_token(AND); ASTAND jjtn003 = new ASTAND(JJTAND); boolean jjtc003 = true; jjtree.openNodeScope(jjtn003); try { expr(); } catch (Throwable jjte003) { if (jjtc003) { jjtree.clearNodeScope(jjtn003); jjtc003 = false; } else { jjtree.popNode(); } if (jjte003 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte003;} } if (jjte003 instanceof ParseException) { {if (true) throw (ParseException)jjte003;} } {if (true) throw (Error)jjte003;} } finally { if (jjtc003) { jjtree.closeNodeScope(jjtn003, 2); } } break; } case OR:{ jj_consume_token(OR); ASTOR jjtn004 = new ASTOR(JJTOR); boolean jjtc004 = true; jjtree.openNodeScope(jjtn004); try { expr(); } catch (Throwable jjte004) { if (jjtc004) { jjtree.clearNodeScope(jjtn004); jjtc004 = false; } else { jjtree.popNode(); } if (jjte004 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte004;} } if (jjte004 instanceof ParseException) { {if (true) throw (ParseException)jjte004;} } {if (true) throw (Error)jjte004;} } finally { if (jjtc004) { jjtree.closeNodeScope(jjtn004, 2); } } break; } default: jj_la1[2] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; } default: jj_la1[3] = jj_gen; ; } } else { jj_consume_token(-1); throw new ParseException(); } } } final public void EvalExpr() throws ParseException {boolean not = false; ASTNOT jjtn001 = new ASTNOT(JJTNOT); boolean jjtc001 = true; jjtree.openNodeScope(jjtn001); try { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case NOT:{ jj_consume_token(NOT); not=true; break; } default: jj_la1[5] = jj_gen; ; } eval(); } catch (Throwable jjte001) { if (jjtc001) { jjtree.clearNodeScope(jjtn001); jjtc001 = false; } else { jjtree.popNode(); } if (jjte001 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte001;} } if (jjte001 instanceof ParseException) { {if (true) throw (ParseException)jjte001;} } {if (true) throw (Error)jjte001;} } finally { if (jjtc001) { jjtree.closeNodeScope(jjtn001, not); } } } final public void eval() throws ParseException { if (getToken(2).kind==BETWEEN || getToken(3).kind==BETWEEN) { BetweenEval(); } else if (getToken(2).kind==IN || getToken(3).kind==IN) { InEval(); } else if (getToken(2).kind==LIKE || getToken(3).kind==LIKE) { LikeEval(); } else if (getToken(2).kind==IS) { NullEval(); } else { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case INT: case FLOAT: case BOOLEAN: case VARIABLE: case QUOTE: case SQUOTE:{ CompareEval(); break; } default: jj_la1[6] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } } final public void BetweenEval() throws ParseException {/*@bgen(jjtree) BETWEEN */ ASTBETWEEN jjtn000 = new ASTBETWEEN(JJTBETWEEN); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { term(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case NOT:{ jj_consume_token(NOT); jjtn000.not=true; break; } default: jj_la1[7] = jj_gen; ; } jj_consume_token(BETWEEN); term(); jj_consume_token(AND); term(); } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } } final public void InEval() throws ParseException {/*@bgen(jjtree) IN */ 
ASTIN jjtn000 = new ASTIN(JJTIN); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { term(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case NOT:{ jj_consume_token(NOT); jjtn000.not=true; break; } default: jj_la1[8] = jj_gen; ; } jj_consume_token(IN); jj_consume_token(LPAREN); term(); label_1: while (true) { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case COMMA:{ ; break; } default: jj_la1[9] = jj_gen; break label_1; } jj_consume_token(COMMA); term(); } jj_consume_token(RPAREN); } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } } final public void LikeEval() throws ParseException {/*@bgen(jjtree) LIKE */ ASTLIKE jjtn000 = new ASTLIKE(JJTLIKE); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { term(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case NOT:{ jj_consume_token(NOT); jjtn000.not=true; break; } default: jj_la1[10] = jj_gen; ; } jj_consume_token(LIKE); term(); } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } } final public void NullEval() throws ParseException {/*@bgen(jjtree) NULL */ ASTNULL jjtn000 = new ASTNULL(JJTNULL); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { term(); jj_consume_token(IS); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case NOT:{ jj_consume_token(NOT); jjtn000.not=true; break; } default: jj_la1[11] = jj_gen; ; } jj_consume_token(NULL); } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } } final public void CompareEval() throws ParseException {/*@bgen(jjtree) COMPARE */ ASTCOMPARE jjtn000 = new ASTCOMPARE(JJTCOMPARE); boolean jjtc000 = true; jjtree.openNodeScope(jjtn000); try { term(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case EQUAL: case NEQUAL: case GTE: case GT: case LTE: case LT: case MATCHES:{ comp(); term(); break; } default: jj_la1[12] = jj_gen; ; } } catch (Throwable jjte000) { if (jjtc000) { jjtree.clearNodeScope(jjtn000); jjtc000 = false; } else { jjtree.popNode(); } if (jjte000 instanceof RuntimeException) { {if (true) throw (RuntimeException)jjte000;} } if (jjte000 instanceof ParseException) { {if (true) throw (ParseException)jjte000;} } {if (true) throw (Error)jjte000;} } finally { if (jjtc000) { jjtree.closeNodeScope(jjtn000, true); } } } final public void comp() throws ParseException { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case GT:{ ASTGT jjtn001 = new ASTGT(JJTGT); boolean jjtc001 = true; jjtree.openNodeScope(jjtn001); try { jj_consume_token(GT); } finally { if (jjtc001) { jjtree.closeNodeScope(jjtn001, true); } } break; } case LT:{ ASTLT jjtn002 = new ASTLT(JJTLT); 
boolean jjtc002 = true; jjtree.openNodeScope(jjtn002); try { jj_consume_token(LT); } finally { if (jjtc002) { jjtree.closeNodeScope(jjtn002, true); } } break; } case LTE:{ ASTLTE jjtn003 = new ASTLTE(JJTLTE); boolean jjtc003 = true; jjtree.openNodeScope(jjtn003); try { jj_consume_token(LTE); } finally { if (jjtc003) { jjtree.closeNodeScope(jjtn003, true); } } break; } case GTE:{ ASTGTE jjtn004 = new ASTGTE(JJTGTE); boolean jjtc004 = true; jjtree.openNodeScope(jjtn004); try { jj_consume_token(GTE); } finally { if (jjtc004) { jjtree.closeNodeScope(jjtn004, true); } } break; } case EQUAL:{ ASTEQ jjtn005 = new ASTEQ(JJTEQ); boolean jjtc005 = true; jjtree.openNodeScope(jjtn005); try { jj_consume_token(EQUAL); } finally { if (jjtc005) { jjtree.closeNodeScope(jjtn005, true); } } break; } case NEQUAL:{ ASTNEQ jjtn006 = new ASTNEQ(JJTNEQ); boolean jjtc006 = true; jjtree.openNodeScope(jjtn006); try { jj_consume_token(NEQUAL); } finally { if (jjtc006) { jjtree.closeNodeScope(jjtn006, true); } } break; } case MATCHES:{ ASTMATCHES jjtn007 = new ASTMATCHES(JJTMATCHES); boolean jjtc007 = true; jjtree.openNodeScope(jjtn007); try { jj_consume_token(MATCHES); } finally { if (jjtc007) { jjtree.closeNodeScope(jjtn007, true); } } break; } default: jj_la1[13] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } final public void term() throws ParseException {Token t; StringBuilder builder = new StringBuilder(); switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case INT:{ t = jj_consume_token(INT); ASTNUM jjtn001 = new ASTNUM(JJTNUM); boolean jjtc001 = true; jjtree.openNodeScope(jjtn001); try { jjtree.closeNodeScope(jjtn001, true); jjtc001 = false; jjtn001.value = new java.math.BigDecimal(t.image); } finally { if (jjtc001) { jjtree.closeNodeScope(jjtn001, true); } } break; } case FLOAT:{ t = jj_consume_token(FLOAT); ASTNUM jjtn002 = new ASTNUM(JJTNUM); boolean jjtc002 = true; jjtree.openNodeScope(jjtn002); try { jjtree.closeNodeScope(jjtn002, true); jjtc002 = false; jjtn002.value = new java.math.BigDecimal(t.image); } finally { if (jjtc002) { jjtree.closeNodeScope(jjtn002, true); } } break; } case QUOTE:{ jj_consume_token(QUOTE); label_2: while (true) { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case CHAR:{ ; break; } default: jj_la1[14] = jj_gen; break label_2; } t = jj_consume_token(CHAR); builder.append(t.image); } jj_consume_token(ENDQUOTE); ASTSTRING jjtn003 = new ASTSTRING(JJTSTRING); boolean jjtc003 = true; jjtree.openNodeScope(jjtn003); try { jjtree.closeNodeScope(jjtn003, true); jjtc003 = false; jjtn003.value = builder.toString(); } finally { if (jjtc003) { jjtree.closeNodeScope(jjtn003, true); } } break; } case SQUOTE:{ jj_consume_token(SQUOTE); label_3: while (true) { switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { case SCHAR:{ ; break; } default: jj_la1[15] = jj_gen; break label_3; } t = jj_consume_token(SCHAR); builder.append(t.image); } jj_consume_token(SENDQUOTE); ASTSTRING jjtn004 = new ASTSTRING(JJTSTRING); boolean jjtc004 = true; jjtree.openNodeScope(jjtn004); try { jjtree.closeNodeScope(jjtn004, true); jjtc004 = false; jjtn004.value = builder.toString(); } finally { if (jjtc004) { jjtree.closeNodeScope(jjtn004, true); } } break; } case BOOLEAN:{ t = jj_consume_token(BOOLEAN); ASTBOOLEAN jjtn005 = new ASTBOOLEAN(JJTBOOLEAN); boolean jjtc005 = true; jjtree.openNodeScope(jjtn005); try { jjtree.closeNodeScope(jjtn005, true); jjtc005 = false; jjtn005.value = t.image; } finally { if (jjtc005) { jjtree.closeNodeScope(jjtn005, true); } } break; } case VARIABLE:{ t = jj_consume_token(VARIABLE); ASTVAR 
jjtn006 = new ASTVAR(JJTVAR); boolean jjtc006 = true; jjtree.openNodeScope(jjtn006); try { jjtree.closeNodeScope(jjtn006, true); jjtc006 = false; jjtn006.value = new Variable(t.image); } finally { if (jjtc006) { jjtree.closeNodeScope(jjtn006, true); } } break; } default: jj_la1[16] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } private boolean jj_2_1(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; try { return !jj_3_1(); } catch(LookaheadSuccess ls) { return true; } finally { jj_save(0, xla); } } private boolean jj_3R_8() { if (jj_3R_13()) return true; return false; } private boolean jj_3R_5() { if (jj_scan_token(NOT)) return true; return false; } private boolean jj_3R_7() { if (jj_3R_12()) return true; return false; } private boolean jj_3R_6() { Token xsp; xsp = jj_scanpos; jj_lookingAhead = true; jj_semLA = getToken(2).kind==BETWEEN || getToken(3).kind==BETWEEN; jj_lookingAhead = false; if (!jj_semLA || jj_3R_7()) { jj_scanpos = xsp; jj_lookingAhead = true; jj_semLA = getToken(2).kind==IN || getToken(3).kind==IN; jj_lookingAhead = false; if (!jj_semLA || jj_3R_8()) { jj_scanpos = xsp; jj_lookingAhead = true; jj_semLA = getToken(2).kind==LIKE || getToken(3).kind==LIKE; jj_lookingAhead = false; if (!jj_semLA || jj_3R_9()) { jj_scanpos = xsp; jj_lookingAhead = true; jj_semLA = getToken(2).kind==IS; jj_lookingAhead = false; if (!jj_semLA || jj_3R_10()) { jj_scanpos = xsp; if (jj_3R_11()) return true; } } } } return false; } private boolean jj_3R_4() { Token xsp; xsp = jj_scanpos; if (jj_3R_5()) jj_scanpos = xsp; if (jj_3R_6()) return true; return false; } private boolean jj_3_1() { if (jj_3R_4()) return true; return false; } private boolean jj_3R_16() { if (jj_3R_17()) return true; return false; } private boolean jj_3R_15() { if (jj_3R_17()) return true; return false; } private boolean jj_3R_14() { if (jj_3R_17()) return true; return false; } private boolean jj_3R_13() { if (jj_3R_17()) return true; return false; } private boolean jj_3R_12() { if (jj_3R_17()) return true; return false; } private boolean jj_3R_23() { if (jj_scan_token(VARIABLE)) return true; return false; } private boolean jj_3R_22() { if (jj_scan_token(BOOLEAN)) return true; return false; } private boolean jj_3R_11() { if (jj_3R_16()) return true; return false; } private boolean jj_3R_21() { if (jj_scan_token(SQUOTE)) return true; return false; } private boolean jj_3R_20() { if (jj_scan_token(QUOTE)) return true; return false; } private boolean jj_3R_10() { if (jj_3R_15()) return true; return false; } private boolean jj_3R_19() { if (jj_scan_token(FLOAT)) return true; return false; } private boolean jj_3R_9() { if (jj_3R_14()) return true; return false; } private boolean jj_3R_17() { Token xsp; xsp = jj_scanpos; if (jj_3R_18()) { jj_scanpos = xsp; if (jj_3R_19()) { jj_scanpos = xsp; if (jj_3R_20()) { jj_scanpos = xsp; if (jj_3R_21()) { jj_scanpos = xsp; if (jj_3R_22()) { jj_scanpos = xsp; if (jj_3R_23()) return true; } } } } } return false; } private boolean jj_3R_18() { if (jj_scan_token(INT)) return true; return false; } /** Generated Token Manager. */ public PartitionParserTokenManager token_source; SimpleCharStream jj_input_stream; /** Current token. */ public Token token; /** Next token. */ public Token jj_nt; private int jj_ntk; private Token jj_scanpos, jj_lastpos; private int jj_la; /** Whether we are looking ahead. 
*/ private boolean jj_lookingAhead = false; private boolean jj_semLA; private int jj_gen; final private int[] jj_la1 = new int[17]; static private int[] jj_la1_0; static private int[] jj_la1_1; static { jj_la1_init_0(); jj_la1_init_1(); } private static void jj_la1_init_0() { jj_la1_0 = new int[] {0xc0000,0xc0000,0xc0000,0xc0000,0x200,0x100000,0xd8005000,0x100000,0x100000,0x800,0x100000,0x100000,0x2001f8,0x2001f8,0x0,0x0,0xd8005000,}; } private static void jj_la1_init_1() { jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2,0x8,0x0,}; } final private JJCalls[] jj_2_rtns = new JJCalls[1]; private boolean jj_rescan = false; private int jj_gc = 0; /** Constructor with InputStream. */ public PartitionParser(java.io.InputStream stream) { this(stream, null); } /** Constructor with InputStream and supplied encoding */ public PartitionParser(java.io.InputStream stream, String encoding) { try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source = new PartitionParserTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } /** Reinitialise. */ public void ReInit(java.io.InputStream stream) { ReInit(stream, null); } /** Reinitialise. */ public void ReInit(java.io.InputStream stream, String encoding) { try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jjtree.reset(); jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } /** Constructor. */ public PartitionParser(java.io.Reader stream) { jj_input_stream = new SimpleCharStream(stream, 1, 1); token_source = new PartitionParserTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } /** Reinitialise. */ public void ReInit(java.io.Reader stream) { if (jj_input_stream == null) { jj_input_stream = new SimpleCharStream(stream, 1, 1); } else { jj_input_stream.ReInit(stream, 1, 1); } if (token_source == null) { token_source = new PartitionParserTokenManager(jj_input_stream); } token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jjtree.reset(); jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } /** Constructor with generated Token Manager. */ public PartitionParser(PartitionParserTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } /** Reinitialise. 
*/ public void ReInit(PartitionParserTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jjtree.reset(); jj_gen = 0; for (int i = 0; i < 17; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } private Token jj_consume_token(int kind) throws ParseException { Token oldToken; if ((oldToken = token).next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; if (token.kind == kind) { jj_gen++; if (++jj_gc > 100) { jj_gc = 0; for (int i = 0; i < jj_2_rtns.length; i++) { JJCalls c = jj_2_rtns[i]; while (c != null) { if (c.gen < jj_gen) c.first = null; c = c.next; } } } return token; } token = oldToken; jj_kind = kind; throw generateParseException(); } @SuppressWarnings("serial") static private final class LookaheadSuccess extends java.lang.Error { } final private LookaheadSuccess jj_ls = new LookaheadSuccess(); private boolean jj_scan_token(int kind) { if (jj_scanpos == jj_lastpos) { jj_la--; if (jj_scanpos.next == null) { jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken(); } else { jj_lastpos = jj_scanpos = jj_scanpos.next; } } else { jj_scanpos = jj_scanpos.next; } if (jj_rescan) { int i = 0; Token tok = token; while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; } if (tok != null) jj_add_error_token(kind, i); } if (jj_scanpos.kind != kind) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls; return false; } /** Get the next Token. */ final public Token getNextToken() { if (token.next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; jj_gen++; return token; } /** Get the specific Token. */ final public Token getToken(int index) { Token t = jj_lookingAhead ? jj_scanpos : token; for (int i = 0; i < index; i++) { if (t.next != null) t = t.next; else t = t.next = token_source.getNextToken(); } return t; } private int jj_ntk_f() { if ((jj_nt=token.next) == null) return (jj_ntk = (token.next=token_source.getNextToken()).kind); else return (jj_ntk = jj_nt.kind); } private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>(); private int[] jj_expentry; private int jj_kind = -1; private int[] jj_lasttokens = new int[100]; private int jj_endpos; private void jj_add_error_token(int kind, int pos) { if (pos >= 100) { return; } if (pos == jj_endpos + 1) { jj_lasttokens[jj_endpos++] = kind; } else if (jj_endpos != 0) { jj_expentry = new int[jj_endpos]; for (int i = 0; i < jj_endpos; i++) { jj_expentry[i] = jj_lasttokens[i]; } for (int[] oldentry : jj_expentries) { if (oldentry.length == jj_expentry.length) { boolean isMatched = true; for (int i = 0; i < jj_expentry.length; i++) { if (oldentry[i] != jj_expentry[i]) { isMatched = false; break; } } if (isMatched) { jj_expentries.add(jj_expentry); break; } } } if (pos != 0) { jj_lasttokens[(jj_endpos = pos) - 1] = kind; } } } /** Generate ParseException. 
*/ public ParseException generateParseException() { jj_expentries.clear(); boolean[] la1tokens = new boolean[36]; if (jj_kind >= 0) { la1tokens[jj_kind] = true; jj_kind = -1; } for (int i = 0; i < 17; i++) { if (jj_la1[i] == jj_gen) { for (int j = 0; j < 32; j++) { if ((jj_la1_0[i] & (1<<j)) != 0) { la1tokens[j] = true; } if ((jj_la1_1[i] & (1<<j)) != 0) { la1tokens[32+j] = true; } } } } for (int i = 0; i < 36; i++) { if (la1tokens[i]) { jj_expentry = new int[1]; jj_expentry[0] = i; jj_expentries.add(jj_expentry); } } jj_endpos = 0; jj_rescan_token(); jj_add_error_token(0, 0); int[][] exptokseq = new int[jj_expentries.size()][]; for (int i = 0; i < jj_expentries.size(); i++) { exptokseq[i] = jj_expentries.get(i); } return new ParseException(token, exptokseq, tokenImage); } /** Enable tracing. */ final public void enable_tracing() { } /** Disable tracing. */ final public void disable_tracing() { } private void jj_rescan_token() { jj_rescan = true; for (int i = 0; i < 1; i++) { try { JJCalls p = jj_2_rtns[i]; do { if (p.gen > jj_gen) { jj_la = p.arg; jj_lastpos = jj_scanpos = p.first; switch (i) { case 0: jj_3_1(); break; } } p = p.next; } while (p != null); } catch(LookaheadSuccess ls) { } } jj_rescan = false; } private void jj_save(int index, int xla) { JJCalls p = jj_2_rtns[index]; while (p.gen > jj_gen) { if (p.next == null) { p = p.next = new JJCalls(); break; } p = p.next; } p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla; } static final class JJCalls { int gen; Token first; int arg; JJCalls next; } }
9,908
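The generated parser's own main method (in the content above) shows the intended entry point: construct PartitionParser over a Reader, call filter() to obtain the AST root, then hand the root to a visitor or dump it. A minimal sketch along those lines, using only classes that appear in this dataset; the demo class name and the filter expression are hypothetical, and the PartitionParserEval visitor used by main lives in a separate visitor package not included here, so the sketch just dumps the tree:

import com.netflix.metacat.common.server.partition.parser.ParseException;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;

import java.io.StringReader;

public final class FilterParseDemo {               // hypothetical demo class, not part of the repo
    public static void main(String[] args) throws ParseException {
        // Hypothetical partition filter; syntax errors surface as ParseException from filter().
        String expression = "dateint between 20160101 and 20160131";
        PartitionParser parser = new PartitionParser(new StringReader(expression));
        SimpleNode root = parser.filter();          // parse and return the FILTER node
        root.dump("");                              // print the node tree, as the generated main() does
    }
}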
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ParseException.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. ParseException.java Version 6.1 */ /* JavaCCOptions:KEEP_LINE_COLUMN=true */ package com.netflix.metacat.common.server.partition.parser; /** * This exception is thrown when parse errors are encountered. * You can explicitly create objects of this exception type by * calling the method generateParseException in the generated * parser. * * You can modify this class to customize your error reporting * mechanisms so long as you retain the public fields. */ public class ParseException extends Exception { /** * The version identifier for this Serializable class. * Increment only if the <i>serialized</i> form of the * class changes. */ private static final long serialVersionUID = 1L; /** * The end of line string for this machine. */ protected static String EOL = System.getProperty("line.separator", "\n"); /** * This constructor is used by the method "generateParseException" * in the generated parser. Calling this constructor generates * a new object of this type with the fields "currentToken", * "expectedTokenSequences", and "tokenImage" set. */ public ParseException(Token currentTokenVal, int[][] expectedTokenSequencesVal, String[] tokenImageVal ) { super(initialise(currentTokenVal, expectedTokenSequencesVal, tokenImageVal)); currentToken = currentTokenVal; expectedTokenSequences = expectedTokenSequencesVal; tokenImage = tokenImageVal; } /** * The following constructors are for use by you for whatever * purpose you can think of. Constructing the exception in this * manner makes the exception behave in the normal way - i.e., as * documented in the class "Throwable". The fields "errorToken", * "expectedTokenSequences", and "tokenImage" do not contain * relevant information. The JavaCC generated code does not use * these constructors. */ public ParseException() { super(); } /** Constructor with message. */ public ParseException(String message) { super(message); } /** * This is the last token that has been consumed successfully. If * this object has been created due to a parse error, the token * followng this token will (therefore) be the first error token. */ public Token currentToken; /** * Each entry in this array is an array of integers. Each array * of integers represents a sequence of tokens (by their ordinal * values) that is expected at this point of the parse. */ public int[][] expectedTokenSequences; /** * This is a reference to the "tokenImage" array of the generated * parser within which the parse error occurred. This array is * defined in the generated ...Constants interface. */ public String[] tokenImage; /** * It uses "currentToken" and "expectedTokenSequences" to generate a parse * error message and returns it. If this object has been created * due to a parse error, and you do not catch it (it gets thrown * from the parser) the correct error message * gets displayed. 
*/ private static String initialise(Token currentToken, int[][] expectedTokenSequences, String[] tokenImage) { StringBuffer expected = new StringBuffer(); int maxSize = 0; for (int i = 0; i < expectedTokenSequences.length; i++) { if (maxSize < expectedTokenSequences[i].length) { maxSize = expectedTokenSequences[i].length; } for (int j = 0; j < expectedTokenSequences[i].length; j++) { expected.append(tokenImage[expectedTokenSequences[i][j]]).append(' '); } if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) { expected.append("..."); } expected.append(EOL).append(" "); } String retval = "Encountered \""; Token tok = currentToken.next; for (int i = 0; i < maxSize; i++) { if (i != 0) retval += " "; if (tok.kind == 0) { retval += tokenImage[0]; break; } retval += " " + tokenImage[tok.kind]; retval += " \""; retval += add_escapes(tok.image); retval += " \""; tok = tok.next; } retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn; retval += "." + EOL; if (expectedTokenSequences.length == 0) { // Nothing to add here } else { if (expectedTokenSequences.length == 1) { retval += "Was expecting:" + EOL + " "; } else { retval += "Was expecting one of:" + EOL + " "; } retval += expected.toString(); } return retval; } /** * Used to convert raw characters to their escaped version * when these raw version cannot be used as part of an ASCII * string literal. */ static String add_escapes(String str) { StringBuffer retval = new StringBuffer(); char ch; for (int i = 0; i < str.length(); i++) { switch (str.charAt(i)) { case '\b': retval.append("\\b"); continue; case '\t': retval.append("\\t"); continue; case '\n': retval.append("\\n"); continue; case '\f': retval.append("\\f"); continue; case '\r': retval.append("\\r"); continue; case '\"': retval.append("\\\""); continue; case '\'': retval.append("\\\'"); continue; case '\\': retval.append("\\\\"); continue; default: if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { String s = "0000" + Integer.toString(ch, 16); retval.append("\\u" + s.substring(s.length() - 4, s.length())); } else { retval.append(ch); } continue; } } return retval.toString(); } } /* JavaCC - OriginalChecksum=49058b52f63ed1ca5d5417de303d18a1 (do not edit this line) */
9,909
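Because ParseException exposes currentToken, expectedTokenSequences, and tokenImage as public fields, a caller can report more than getMessage(). A hedged fragment that could sit inside the demo main above; the broken filter string is a made-up example, and TokenMgrError is caught separately because it extends Error rather than Exception:

try {
    SimpleNode root = new PartitionParser(new java.io.StringReader("dateint >=")).filter();
    root.dump("");
} catch (ParseException e) {
    // currentToken is the last token consumed successfully; the offending token follows it.
    System.err.println("Parse failed after \"" + e.currentToken.image + "\"");
    System.err.println(e.getMessage());             // the generated "Encountered ... Was expecting ..." text
} catch (TokenMgrError e) {
    // Lexical failures are raised by the token manager as an Error, not an Exception.
    System.err.println("Lexer error: " + e.getMessage());
}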
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTGTE.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTGTE.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTGTE extends SimpleNode { public ASTGTE(int id) { super(id); } public ASTGTE(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=64550e3fbfe981b802deef725b683e1a (do not edit this line) */
9,910
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/Variable.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.partition.parser; public class Variable { private String name; public Variable(String name) { this.setName(name); } public String getName() { return name; } public void setName(String name) { this.name = name; } }
9,911
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/PartitionParserTreeConstants.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. PartitionParserTreeConstants.java Version 6.1_2 */ package com.netflix.metacat.common.server.partition.parser; public interface PartitionParserTreeConstants { public int JJTFILTER = 0; public int JJTVOID = 1; public int JJTAND = 2; public int JJTOR = 3; public int JJTNOT = 4; public int JJTBETWEEN = 5; public int JJTIN = 6; public int JJTLIKE = 7; public int JJTNULL = 8; public int JJTCOMPARE = 9; public int JJTGT = 10; public int JJTLT = 11; public int JJTLTE = 12; public int JJTGTE = 13; public int JJTEQ = 14; public int JJTNEQ = 15; public int JJTMATCHES = 16; public int JJTNUM = 17; public int JJTSTRING = 18; public int JJTBOOLEAN = 19; public int JJTVAR = 20; public String[] jjtNodeName = { "FILTER", "void", "AND", "OR", "NOT", "BETWEEN", "IN", "LIKE", "NULL", "COMPARE", "GT", "LT", "LTE", "GTE", "EQ", "NEQ", "MATCHES", "NUM", "STRING", "BOOLEAN", "VAR", }; } /* JavaCC - OriginalChecksum=a96463cb7122b404fe59a717feec7105 (do not edit this line) */
9,912
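The JJT* ids double as indexes into jjtNodeName, which is convenient for generic logging of nodes. A one-line sketch using only the constants shown above:

// jjtNodeName is indexed by node id; JJTBETWEEN is 5, so this prints "BETWEEN".
System.out.println(PartitionParserTreeConstants.jjtNodeName[PartitionParserTreeConstants.JJTBETWEEN]);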
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/PartitionParserDefaultVisitor.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. PartitionParserDefaultVisitor.java Version 6.1_2 */ package com.netflix.metacat.common.server.partition.parser; public class PartitionParserDefaultVisitor implements PartitionParserVisitor{ public Object defaultVisit(SimpleNode node, Object data){ node.childrenAccept(this, data); return data; } public Object visit(SimpleNode node, Object data){ return defaultVisit(node, data); } public Object visit(ASTFILTER node, Object data){ return defaultVisit(node, data); } public Object visit(ASTAND node, Object data){ return defaultVisit(node, data); } public Object visit(ASTOR node, Object data){ return defaultVisit(node, data); } public Object visit(ASTNOT node, Object data){ return defaultVisit(node, data); } public Object visit(ASTBETWEEN node, Object data){ return defaultVisit(node, data); } public Object visit(ASTIN node, Object data){ return defaultVisit(node, data); } public Object visit(ASTLIKE node, Object data){ return defaultVisit(node, data); } public Object visit(ASTNULL node, Object data){ return defaultVisit(node, data); } public Object visit(ASTCOMPARE node, Object data){ return defaultVisit(node, data); } public Object visit(ASTGT node, Object data){ return defaultVisit(node, data); } public Object visit(ASTLT node, Object data){ return defaultVisit(node, data); } public Object visit(ASTLTE node, Object data){ return defaultVisit(node, data); } public Object visit(ASTGTE node, Object data){ return defaultVisit(node, data); } public Object visit(ASTEQ node, Object data){ return defaultVisit(node, data); } public Object visit(ASTNEQ node, Object data){ return defaultVisit(node, data); } public Object visit(ASTMATCHES node, Object data){ return defaultVisit(node, data); } public Object visit(ASTNUM node, Object data){ return defaultVisit(node, data); } public Object visit(ASTSTRING node, Object data){ return defaultVisit(node, data); } public Object visit(ASTBOOLEAN node, Object data){ return defaultVisit(node, data); } public Object visit(ASTVAR node, Object data){ return defaultVisit(node, data); } } /* JavaCC - OriginalChecksum=bf03e212165a1a62c99d5fccedf6e302 (do not edit this line) */
9,913
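Since defaultVisit calls childrenAccept, a subclass of PartitionParserDefaultVisitor only needs to override the node types it cares about and traversal still reaches every child. A minimal sketch assuming nothing beyond the classes shown in this dataset; the class name is hypothetical:

// Hypothetical visitor: counts equality and inequality comparisons in a parsed filter.
public class ComparisonCounter extends PartitionParserDefaultVisitor {
    private int comparisons;

    @Override
    public Object visit(ASTEQ node, Object data) {
        comparisons++;
        return defaultVisit(node, data);    // keep descending into child nodes
    }

    @Override
    public Object visit(ASTNEQ node, Object data) {
        comparisons++;
        return defaultVisit(node, data);
    }

    public int getComparisons() {
        return comparisons;
    }
}

After parsing, root.jjtAccept(new ComparisonCounter(), null) walks the whole tree, and getComparisons() returns the count.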
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTVAR.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. ASTVAR.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class ASTVAR extends SimpleNode { public ASTVAR(int id) { super(id); } public ASTVAR(PartitionParser p, int id) { super(p, id); } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } } /* JavaCC - OriginalChecksum=69622d5c2212551b77e16c4e9145bc5c (do not edit this line) */
9,914
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/PartitionParserConstants.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree&JavaCC: Do not edit this line. PartitionParserConstants.java */ package com.netflix.metacat.common.server.partition.parser; /** * Token literal values and constants. * Generated by org.javacc.parser.OtherFilesGen#start() */ public interface PartitionParserConstants { /** End of File. */ int EOF = 0; /** RegularExpression Id. */ int EQUAL = 3; /** RegularExpression Id. */ int NEQUAL = 4; /** RegularExpression Id. */ int GTE = 5; /** RegularExpression Id. */ int GT = 6; /** RegularExpression Id. */ int LTE = 7; /** RegularExpression Id. */ int LT = 8; /** RegularExpression Id. */ int LPAREN = 9; /** RegularExpression Id. */ int RPAREN = 10; /** RegularExpression Id. */ int COMMA = 11; /** RegularExpression Id. */ int INT = 12; /** RegularExpression Id. */ int DIGIT = 13; /** RegularExpression Id. */ int FLOAT = 14; /** RegularExpression Id. */ int EXPONENT = 15; /** RegularExpression Id. */ int MANTISSA = 16; /** RegularExpression Id. */ int DIGITS = 17; /** RegularExpression Id. */ int AND = 18; /** RegularExpression Id. */ int OR = 19; /** RegularExpression Id. */ int NOT = 20; /** RegularExpression Id. */ int MATCHES = 21; /** RegularExpression Id. */ int LIKE = 22; /** RegularExpression Id. */ int IS = 23; /** RegularExpression Id. */ int NULL = 24; /** RegularExpression Id. */ int BETWEEN = 25; /** RegularExpression Id. */ int IN = 26; /** RegularExpression Id. */ int BOOLEAN = 27; /** RegularExpression Id. */ int VARIABLE = 28; /** RegularExpression Id. */ int CHARS = 29; /** RegularExpression Id. */ int QUOTE = 30; /** RegularExpression Id. */ int SQUOTE = 31; /** RegularExpression Id. */ int ENDQUOTE = 32; /** RegularExpression Id. */ int CHAR = 33; /** RegularExpression Id. */ int SENDQUOTE = 34; /** RegularExpression Id. */ int SCHAR = 35; /** Lexical state. */ int DEFAULT = 0; /** Lexical state. */ int STRING_STATE = 1; /** Lexical state. */ int SSTRING_STATE = 2; /** Literal token values. */ String[] tokenImage = { "<EOF>", "\" \"", "\"\\t\"", "<EQUAL>", "<NEQUAL>", "\">=\"", "\">\"", "\"<=\"", "\"<\"", "\"(\"", "\")\"", "\",\"", "<INT>", "<DIGIT>", "<FLOAT>", "<EXPONENT>", "<MANTISSA>", "<DIGITS>", "<AND>", "<OR>", "\"not\"", "\"matches\"", "\"like\"", "\"is\"", "\"null\"", "\"between\"", "\"in\"", "<BOOLEAN>", "<VARIABLE>", "<CHARS>", "\"\\\"\"", "\"\\\'\"", "<ENDQUOTE>", "<CHAR>", "<SENDQUOTE>", "<SCHAR>", }; }
9,915
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/SimpleCharStream.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 6.1 */ /* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; /** * An implementation of interface CharStream, where the stream is assumed to * contain only ASCII characters (without unicode processing). */ public class SimpleCharStream { /** Whether parser is static. */ public static final boolean staticFlag = false; int bufsize; int available; int tokenBegin; /** Position in buffer. */ public int bufpos = -1; protected int bufline[]; protected int bufcolumn[]; protected int column = 0; protected int line = 1; protected boolean prevCharIsCR = false; protected boolean prevCharIsLF = false; protected java.io.Reader inputStream; protected char[] buffer; protected int maxNextCharInd = 0; protected int inBuf = 0; protected int tabSize = 1; protected boolean trackLineColumn = true; public void setTabSize(int i) { tabSize = i; } public int getTabSize() { return tabSize; } protected void ExpandBuff(boolean wrapAround) { char[] newbuffer = new char[bufsize + 2048]; int newbufline[] = new int[bufsize + 2048]; int newbufcolumn[] = new int[bufsize + 2048]; try { if (wrapAround) { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); System.arraycopy(buffer, 0, newbuffer, bufsize - tokenBegin, bufpos); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos += (bufsize - tokenBegin)); } else { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos -= tokenBegin); } } catch (Throwable t) { throw new Error(t.getMessage()); } bufsize += 2048; available = bufsize; tokenBegin = 0; } protected void FillBuff() throws java.io.IOException { if (maxNextCharInd == available) { if (available == bufsize) { if (tokenBegin > 2048) { bufpos = maxNextCharInd = 0; available = tokenBegin; } else if (tokenBegin < 0) bufpos = maxNextCharInd = 0; else ExpandBuff(false); } else if (available > tokenBegin) available = bufsize; else if ((tokenBegin - available) < 2048) ExpandBuff(true); else available = tokenBegin; } int i; try { if ((i = inputStream.read(buffer, maxNextCharInd, available - maxNextCharInd)) == -1) { inputStream.close(); throw new java.io.IOException(); } else maxNextCharInd += i; return; } catch(java.io.IOException e) { --bufpos; backup(0); if (tokenBegin == -1) tokenBegin = 
bufpos; throw e; } } /** Start. */ public char BeginToken() throws java.io.IOException { tokenBegin = -1; char c = readChar(); tokenBegin = bufpos; return c; } protected void UpdateLineColumn(char c) { column++; if (prevCharIsLF) { prevCharIsLF = false; line += (column = 1); } else if (prevCharIsCR) { prevCharIsCR = false; if (c == '\n') { prevCharIsLF = true; } else line += (column = 1); } switch (c) { case '\r' : prevCharIsCR = true; break; case '\n' : prevCharIsLF = true; break; case '\t' : column--; column += (tabSize - (column % tabSize)); break; default : break; } bufline[bufpos] = line; bufcolumn[bufpos] = column; } /** Read a character. */ public char readChar() throws java.io.IOException { if (inBuf > 0) { --inBuf; if (++bufpos == bufsize) bufpos = 0; return buffer[bufpos]; } if (++bufpos >= maxNextCharInd) FillBuff(); char c = buffer[bufpos]; UpdateLineColumn(c); return c; } @Deprecated /** * @deprecated * @see #getEndColumn */ public int getColumn() { return bufcolumn[bufpos]; } @Deprecated /** * @deprecated * @see #getEndLine */ public int getLine() { return bufline[bufpos]; } /** Get token end column number. */ public int getEndColumn() { return bufcolumn[bufpos]; } /** Get token end line number. */ public int getEndLine() { return bufline[bufpos]; } /** Get token beginning column number. */ public int getBeginColumn() { return bufcolumn[tokenBegin]; } /** Get token beginning line number. */ public int getBeginLine() { return bufline[tokenBegin]; } /** Backup a number of characters. */ public void backup(int amount) { inBuf += amount; if ((bufpos -= amount) < 0) bufpos += bufsize; } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { inputStream = dstream; line = startline; column = startcolumn - 1; available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.Reader dstream) { this(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { inputStream = dstream; line = startline; column = startcolumn - 1; if (buffer == null || buffersize != buffer.length) { available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } prevCharIsLF = prevCharIsCR = false; tokenBegin = inBuf = maxNextCharInd = 0; bufpos = -1; } /** Reinitialise. */ public void ReInit(java.io.Reader dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } /** Reinitialise. */ public void ReInit(java.io.Reader dstream) { ReInit(dstream, 1, 1, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { this(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } /** Constructor. 
*/ public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { this(dstream, encoding, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { this(dstream, encoding, 1, 1, 4096); } /** Constructor. */ public SimpleCharStream(java.io.InputStream dstream) { this(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream) { ReInit(dstream, 1, 1, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, startline, startcolumn, 4096); } /** Reinitialise. */ public void ReInit(java.io.InputStream dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } /** Get token literal value. */ public String GetImage() { if (bufpos >= tokenBegin) return new String(buffer, tokenBegin, bufpos - tokenBegin + 1); else return new String(buffer, tokenBegin, bufsize - tokenBegin) + new String(buffer, 0, bufpos + 1); } /** Get the suffix. */ public char[] GetSuffix(int len) { char[] ret = new char[len]; if ((bufpos + 1) >= len) System.arraycopy(buffer, bufpos - len + 1, ret, 0, len); else { System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0, len - bufpos - 1); System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1); } return ret; } /** Reset buffer when finished. */ public void Done() { buffer = null; bufline = null; bufcolumn = null; } /** * Method to adjust line and column numbers for the start of a token. 
*/ public void adjustBeginLineColumn(int newLine, int newCol) { int start = tokenBegin; int len; if (bufpos >= tokenBegin) { len = bufpos - tokenBegin + inBuf + 1; } else { len = bufsize - tokenBegin + bufpos + 1 + inBuf; } int i = 0, j = 0, k = 0; int nextColDiff = 0, columnDiff = 0; while (i < len && bufline[j = start % bufsize] == bufline[k = ++start % bufsize]) { bufline[j] = newLine; nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j]; bufcolumn[j] = newCol + columnDiff; columnDiff = nextColDiff; i++; } if (i < len) { bufline[j] = newLine++; bufcolumn[j] = newCol + columnDiff; while (i++ < len) { if (bufline[j = start % bufsize] != bufline[++start % bufsize]) bufline[j] = newLine++; else bufline[j] = newLine; } } line = bufline[j]; column = bufcolumn[j]; } boolean getTrackLineColumn() { return trackLineColumn; } void setTrackLineColumn(boolean tlc) { trackLineColumn = tlc; } } /* JavaCC - OriginalChecksum=c34799d44c4c2ce99700976e5f2a9064 (do not edit this line) */
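SimpleCharStream is normally driven only by the generated token manager, but exercising it directly makes the line/column bookkeeping above easier to follow. The sketch below is illustrative only; note that readChar signals end of input with an IOException, which the generated code treats as the normal EOF path.

import java.io.IOException;
import java.io.StringReader;

import com.netflix.metacat.common.server.partition.parser.SimpleCharStream;

// Minimal sketch: feed a filter expression through the char stream and observe the
// line/column tracking. In normal use only the generated token manager does this.
public final class CharStreamDemo {
    public static void main(final String[] args) {
        final SimpleCharStream stream =
            new SimpleCharStream(new StringReader("dateint >= 20240101"), 1, 1);
        try {
            char c = stream.BeginToken();
            while (true) {
                System.out.printf("'%c' at line %d, column %d%n",
                    c, stream.getEndLine(), stream.getEndColumn());
                c = stream.readChar();
            }
        } catch (IOException eof) {
            // readChar reports end of input via IOException; GetImage returns the
            // characters consumed since BeginToken.
            System.out.println("consumed: " + stream.GetImage());
        }
    }
}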
9,916
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTLTE.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTLTE.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTLTE extends SimpleNode {
  public ASTLTE(int id) {
    super(id);
  }

  public ASTLTE(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=5166b2cf4d389162d081829f20e53bb9 (do not edit this line) */
9,917
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTLIKE.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTLIKE.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTLIKE extends SimpleNode {
  public boolean not;

  public ASTLIKE(int id) {
    super(id);
  }

  public ASTLIKE(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=4c9049dc18265f1076e67d7fbab0250d (do not edit this line) */
9,918
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTIN.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTIN.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTIN extends SimpleNode {
  public boolean not;

  public ASTIN(int id) {
    super(id);
  }

  public ASTIN(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=740044ed6bd874710eabdf47e1b15683 (do not edit this line) */
9,919
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/Token.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JavaCC: Do not edit this line. Token.java Version 6.1 */ /* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; /** * Describes the input token stream. */ public class Token implements java.io.Serializable { /** * The version identifier for this Serializable class. * Increment only if the <i>serialized</i> form of the * class changes. */ private static final long serialVersionUID = 1L; /** * An integer that describes the kind of this token. This numbering * system is determined by JavaCCParser, and a table of these numbers is * stored in the file ...Constants.java. */ public int kind; /** The line number of the first character of this Token. */ public int beginLine; /** The column number of the first character of this Token. */ public int beginColumn; /** The line number of the last character of this Token. */ public int endLine; /** The column number of the last character of this Token. */ public int endColumn; /** * The string image of the token. */ public String image; /** * A reference to the next regular (non-special) token from the input * stream. If this is the last token from the input stream, or if the * token manager has not read tokens beyond this one, this field is * set to null. This is true only if this token is also a regular * token. Otherwise, see below for a description of the contents of * this field. */ public Token next; /** * This field is used to access special tokens that occur prior to this * token, but after the immediately preceding regular (non-special) token. * If there are no such special tokens, this field is set to null. * When there are more than one such special token, this field refers * to the last of these special tokens, which in turn refers to the next * previous special token through its specialToken field, and so on * until the first special token (whose specialToken field is null). * The next fields of special tokens refer to other special tokens that * immediately follow it (without an intervening regular token). If there * is no such token, this field is null. */ public Token specialToken; /** * An optional attribute value of the Token. * Tokens which are not used as syntactic sugar will often contain * meaningful values that will be used later on by the compiler or * interpreter. This attribute value is often different from the image. * Any subclass of Token that actually wants to return a non-null value can * override this method as appropriate. */ public Object getValue() { return null; } /** * No-argument constructor */ public Token() {} /** * Constructs a new token for the specified Image. */ public Token(int kind) { this(kind, null); } /** * Constructs a new token for the specified Image and Kind. */ public Token(int kind, String image) { this.kind = kind; this.image = image; } /** * Returns the image. 
*/ public String toString() { return image; } /** * Returns a new Token object, by default. However, if you want, you * can create and return subclass objects based on the value of ofKind. * Simply add the cases to the switch for all those special cases. * For example, if you have a subclass of Token called IDToken that * you want to create if ofKind is ID, simply add something like : * * case MyParserConstants.ID : return new IDToken(ofKind, image); * * to the following switch statement. Then you can cast matchedToken * variable to the appropriate type and use sit in your lexical actions. */ public static Token newToken(int ofKind, String image) { switch(ofKind) { default : return new Token(ofKind, image); } } public static Token newToken(int ofKind) { return newToken(ofKind, null); } } /* JavaCC - OriginalChecksum=0a3aa83d3f2cdefa7bb0715471f36007 (do not edit this line) */
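The token manager produced for this grammar (PartitionParserTokenManager, which JavaCC generates alongside the classes shown here but which is not reproduced in this document) hands out Token instances chained through the next field. Below is a hedged sketch of walking that chain; the constructor and getNextToken signatures are the standard generated ones and are assumed rather than shown.

import java.io.StringReader;

import com.netflix.metacat.common.server.partition.parser.PartitionParserConstants;
import com.netflix.metacat.common.server.partition.parser.PartitionParserTokenManager;
import com.netflix.metacat.common.server.partition.parser.SimpleCharStream;
import com.netflix.metacat.common.server.partition.parser.Token;

// Sketch only: prints kind, image and position for each token of a filter expression.
public final class TokenWalk {
    public static void main(final String[] args) {
        final PartitionParserTokenManager tm = new PartitionParserTokenManager(
            new SimpleCharStream(new StringReader(
                "region='us-west-2' and dateint between 20240101 and 20240131")));
        for (Token t = tm.getNextToken(); t.kind != PartitionParserConstants.EOF; t = tm.getNextToken()) {
            System.out.println(t.kind + " -> " + t.image + " @" + t.beginLine + ":" + t.beginColumn);
        }
    }
}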
9,920
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTGT.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTGT.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTGT extends SimpleNode {
  public ASTGT(int id) {
    super(id);
  }

  public ASTGT(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=97fa861ee8d9421ccb94612e513bc388 (do not edit this line) */
9,921
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/SimpleNode.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Generated By:JJTree: Do not edit this line. SimpleNode.java Version 6.1 */ /* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ package com.netflix.metacat.common.server.partition.parser; public class SimpleNode implements Node { protected Node parent; protected Node[] children; protected int id; protected Object value; protected PartitionParser parser; public SimpleNode(int i) { id = i; } public SimpleNode(PartitionParser p, int i) { this(i); parser = p; } public void jjtOpen() { } public void jjtClose() { } public void jjtSetParent(Node n) { parent = n; } public Node jjtGetParent() { return parent; } public void jjtAddChild(Node n, int i) { if (children == null) { children = new Node[i + 1]; } else if (i >= children.length) { Node c[] = new Node[i + 1]; System.arraycopy(children, 0, c, 0, children.length); children = c; } children[i] = n; } public Node jjtGetChild(int i) { return children[i]; } public int jjtGetNumChildren() { return (children == null) ? 0 : children.length; } public void jjtSetValue(Object value) { this.value = value; } public Object jjtGetValue() { return value; } /** Accept the visitor. **/ public Object jjtAccept(PartitionParserVisitor visitor, Object data) { return visitor.visit(this, data); } /** Accept the visitor. **/ public Object childrenAccept(PartitionParserVisitor visitor, Object data) { if (children != null) { for (int i = 0; i < children.length; ++i) { children[i].jjtAccept(visitor, data); } } return data; } /* You can override these two methods in subclasses of SimpleNode to customize the way the node appears when the tree is dumped. If your output uses more than one line you should override toString(String), otherwise overriding toString() is probably all you need to do. */ public String toString() { return PartitionParserTreeConstants.jjtNodeName[id]; } public String toString(String prefix) { return prefix + toString(); } /* Override this method if you want to customize how the node dumps out its children. */ public void dump(String prefix) { System.out.println(toString(prefix)); if (children != null) { for (int i = 0; i < children.length; ++i) { SimpleNode n = (SimpleNode) children[i]; if (n != null) { n.dump(prefix + " "); } } } } public int getId() { return id; } } /* JavaCC - OriginalChecksum=d933f51a60f5df18ee957e3447d1a0d6 (do not edit this line) */
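Because the grammar is built with VISITOR=true, every concrete node funnels through jjtAccept, and dump gives a quick look at the tree shape. The sketch below assumes the generated PartitionParser exposes a Reader constructor and a filter() start production, which is the usual JJTree arrangement but is not shown in this document.

import java.io.StringReader;

import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;

// Sketch: parse a partition filter and dump the JJTree AST to stdout.
public final class AstDump {
    public static void main(final String[] args) throws Exception {
        final PartitionParser parser =
            new PartitionParser(new StringReader("dateint>=20240101 and region='us-east-1'"));
        final SimpleNode root = (SimpleNode) parser.filter();
        root.dump(""); // prints one node name per line, e.g. FILTER, AND, COMPARE, ...
    }
}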
9,922
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTCOMPARE.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTCOMPARE.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTCOMPARE extends SimpleNode {
  public ASTCOMPARE(int id) {
    super(id);
  }

  public ASTCOMPARE(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=293b662dd68f567a1f0f972651310eac (do not edit this line) */
9,923
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTMATCHES.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTMATCHES.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTMATCHES extends SimpleNode {
  public ASTMATCHES(int id) {
    super(id);
  }

  public ASTMATCHES(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=db244b83ab6cf6833b7dbd96aea93a2c (do not edit this line) */
9,924
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/parser/ASTSTRING.java
/*
 * Copyright 2016 Netflix, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Generated By:JJTree: Do not edit this line. ASTSTRING.java Version 6.1 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.netflix.metacat.common.server.partition.parser;

public class ASTSTRING extends SimpleNode {
  public ASTSTRING(int id) {
    super(id);
  }

  public ASTSTRING(PartitionParser p, int id) {
    super(p, id);
  }

  /** Accept the visitor. **/
  public Object jjtAccept(PartitionParserVisitor visitor, Object data) {
    return visitor.visit(this, data);
  }
}
/* JavaCC - OriginalChecksum=8d75dfe50ee57545277a64f4f0a63221 (do not edit this line) */
9,925
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/visitor/PartitionParserEval.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.partition.visitor; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.netflix.metacat.common.server.partition.parser.ASTAND; import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN; import com.netflix.metacat.common.server.partition.parser.ASTBOOLEAN; import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE; import com.netflix.metacat.common.server.partition.parser.ASTEQ; import com.netflix.metacat.common.server.partition.parser.ASTFILTER; import com.netflix.metacat.common.server.partition.parser.ASTGT; import com.netflix.metacat.common.server.partition.parser.ASTGTE; import com.netflix.metacat.common.server.partition.parser.ASTIN; import com.netflix.metacat.common.server.partition.parser.ASTLIKE; import com.netflix.metacat.common.server.partition.parser.ASTLT; import com.netflix.metacat.common.server.partition.parser.ASTLTE; import com.netflix.metacat.common.server.partition.parser.ASTMATCHES; import com.netflix.metacat.common.server.partition.parser.ASTNEQ; import com.netflix.metacat.common.server.partition.parser.ASTNOT; import com.netflix.metacat.common.server.partition.parser.ASTNULL; import com.netflix.metacat.common.server.partition.parser.ASTNUM; import com.netflix.metacat.common.server.partition.parser.ASTOR; import com.netflix.metacat.common.server.partition.parser.ASTSTRING; import com.netflix.metacat.common.server.partition.parser.ASTVAR; import com.netflix.metacat.common.server.partition.parser.PartitionParserVisitor; import com.netflix.metacat.common.server.partition.parser.SimpleNode; import com.netflix.metacat.common.server.partition.parser.Variable; import java.math.BigDecimal; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Partition Expression Visitor. */ public class PartitionParserEval implements PartitionParserVisitor { /** Like patterns. */ public static final Pattern LIKE_PATTERN = Pattern.compile("(\\[%\\]|\\[_\\]|\\[\\[\\]|%|_)"); /** LIKE to Regex token replacements. */ public static final Map<String, String> LIKE_TO_REGEX_REPLACEMENTS = new ImmutableMap.Builder<String, String>() .put("[%]", "%") .put("[_]", "_") .put("[[]", "[") .put("%", ".*") .put("_", ".").build(); /** Compare enum. */ public enum Compare { /** Compare. */ EQ("="), GT(">"), GTE(">="), LT("<"), LTE("<="), NEQ("!="), MATCHES("MATCHES"), LIKE("LIKE"); private String expression; Compare(final String expression) { this.expression = expression; } public String getExpression() { return expression; } } private Map<String, String> context; /** * Constructor. */ public PartitionParserEval() { this(Maps.newHashMap()); } /** * Constructor. * @param context context parameters */ public PartitionParserEval(final Map<String, String> context) { this.context = context; } /** * Compares. 
* @param node node in the tree * @param data data * @return comparison result */ public Boolean evalCompare(final SimpleNode node, final Object data) { final Object value1 = node.jjtGetChild(0).jjtAccept(this, data); final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); final Object value2 = node.jjtGetChild(2).jjtAccept(this, data); return compare(comparison, value1, value2); } /** * Compare value1 and value2. * @param comparison comparison expression * @param value1 value * @param value2 value * @return comparison result */ @SuppressWarnings({ "unchecked", "rawtypes", "checkstyle:methodname" }) public boolean compare(final Compare comparison, final Object value1, final Object value2) { if (value1 == null) { switch (comparison) { case EQ: case MATCHES: case LIKE: return value2 == null; case NEQ: return value2 != null; default: return false; } } if (value2 instanceof String) { return _compare(comparison, value1.toString(), value2.toString()); } if (value2 instanceof BigDecimal) { final BigDecimal valueDecimal = new BigDecimal(value1.toString()); return _compare(comparison, valueDecimal, (BigDecimal) value2); } if (value1 instanceof Comparable && value2 instanceof Comparable) { return _compare(comparison, (Comparable) value1, (Comparable) value2); } throw new IllegalStateException("error processing partition filter"); } @SuppressWarnings({ "unchecked", "rawtypes", "checkstyle:methodname" }) private boolean _compare(final Compare comparison, final Comparable value1, final Comparable value2) { if (comparison.equals(Compare.MATCHES) || comparison.equals(Compare.LIKE)) { if (value2 != null) { String value = value2.toString(); if (comparison.equals(Compare.LIKE)) { value = sqlLiketoRegexExpression(value); } return value1.toString().matches(value); } } else { final int compare = value1.compareTo(value2); switch (comparison) { case GT: return compare > 0; case GTE: return compare >= 0; case LT: return compare < 0; case LTE: return compare <= 0; case EQ: return compare == 0; case NEQ: return compare != 0; default: return false; } } return false; } //TODO: Need to escape regex meta characters protected String sqlLiketoRegexExpression(final String likeExpression) { final Matcher m = LIKE_PATTERN.matcher(likeExpression); final StringBuffer builder = new StringBuffer(); while (m.find()) { m.appendReplacement(builder, LIKE_TO_REGEX_REPLACEMENTS.get(m.group())); } m.appendTail(builder); return builder.toString(); } @Override public Object visit(final ASTAND node, final Object data) { final Boolean v1 = (Boolean) node.jjtGetChild(0).jjtAccept(this, data); return v1 && (Boolean) node.jjtGetChild(1).jjtAccept(this, data); } @Override public Object visit(final ASTEQ node, final Object data) { return Compare.EQ; } @Override public Object visit(final ASTBETWEEN node, final Object data) { final Object value = node.jjtGetChild(0).jjtAccept(this, data); final Object startValue = node.jjtGetChild(1).jjtAccept(this, data); final Object endValue = node.jjtGetChild(2).jjtAccept(this, data); final boolean compare1 = compare(Compare.GTE, value, startValue); final boolean compare2 = compare(Compare.LTE, value, endValue); final boolean result = compare1 && compare2; return node.not != result; } @Override public Object visit(final ASTIN node, final Object data) { Object value = node.jjtGetChild(0).jjtAccept(this, data); boolean result = false; for (int i = 1; i < node.jjtGetNumChildren(); i++) { final Object inValue = node.jjtGetChild(i).jjtAccept(this, data); if (value != null && inValue 
instanceof BigDecimal) { value = new BigDecimal(value.toString()); } if ((value == null && inValue == null) || (value != null && value.equals(inValue))) { result = true; break; } } return node.not != result; } @Override public Object visit(final ASTCOMPARE node, final Object data) { if (node.jjtGetNumChildren() == 1) { return evalSingleTerm(node, data); } else { return evalCompare(node, data); } } private Boolean evalSingleTerm(final ASTCOMPARE node, final Object data) { Boolean result = Boolean.FALSE; final Object value = node.jjtGetChild(0).jjtAccept(this, data); if (value != null) { result = Boolean.parseBoolean(value.toString()); } return result; } @Override public Object visit(final ASTBOOLEAN node, final Object data) { return Boolean.parseBoolean(node.jjtGetValue().toString()); } @Override public Object visit(final ASTFILTER node, final Object data) { return node.jjtGetChild(0).jjtAccept(this, data); } @Override public Object visit(final ASTGT node, final Object data) { return Compare.GT; } @Override public Object visit(final ASTGTE node, final Object data) { return Compare.GTE; } @Override public Object visit(final ASTLT node, final Object data) { return Compare.LT; } @Override public Object visit(final ASTLTE node, final Object data) { return Compare.LTE; } @Override public Object visit(final ASTNEQ node, final Object data) { return Compare.NEQ; } @Override public Object visit(final ASTMATCHES node, final Object data) { return Compare.MATCHES; } @Override public Object visit(final ASTLIKE node, final Object data) { final Object value1 = node.jjtGetChild(0).jjtAccept(this, data); final Object value2 = node.jjtGetChild(1).jjtAccept(this, data); final boolean result = compare(Compare.LIKE, value1, value2); return node.not != result; } @Override public Object visit(final ASTNULL node, final Object data) { final Object value = node.jjtGetChild(0).jjtAccept(this, data); return node.not != (value == null); } @Override public Object visit(final ASTNUM node, final Object data) { return node.jjtGetValue(); } @Override public Object visit(final ASTOR node, final Object data) { final Boolean v1 = (Boolean) node.jjtGetChild(0).jjtAccept(this, data); return v1 || (Boolean) node.jjtGetChild(1).jjtAccept(this, data); } @Override public Object visit(final ASTNOT node, final Object data) { return !(Boolean) node.jjtGetChild(0).jjtAccept(this, data); } @Override public Object visit(final ASTSTRING node, final Object data) { return node.jjtGetValue(); } @Override public Object visit(final ASTVAR node, final Object data) { if (!context.containsKey(((Variable) node.jjtGetValue()).getName())) { throw new IllegalArgumentException("Missing variable: " + ((Variable) node.jjtGetValue()).getName()); } return context.get(((Variable) node.jjtGetValue()).getName()); } @Override public Object visit(final SimpleNode node, final Object data) { return null; } }
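Putting the pieces together: a filter expression is parsed into the JJTree AST and then evaluated against a map of partition keys to values by accepting this visitor at the root. The sketch below rests on the same PartitionParser assumption as above (a Reader constructor and a filter() production); everything else uses only the classes shown in this section.

import java.io.StringReader;
import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.visitor.PartitionParserEval;

// Sketch: evaluate a partition filter against one partition's key/value map.
public final class FilterEvalDemo {
    public static void main(final String[] args) throws Exception {
        final Map<String, String> partitionValues = ImmutableMap.of(
            "dateint", "20240115",
            "region", "us-east-1");
        final String expression =
            "dateint between 20240101 and 20240131 and region like 'us-%'";
        final Boolean matches = (Boolean) new PartitionParser(new StringReader(expression))
            .filter()
            .jjtAccept(new PartitionParserEval(partitionValues), null);
        // true: 20240115 falls in the range and 'us-east-1' matches the LIKE pattern 'us-%'
        System.out.println(matches);
    }
}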
9,926
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/visitor/PartitionKeyParserEval.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.partition.visitor; import com.google.common.collect.Sets; import com.netflix.metacat.common.server.partition.parser.ASTAND; import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN; import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE; import com.netflix.metacat.common.server.partition.parser.ASTEQ; import com.netflix.metacat.common.server.partition.parser.ASTIN; import com.netflix.metacat.common.server.partition.parser.ASTLIKE; import com.netflix.metacat.common.server.partition.parser.ASTNOT; import com.netflix.metacat.common.server.partition.parser.ASTNULL; import com.netflix.metacat.common.server.partition.parser.ASTOR; import com.netflix.metacat.common.server.partition.parser.ASTVAR; import com.netflix.metacat.common.server.partition.parser.SimpleNode; import com.netflix.metacat.common.server.partition.parser.Variable; import com.netflix.metacat.common.server.partition.util.PartitionUtil; import java.util.Collection; import java.util.HashSet; import java.util.Set; /** * Partition key evaluation. */ public class PartitionKeyParserEval extends PartitionParserEval { /** * Evaluate the expression. * @param node node in the expression tree * @param data data * @return Evaluated string */ public String evalString(final SimpleNode node, final Object data) { final Object value1 = node.jjtGetChild(0).jjtAccept(this, data); final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); final Object value2 = node.jjtGetChild(2).jjtAccept(this, data); if (comparison != Compare.EQ) { return null; } return String.format("%s=%s", value1, toValue(value2)); } /** * Converts to String. * @param value value object * @return String */ protected String toValue(final Object value) { return value == null ? 
PartitionUtil.DEFAULT_PARTITION_NAME : value.toString(); } @SuppressWarnings("unchecked") @Override public Object visit(final ASTAND node, final Object data) { final Collection v1 = (Collection) node.jjtGetChild(0).jjtAccept(this, data); final Object b = node.jjtGetChild(1).jjtAccept(this, data); v1.addAll((Collection) b); return v1; } @Override public Object visit(final ASTEQ node, final Object data) { return Compare.EQ; } @Override public Object visit(final ASTCOMPARE node, final Object data) { Set<String> result = Sets.newHashSet(); if (node.jjtGetNumChildren() == 3) { final String value = evalString(node, data); if (value != null) { result = Sets.newHashSet(value); } } return result; } @Override public Object visit(final ASTOR node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTBETWEEN node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTIN node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTLIKE node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTNOT node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTNULL node, final Object data) { return new HashSet<String>(); } @Override public Object visit(final ASTVAR node, final Object data) { return ((Variable) node.jjtGetValue()).getName(); } }
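In contrast to PartitionParserEval, this visitor does not answer true/false; it reduces the expression to the set of "key=value" fragments implied by its equality predicates, which callers can use as partition-name prefixes. A hedged sketch, under the same parser assumption as above:

import java.io.StringReader;
import java.util.Set;

import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;

// Sketch: only equality predicates survive; range and OR branches contribute nothing.
public final class KeyEvalDemo {
    public static void main(final String[] args) throws Exception {
        final String expression = "dateint=20240115 and region='us-east-1' and batchid>100";
        @SuppressWarnings("unchecked")
        final Set<String> keys = (Set<String>) new PartitionParser(new StringReader(expression))
            .filter()
            .jjtAccept(new PartitionKeyParserEval(), null);
        System.out.println(keys); // e.g. [dateint=20240115, region=us-east-1]
    }
}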
9,927
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/visitor/PartitionParamParserEval.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.partition.visitor; import com.netflix.metacat.common.server.partition.parser.SimpleNode; /** * Partition param evaluation. */ public class PartitionParamParserEval extends PartitionKeyParserEval { @Override public String evalString(final SimpleNode node, final Object data) { final Object value1 = node.jjtGetChild(0).jjtAccept(this, data); if (!"dateCreated".equals(value1)) { return null; } final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); final Object value2 = node.jjtGetChild(2).jjtAccept(this, data); return String.format("%s%s%s", value1, comparison.getExpression(), value2.toString()); } }
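This narrows the previous visitor to the dateCreated pseudo-column, keeping the comparison operator in the output so the caller can translate it into a partition-parameter filter. A small sketch, same parser assumption:

import java.io.StringReader;
import java.util.Set;

import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;

// Sketch: only predicates on "dateCreated" are kept, operator included.
public final class ParamEvalDemo {
    public static void main(final String[] args) throws Exception {
        final String expression = "dateCreated>=1704067200 and dateint=20240115";
        @SuppressWarnings("unchecked")
        final Set<String> params = (Set<String>) new PartitionParser(new StringReader(expression))
            .filter()
            .jjtAccept(new PartitionParamParserEval(), null);
        System.out.println(params); // [dateCreated>=1704067200]
    }
}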
9,928
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/partition/visitor/package-info.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
/**
 * Includes visitor classes.
 *
 * @author amajumdar
 */
package com.netflix.metacat.common.server.partition.visitor;
9,929
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/monitoring/package-info.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
/**
 * Includes utility classes for servo classes.
 *
 * @author amajumdar
 */
package com.netflix.metacat.common.server.monitoring;
9,930
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/monitoring/Metrics.java
/* * Copyright 2016 Netflix, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.monitoring; import com.google.common.collect.ImmutableMap; import lombok.Getter; import java.util.Map; //CHECKSTYLE:OFF /** * Metric measures. * * @author zhenl * @since 1.0.0 */ @Getter public enum Metrics { /** * Events. */ CounterEventPublish(Component.events, Type.counter, "publish"), /** * thrift request. */ CounterThrift(Component.thrift, Type.counter, "request"), /** * DistributionSummary. */ DistributionSummaryAddPartitions(Component.partionservice, Type.distributionSummary, "partitionAdd"), DistributionSummaryMetadataOnlyAddPartitions(Component.partionservice, Type.distributionSummary, "partitionMetadataOnlyAdd"), DistributionSummaryGetPartitions(Component.partionservice, Type.distributionSummary, "partitionGet"), DistributionSummaryDeletePartitions(Component.partionservice, Type.distributionSummary, "partitionDelete"), /** * metacat request. */ CounterRequestCount(Component.server, Type.counter, "request"), CounterRequestFailureCount(Component.server, Type.counter, "requestfailure"), CounterTransactionRetryFailure(Component.server, Type.counter, "transactionRetryFailure"), CounterDeleteMetaData(Component.server, Type.counter, "deleteMetadata"), CounterCatalogTraversal(Component.server, Type.counter, "catalogTraversal"), CounterCatalogTraversalAlreadyRunning(Component.server, Type.counter, "catalogTraversalAlreadyRunning"), CounterCatalogTraversalCatalogReadFailed(Component.server, Type.counter, "catalogTraversalCatalogReadFailed"), CounterCatalogTraversalDatabaseReadFailed(Component.server, Type.counter, "catalogTraversalDatabaseReadFailed"), CounterCatalogTraversalTableReadFailed(Component.server, Type.counter, "catalogTraversalTableReadFailed"), CounterTableUpdateIgnoredException(Component.tableservice, Type.counter, "tableUpdateIgnoredException"), CounterTableCreateIgnoredException(Component.tableservice, Type.counter, "tableCreateIgnoredException"), CounterRequestRateLimitExceededCount(Component.server, Type.counter, "requestRateLimitExceeded"), /** * Notifications. 
*/ CounterSNSNotificationPartitionAdd(Component.notifications, Type.counter, "partitionsAdd"), CounterSNSNotificationPartitionLatestDeleteColumnAdd(Component.notifications, Type.counter, "partitionsLatestDeleteColumnAdd"), CounterSNSNotificationTablePartitionAdd(Component.notifications, Type.counter, "table.partitionsAdd"), CounterSNSNotificationPartitionDelete(Component.notifications, Type.counter, "partitionsDelete"), CounterSNSNotificationTablePartitionDelete(Component.notifications, Type.counter, "table.partitionsDelete"), CounterSNSNotificationTableCreate(Component.notifications, Type.counter, "table.Create"), CounterSNSNotificationTableDelete(Component.notifications, Type.counter, "table.Delete"), CounterSNSNotificationTableRename(Component.notifications, Type.counter, "table.Rename"), CounterSNSNotificationTableUpdate(Component.notifications, Type.counter, "table.Update"), CounterSNSNotificationPublishMessageSizeExceeded(Component.notifications, Type.counter, "publish.message.size.exceeded"), CounterSNSNotificationPublishFallback(Component.notifications, Type.counter, "publish.fallback"), CounterSNSNotificationPublishPartitionIdNumberExceeded(Component.notifications, Type.counter, "publish.partitionid.number.exceeded"), /** * ElasticSearch. */ TimerElasticSearchEventsDelay(Component.elasticsearch, Type.timer, "events.delay"), TimerElasticSearchDatabaseCreate(Component.elasticsearch, Type.timer, "databaseCreate"), TimerElasticSearchDatabaseDelete(Component.elasticsearch, Type.timer, "databaseDelete"), TimerElasticSearchTableCreate(Component.elasticsearch, Type.timer, "tableCreate"), TimerElasticSearchTableDelete(Component.elasticsearch, Type.timer, "tableDelete"), TimerElasticSearchTableSave(Component.elasticsearch, Type.timer, "tableSave"), TimerElasticSearchTableRename(Component.elasticsearch, Type.timer, "tableRename"), TimerElasticSearchTableUpdate(Component.elasticsearch, Type.timer, "tableUpdate"), TimerElasticSearchPartitionSave(Component.elasticsearch, Type.timer, "partitionSave"), TimerElasticSearchPartitionDelete(Component.elasticsearch, Type.timer, "partitionDelete"), CounterElasticSearchDelete(Component.elasticsearch, Type.counter, "esDelete"), CounterElasticSearchBulkDelete(Component.elasticsearch, Type.counter, "esBulkDelete"), CounterElasticSearchUpdate(Component.elasticsearch, Type.counter, "esUpdate"), CounterElasticSearchBulkUpdate(Component.elasticsearch, Type.counter, "esBulkUpdate"), CounterElasticSearchSave(Component.elasticsearch, Type.counter, "esSave"), CounterElasticSearchBulkSave(Component.elasticsearch, Type.counter, "esBulkSave"), CounterElasticSearchLog(Component.elasticsearch, Type.counter, "esLog"), CounterElasticSearchRefresh(Component.elasticsearch, Type.counter, "esRefresh"), CounterElasticSearchRefreshAlreadyRunning(Component.elasticsearch, Type.counter, "esRefreshAlreadyRunning"), CounterElasticSearchUnmarkedDatabaseThreshholdReached(Component.elasticsearch, Type.counter, "unmarkedDatabasesThresholdReached"), CounterElasticSearchUnmarkedTableThreshholdReached(Component.elasticsearch, Type.counter, "unmarkedTablesThresholdReached"), /** * Jdbc Interceptor */ GaugeConnectionsTotal(Component.jdbcinterceptor, Type.gauge, "connections.total"), GaugeConnectionsActive(Component.jdbcinterceptor, Type.gauge, "connections.active"), GaugeConnectionsIdle(Component.jdbcinterceptor, Type.gauge, "connections.idle"), /** * Timers. 
*/ TimerRequest(Component.server, Type.timer, "requests"), TimerThriftRequest(Component.server, Type.timer, "requests"), TimerElasticSearchRefresh(Component.server, Type.timer, "esRefresh"), TimerCatalogTraversal(Component.server, Type.timer, "catalogTraversal"), TimerNotificationsPublishDelay(Component.server, Type.timer, "publish.delay"), TimerNotificationsBeforePublishDelay(Component.server, Type.timer, "before.publish.delay"), TimerSavePartitionMetadata(Component.partionservice, Type.timer, "savePartitionMetadata"), TimerSaveTableMetadata(Component.tableservice, Type.timer, "saveTableMetadata"), TagEventsType("metacat.events.type"); public final static Map<String, String> tagStatusSuccessMap = ImmutableMap.of("status", "success"); public final static Map<String, String> tagStatusFailureMap = ImmutableMap.of("status", "failure"); enum Type { counter, gauge, timer, distributionSummary } enum Component { metacat, events, thrift, server, notifications, tableservice, partionservice, elasticsearch, jdbcinterceptor } private final String metricName; Metrics(final Component component, final Type type, final String measure) { this.metricName = Component.metacat.name() + "." + component.name() + "." + type.name() + "." + measure; } Metrics(final String tagName) { this.metricName = tagName; } @Override public String toString() { return metricName; } }
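Each constant above only encodes a hierarchical metric name of the form metacat.<component>.<type>.<measure>, plus a couple of shared tag maps; how those names get registered is left to the caller. The sketch below shows one plausible wiring through Netflix Spectator; the use of Spectator here is illustrative rather than something this enum mandates.

import java.util.concurrent.TimeUnit;

import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;

// Sketch: register a counter and a timer under the names defined by the Metrics enum.
public final class MetricsDemo {
    public static void main(final String[] args) {
        final Registry registry = new DefaultRegistry();

        // "metacat.server.counter.request", tagged with the shared success tags.
        registry.counter(registry.createId(Metrics.CounterRequestCount.getMetricName())
            .withTags(Metrics.tagStatusSuccessMap)).increment();

        // Timers follow the same naming scheme, e.g. "metacat.server.timer.requests".
        registry.timer(Metrics.TimerRequest.getMetricName())
            .record(42, TimeUnit.MILLISECONDS);
    }
}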
9,931
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; import javax.annotation.Nullable; /** * Post create metacat view event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateMViewPostEvent extends MetacatEvent { private final TableDto table; private final String filter; private final Boolean snapshot; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info * @param snapshot snapshot * @param filter filter */ public MetacatCreateMViewPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table, final Boolean snapshot, @Nullable final String filter ) { super(name, requestContext, source); this.table = table; this.snapshot = snapshot; this.filter = filter; } }
9,932
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.DatabaseDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre database delete event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteDatabasePreEvent extends MetacatEvent { private final DatabaseDto database; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param database database info */ public MetacatDeleteDatabasePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final DatabaseDto database ) { super(name, requestContext, source); this.database = database; } }
9,933
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post delete view event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteMViewPostEvent extends MetacatEvent { private final TableDto table; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info */ public MetacatDeleteMViewPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table ) { super(name, requestContext, source); this.table = table; } }
9,934
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post create table event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateTablePostEvent extends MetacatEvent { private final TableDto table; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info */ public MetacatCreateTablePostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table ) { super(name, requestContext, source); this.table = table; } }
9,935
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatApplicationEventMulticaster.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.common.server.util.RegistryUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ApplicationEventMulticaster;
import org.springframework.context.event.SimpleApplicationEventMulticaster;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.Map;

/**
 * Event bus implementation using Spring's event multicaster. By default, Spring supports synchronous event
 * publishing; this implementation supports both synchronous and asynchronous event publishing. If a listener is
 * annotated with AsyncListener, events are published to it asynchronously using a separate executor pool.
 * This implementation should be registered as the application event multicaster in the Spring context.
 * Synchronous publishing of events is handled by this class and asynchronous publishing is delegated to the
 * per-listener async event multicasters.
 *
 * @author amajumdar
 * @since 1.1.x
 */
@Slf4j
public class MetacatApplicationEventMulticaster extends SimpleApplicationEventMulticaster {
    // Map of event multicasters keyed by the listener class name.
    private final Map<String, ApplicationEventMulticaster> asyncEventMulticasters = Maps.newHashMap();
    private final Registry registry;
    private final MetacatProperties metacatProperties;

    /**
     * Constructor.
     *
     * @param registry          registry for spectator
     * @param metacatProperties The metacat properties to get the number of executor threads from.
     *                          Likely best to use one more than the number of CPUs.
     */
    public MetacatApplicationEventMulticaster(final Registry registry, final MetacatProperties metacatProperties) {
        super();
        this.registry = registry;
        this.metacatProperties = metacatProperties;
    }

    /**
     * Post event. Events will be handled synchronously or asynchronously based on the listener annotation.
     *
     * @param event event
     */
    public void post(final ApplicationEvent event) {
        super.multicastEvent(event);
        asyncEventMulticasters.values().forEach(aem -> aem.multicastEvent(event));
    }

    @Override
    public void addApplicationListener(final ApplicationListener listener) {
        if (isAsyncListener(listener)) {
            final Class<?> clazz = getListenerTargetClass(listener);
            final String clazzName = clazz.getName();
            if (!asyncEventMulticasters.containsKey(clazzName)) {
                // Using the simple name of the class to register it with the registry.
                // There is a chance of name collision if two class names are the same under different packages.
                asyncEventMulticasters.put(clazzName, createApplicationEventMultiCaster(clazz.getSimpleName()));
            }
            asyncEventMulticasters.get(clazzName).addApplicationListener(listener);
        } else {
            super.addApplicationListener(listener);
        }
    }

    private boolean isAsyncListener(final ApplicationListener listener) {
        return listener.getClass().isAnnotationPresent(AsyncListener.class)
            || (listener instanceof MetacatApplicationListenerMethodAdapter
            && ((MetacatApplicationListenerMethodAdapter) listener).getTargetClass()
            .isAnnotationPresent(AsyncListener.class));
    }

    private Class<?> getListenerTargetClass(final ApplicationListener listener) {
        return listener instanceof MetacatApplicationListenerMethodAdapter
            ? ((MetacatApplicationListenerMethodAdapter) listener).getTargetClass() : listener.getClass();
    }

    private ApplicationEventMulticaster createApplicationEventMultiCaster(final String name) {
        final SimpleApplicationEventMulticaster result = new SimpleApplicationEventMulticaster();
        final ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(metacatProperties.getEvent().getBus().getExecutor().getThread().getCount());
        executor.initialize();
        RegistryUtil.registerThreadPool(registry, "metacat.event.pool." + name, executor.getThreadPoolExecutor());
        result.setTaskExecutor(executor);
        return result;
    }

    @Override
    public void removeApplicationListener(final ApplicationListener listener) {
        super.removeApplicationListener(listener);
        asyncEventMulticasters.values().forEach(aem -> aem.removeApplicationListener(listener));
    }

    @Override
    public void removeAllListeners() {
        super.removeAllListeners();
        asyncEventMulticasters.values().forEach(ApplicationEventMulticaster::removeAllListeners);
    }
}
9,936
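For illustration, a minimal sketch of the asynchronous path described above, assuming a hypothetical TableChangeLogger class that is not part of this repository: because the class carries @AsyncListener, MetacatApplicationEventMulticaster registers it against its own SimpleApplicationEventMulticaster with a dedicated executor, so post() delivers events to it off the publishing thread.

package com.netflix.metacat.common.server.events;

import org.springframework.context.ApplicationListener;

// Hypothetical listener used only to illustrate the sync/async routing above.
@AsyncListener
public class TableChangeLogger implements ApplicationListener<MetacatCreateTablePostEvent> {
    @Override
    public void onApplicationEvent(final MetacatCreateTablePostEvent event) {
        // Runs on the executor created for this listener class (its thread pool is registered
        // with the registry under "metacat.event.pool.TableChangeLogger"), not on the caller's thread.
        System.out.println("Table created: " + event.getName());
    }
}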
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePostEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;

/**
 * Post table delete event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatDeleteTablePostEvent extends MetacatEvent {
    private final TableDto table;
    private final boolean isMView;

    /**
     * Constructor.
     *
     * @param name           name
     * @param requestContext context
     * @param source         The source object which threw this event
     * @param table          table info
     */
    public MetacatDeleteTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto table
    ) {
        this(name, requestContext, source, table, false);
    }

    /**
     * Constructor.
     *
     * @param name           name
     * @param requestContext context
     * @param source         The source object which threw this event
     * @param table          table info
     * @param isMView        true, if the table is a materialized view
     */
    public MetacatDeleteTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto table,
        final boolean isMView
    ) {
        super(name, requestContext, source);
        this.table = table;
        this.isMView = isMView;
    }
}
9,937
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre table partition save event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatSaveTablePartitionPreEvent extends MetacatEvent { private final PartitionsSaveRequestDto saveRequest; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param saveRequest request */ public MetacatSaveTablePartitionPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final PartitionsSaveRequestDto saveRequest ) { super(name, requestContext, source); this.saveRequest = saveRequest; } }
9,938
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post database update event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatUpdateDatabasePostEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatUpdateDatabasePostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,939
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre table partition delete event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteTablePartitionPreEvent extends MetacatEvent { private final PartitionsSaveRequestDto saveRequest; /** * Constructor. * * @param name name * @param metacatRequestContext context * @param source The source object which threw this event * @param saveRequest request */ public MetacatDeleteTablePartitionPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext metacatRequestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final PartitionsSaveRequestDto saveRequest ) { super(name, metacatRequestContext, source); this.saveRequest = saveRequest; } }
9,940
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPostEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;
import java.util.Collections;
import java.util.List;

/**
 * Post view partition save event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatSaveMViewPartitionPostEvent extends MetacatEvent {
    private final List<PartitionDto> partitions;

    /**
     * Constructor.
     *
     * @param name           name
     * @param requestContext context
     * @param source         The source object which threw this event
     * @param partitions     partitions
     */
    public MetacatSaveMViewPartitionPostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final List<PartitionDto> partitions
    ) {
        super(name, requestContext, source);
        this.partitions = Collections.unmodifiableList(partitions);
    }
}
9,941
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateIcebergTablePostEvent.java
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

/**
 * Event fired after an Iceberg table has been updated.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatUpdateIcebergTablePostEvent extends MetacatEvent {
    private final TableDto oldTable;
    private final TableDto requestTable;

    /**
     * Constructor.
     *
     * @param name           The name of the Table.
     * @param requestContext The request context.
     * @param source         The source of this event.
     * @param oldTable       The old Table instance.
     * @param requestTable   The TableDto instance sent in the request.
     */
    public MetacatUpdateIcebergTablePostEvent(
        final QualifiedName name,
        final MetacatRequestContext requestContext,
        final Object source,
        @NonNull final TableDto oldTable,
        @NonNull final TableDto requestTable
    ) {
        super(name, requestContext, source);
        this.oldTable = oldTable;
        this.requestTable = requestTable;
    }
}
9,942
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre view partition save event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatSaveMViewPartitionPreEvent extends MetacatEvent { private final PartitionsSaveRequestDto saveRequest; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param saveRequest request */ public MetacatSaveMViewPartitionPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final PartitionsSaveRequestDto saveRequest ) { super(name, requestContext, source); this.saveRequest = saveRequest; } }
9,943
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPreEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;

/**
 * Pre view partition delete event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatDeleteMViewPartitionPreEvent extends MetacatEvent {
    private final PartitionsSaveRequestDto saveRequest;

    /**
     * Constructor.
     *
     * @param name                  name
     * @param metacatRequestContext context
     * @param source                The source object which threw this event
     * @param saveRequest           request
     */
    public MetacatDeleteMViewPartitionPreEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext metacatRequestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final PartitionsSaveRequestDto saveRequest
    ) {
        super(name, metacatRequestContext, source);
        this.saveRequest = saveRequest;
    }
}
9,944
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPostEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Post table partition delete event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatDeleteTablePartitionPostEvent extends MetacatEvent {
    private final List<String> partitionIds;
    private final List<PartitionDto> partitions;

    /**
     * Constructor.
     *
     * @param name           name
     * @param requestContext context
     * @param source         The source object which threw this event
     * @param partitions     partition dtos
     */
    public MetacatDeleteTablePartitionPostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final List<PartitionDto> partitions
    ) {
        super(name, requestContext, source);
        this.partitions = Collections.unmodifiableList(partitions);
        this.partitionIds = partitions.stream()
            .map(dto -> dto.getName().getPartitionName()).collect(Collectors.toList());
    }
}
9,945
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre create database event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateDatabasePreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatCreateDatabasePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,946
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre database update event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatUpdateDatabasePreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatUpdateDatabasePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,947
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.DatabaseDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post create database event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateDatabasePostEvent extends MetacatEvent { private final DatabaseDto database; /** * Constructor. * * @param name name * @param requestContext request context * @param source The source object which threw this event * @param database database info */ public MetacatCreateDatabasePostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final DatabaseDto database ) { super(name, requestContext, source); this.database = database; } }
9,948
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post rename view event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatRenameMViewPostEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatRenameMViewPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,949
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre create table event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateTablePreEvent extends MetacatEvent { private final TableDto table; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info */ public MetacatCreateTablePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table ) { super(name, requestContext, source); this.table = table; } }
9,950
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionMetadataOnlyPostEvent.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionDto; import com.netflix.metacat.common.dto.PartitionsSaveResponseDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; import java.util.List; /** * Metacat save table partition metadata only post event. * * @author zhenl * @since 1.2.0 */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatSaveTablePartitionMetadataOnlyPostEvent extends MetacatEvent { private final List<PartitionDto> partitions; private final PartitionsSaveResponseDto partitionsSaveResponse; /** * Constructor. * * @param name name * @param metacatRequestContext context * @param source The source object which threw this event * @param partitions partitions * @param partitionsSaveResponse response */ public MetacatSaveTablePartitionMetadataOnlyPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext metacatRequestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final List<PartitionDto> partitions, @Nonnull @NonNull final PartitionsSaveResponseDto partitionsSaveResponse ) { super(name, metacatRequestContext, source); this.partitions = partitions; this.partitionsSaveResponse = partitionsSaveResponse; } }
9,951
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePostEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;

/**
 * Post table rename event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatRenameTablePostEvent extends MetacatEvent {
    private final TableDto currentTable;
    private final TableDto oldTable;
    private final boolean isMView;

    /**
     * Constructor.
     *
     * @param name           The old name of the table
     * @param requestContext The metacat request context
     * @param source         The source object which threw this event
     * @param oldTable       The old dto of the table
     * @param currentTable   The new representation of the table
     */
    public MetacatRenameTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto oldTable,
        @Nonnull @NonNull final TableDto currentTable
    ) {
        this(name, requestContext, source, oldTable, currentTable, false);
    }

    /**
     * Constructor.
     *
     * @param name           The old name of the table
     * @param requestContext The metacat request context
     * @param source         The source object which threw this event
     * @param oldTable       The old dto of the table
     * @param currentTable   The new representation of the table
     * @param isMView        true, if the table is a materialized view
     */
    public MetacatRenameTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto oldTable,
        @Nonnull @NonNull final TableDto currentTable,
        final boolean isMView
    ) {
        super(name, requestContext, source);
        this.oldTable = oldTable;
        this.currentTable = currentTable;
        this.isMView = isMView;
    }
}
9,952
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventBus.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationEvent;

import javax.annotation.Nonnull;

/**
 * Event bus.
 *
 * @author amajumdar
 * @author tgianos
 * @since 0.x
 */
@Slf4j
public class MetacatEventBus {
    private final MetacatApplicationEventMulticaster applicationEventMulticaster;
    private final Counter eventPublishCounter;

    /**
     * Constructor.
     *
     * @param applicationEventMulticaster The event multicaster to use
     * @param registry                    The spectator registry
     */
    public MetacatEventBus(
        @Nonnull @NonNull final MetacatApplicationEventMulticaster applicationEventMulticaster,
        @Nonnull @NonNull final Registry registry
    ) {
        this.applicationEventMulticaster = applicationEventMulticaster;
        this.eventPublishCounter = registry.counter(Metrics.CounterEventPublish.getMetricName());
    }

    /**
     * Post event.
     *
     * @param event event
     */
    public void post(final ApplicationEvent event) {
        log.debug("Received request to post an event {}", event);
        this.eventPublishCounter.increment();
        this.applicationEventMulticaster.post(event);
    }
}
9,953
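A brief usage sketch of the bus above, showing the pre/post convention used throughout this package: fire the *PreEvent before the operation and the matching *PostEvent once it has succeeded. The TableCreateFlow class is hypothetical; only MetacatEventBus and the two event constructors come from the repository.

package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;

// Illustrative service wrapper; 'eventBus' would normally be injected by Spring.
public class TableCreateFlow {
    private final MetacatEventBus eventBus;

    public TableCreateFlow(final MetacatEventBus eventBus) {
        this.eventBus = eventBus;
    }

    public void create(final QualifiedName name, final MetacatRequestContext context, final TableDto table) {
        eventBus.post(new MetacatCreateTablePreEvent(name, context, this, table));
        // ... perform the actual table creation here (omitted) ...
        eventBus.post(new MetacatCreateTablePostEvent(name, context, this, table));
    }
}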
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/AsyncListener.java
/*
 * Copyright 2018 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.metacat.common.server.events;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Annotation to mark a listener class whose methods will be processed asynchronously by the event bus.
 *
 * @author amajumdar
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AsyncListener {
}
9,954
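The annotation can also be combined with Spring's annotation-driven listeners; a hedged sketch follows, where PartitionChangeNotifier is a hypothetical class and only @AsyncListener and MetacatDeleteTablePartitionPostEvent come from this repository. The class-level annotation marks every @EventListener method in it for asynchronous dispatch.

package com.netflix.metacat.common.server.events;

import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

// Hypothetical annotation-driven listener illustrating @AsyncListener usage.
@Component
@AsyncListener
public class PartitionChangeNotifier {
    @EventListener
    public void onPartitionsDeleted(final MetacatDeleteTablePartitionPostEvent event) {
        // event.getPartitionIds() lists the dropped partition names; this method runs on the
        // executor dedicated to this listener class rather than on the publishing thread.
    }
}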
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; import javax.annotation.Nullable; /** * Pre create view event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatCreateMViewPreEvent extends MetacatEvent { private final String filter; private final Boolean snapshot; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param snapshot snapshot * @param filter filter */ public MetacatCreateMViewPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, final Boolean snapshot, @Nullable final String filter ) { super(name, requestContext, source); this.snapshot = snapshot; this.filter = filter; } }
9,955
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePostEvent.java
/*
 *
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat.common.server.events;

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;

import javax.annotation.Nonnull;

/**
 * Post table update event.
 */
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatUpdateTablePostEvent extends MetacatEvent {
    private TableDto oldTable;
    private TableDto currentTable;
    private boolean isLatestCurrentTable;

    /**
     * Constructor.
     *
     * @param name           The name of the table that was updated
     * @param requestContext The metacat request context
     * @param source         The source object which threw this event
     * @param oldTable       The old DTO representation of the table
     * @param currentTable   The current DTO representation of the table
     */
    public MetacatUpdateTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto oldTable,
        @Nonnull @NonNull final TableDto currentTable
    ) {
        this(name, requestContext, source, oldTable, currentTable, true);
    }

    /**
     * Constructor.
     *
     * @param name                 The name of the table that was updated
     * @param requestContext       The metacat request context
     * @param source               The source object which threw this event
     * @param oldTable             The old DTO representation of the table
     * @param currentTable         The current DTO representation of the table
     * @param isLatestCurrentTable Whether the current table is the latest version
     */
    public MetacatUpdateTablePostEvent(
        @Nonnull @NonNull final QualifiedName name,
        @Nonnull @NonNull final MetacatRequestContext requestContext,
        @Nonnull @NonNull final Object source,
        @Nonnull @NonNull final TableDto oldTable,
        @Nonnull @NonNull final TableDto currentTable,
        final boolean isLatestCurrentTable
    ) {
        super(name, requestContext, source);
        this.oldTable = oldTable;
        this.currentTable = currentTable;
        this.isLatestCurrentTable = isLatestCurrentTable;
    }
}
9,956
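A minimal sketch of a consumer that uses the isLatestCurrentTable flag carried by the event above; TableUpdateAuditor is hypothetical, while the event class and its Lombok-generated accessors are from the repository.

package com.netflix.metacat.common.server.events;

import org.springframework.context.ApplicationListener;

// Hypothetical synchronous listener: skips work when the current table could not be treated as the latest.
public class TableUpdateAuditor implements ApplicationListener<MetacatUpdateTablePostEvent> {
    @Override
    public void onApplicationEvent(final MetacatUpdateTablePostEvent event) {
        if (!event.isLatestCurrentTable()) {
            return; // currentTable may be stale; do not diff it against oldTable
        }
        // Compare event.getOldTable() with event.getCurrentTable() here, e.g. for auditing.
    }
}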
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre table rename event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatRenameTablePreEvent extends MetacatEvent { private final QualifiedName newName; /** * Constructor. * * @param name name. * @param requestContext context * @param source The source object which threw this event * @param newName new name */ public MetacatRenameTablePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final QualifiedName newName ) { super(name, requestContext, source); this.newName = newName; } }
9,957
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; import java.util.Collections; import java.util.List; /** * Post delete view partition event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteMViewPartitionPostEvent extends MetacatEvent { private final List<String> partitionIds; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param partitionIds partition names */ public MetacatDeleteMViewPartitionPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final List<String> partitionIds ) { super(name, requestContext, source); this.partitionIds = Collections.unmodifiableList(partitionIds); } }
9,958
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionMetadataOnlyPreEvent.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Metacat save table partition metadata only pre event. * * @author zhenl * @since 1.2.0 */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatSaveTablePartitionMetadataOnlyPreEvent extends MetacatEvent { private final PartitionsSaveRequestDto saveRequest; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param saveRequest request */ public MetacatSaveTablePartitionMetadataOnlyPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final PartitionsSaveRequestDto saveRequest ) { super(name, requestContext, source); this.saveRequest = saveRequest; } }
9,959
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.DatabaseDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post delete database event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteDatabasePostEvent extends MetacatEvent { private final DatabaseDto database; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param database database info */ public MetacatDeleteDatabasePostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final DatabaseDto database ) { super(name, requestContext, source); this.database = database; } }
9,960
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatApplicationListenerMethodAdapter.java
/*
 * Copyright 2018 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.metacat.common.server.events;

import org.springframework.context.event.ApplicationListenerMethodAdapter;

import java.lang.reflect.Method;

/**
 * This class has been introduced to get access to the targetClass in ApplicationListenerMethodAdapter.
 *
 * @author amajumdar
 * @since 1.2.x
 */
public class MetacatApplicationListenerMethodAdapter extends ApplicationListenerMethodAdapter {
    private final Class<?> targetClass;

    /**
     * Constructor.
     *
     * @param beanName    bean name
     * @param targetClass bean class
     * @param method      bean method
     */
    public MetacatApplicationListenerMethodAdapter(final String beanName,
                                                   final Class<?> targetClass,
                                                   final Method method) {
        super(beanName, targetClass, method);
        this.targetClass = targetClass;
    }

    public Class<?> getTargetClass() {
        return targetClass;
    }
}
9,961
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre view rename event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatRenameMViewPreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatRenameMViewPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,962
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post view update event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatUpdateMViewPostEvent extends MetacatEvent { private final TableDto table; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info */ public MetacatUpdateMViewPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table ) { super(name, requestContext, source); this.table = table; } }
9,963
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import org.springframework.context.ApplicationEvent; import javax.annotation.Nonnull; /** * Event within the Metacat JVM. * * @author amajumdar * @author tgianos */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatEvent extends ApplicationEvent { private final QualifiedName name; private final MetacatRequestContext requestContext; /** * Constructor. * * @param name The qualified name of the resource this event pertains to * @param requestContext The request context that triggered this event * @param source The source object this event was generated from */ public MetacatEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(source); this.name = name; this.requestContext = requestContext; } }
9,964
0
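Since MetacatEvent extends Spring's ApplicationEvent, the concrete events in this package can be consumed with ordinary @EventListener methods. The sketch below is hypothetical (the listener class name and logging are illustrative, not part of Metacat) and reacts to the MetacatDeleteDatabasePostEvent shown earlier in this listing.

import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

/**
 * Hypothetical listener sketch: reacts to the post-delete-database event.
 */
@Slf4j
@Component
public class ExampleDatabaseAuditListener {

    @EventListener
    public void onDatabaseDeleted(final MetacatDeleteDatabasePostEvent event) {
        // getName() and getRequestContext() come from the MetacatEvent base class;
        // getDatabase() is generated by Lombok on the concrete event.
        log.info("Database {} deleted; dto={}, context={}",
            event.getName(), event.getDatabase(), event.getRequestContext());
    }
}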
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre database rename event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatRenameDatabasePreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatRenameDatabasePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,965
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Post database rename event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatRenameDatabasePostEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatRenameDatabasePostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,966
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre table delete event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteTablePreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatDeleteTablePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,967
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventListenerFactory.java
/* * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.metacat.common.server.events; import org.springframework.context.ApplicationListener; import org.springframework.context.event.EventListenerFactory; import java.lang.reflect.Method; /** * This class overrides the DefaultEventListenerFactory in the Spring container. * * @author amajumdar * @since 1.2.x */ public class MetacatEventListenerFactory implements EventListenerFactory { @Override public boolean supportsMethod(final Method method) { return true; } @Override public ApplicationListener<?> createApplicationListener(final String beanName, final Class<?> type, final Method method) { return new MetacatApplicationListenerMethodAdapter(beanName, type, method); } }
9,968
0
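A hedged wiring sketch for the two classes above: registering MetacatEventListenerFactory as a bean lets Spring's EventListenerMethodProcessor wrap @EventListener methods in MetacatApplicationListenerMethodAdapter, which exposes the listener's target class. The configuration class name is hypothetical, and how Metacat itself arranges precedence over Spring's DefaultEventListenerFactory is not shown here.

import com.netflix.metacat.common.server.events.MetacatEventListenerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.event.EventListenerFactory;

/**
 * Hypothetical configuration sketch exposing the custom EventListenerFactory.
 */
@Configuration
public class ExampleEventListenerConfig {

    @Bean
    public EventListenerFactory metacatEventListenerFactory() {
        // Assumption: this bean must still be ordered ahead of Spring's
        // DefaultEventListenerFactory (which sorts itself last) to take effect.
        return new MetacatEventListenerFactory();
    }
}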
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre delete view event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatDeleteMViewPreEvent extends MetacatEvent { /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event */ public MetacatDeleteMViewPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source ) { super(name, requestContext, source); } }
9,969
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre table update event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatUpdateTablePreEvent extends MetacatEvent { private final TableDto currentTable; private final TableDto oldTable; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param oldTable old table info * @param currentTable new table info */ public MetacatUpdateTablePreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto oldTable, @Nonnull @NonNull final TableDto currentTable ) { super(name, requestContext, source); this.oldTable = oldTable; this.currentTable = currentTable; } }
9,970
0
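The pre-update event above carries both the old and the proposed table DTOs, so a listener can diff them before the change is applied. The sketch below is hypothetical and assumes TableDto exposes its column list via getFields().

import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

/**
 * Hypothetical listener sketch: compares old and new table metadata before an update.
 */
@Slf4j
@Component
public class ExampleTableUpdateObserver {

    @EventListener
    public void onTableUpdate(final MetacatUpdateTablePreEvent event) {
        final int oldFields = event.getOldTable().getFields() == null
            ? 0 : event.getOldTable().getFields().size();
        final int newFields = event.getCurrentTable().getFields() == null
            ? 0 : event.getCurrentTable().getFields().size();
        log.info("Table {} changing from {} to {} fields", event.getName(), oldFields, newFields);
    }
}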
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPostEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.PartitionDto; import com.netflix.metacat.common.dto.PartitionsSaveResponseDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; import java.util.Collections; import java.util.List; /** * Post table partition save event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatSaveTablePartitionPostEvent extends MetacatEvent { private final List<PartitionDto> partitions; private final PartitionsSaveResponseDto partitionsSaveResponse; /** * Constructor. * * @param name name * @param metacatRequestContext context * @param source The source object which threw this event * @param partitions partitions * @param partitionsSaveResponse response */ public MetacatSaveTablePartitionPostEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext metacatRequestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final List<PartitionDto> partitions, @Nonnull @NonNull final PartitionsSaveResponseDto partitionsSaveResponse ) { super(name, metacatRequestContext, source); this.partitions = Collections.unmodifiableList(partitions); this.partitionsSaveResponse = partitionsSaveResponse; } }
9,971
0
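A hedged sketch of how a service might publish the event above once partitions have been persisted; the publisher class and method names are illustrative, and only the constructor shown in the event itself is relied upon.

import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import org.springframework.context.ApplicationEventPublisher;

import java.util.List;

/**
 * Hypothetical publisher sketch: fires the post-save event after partitions are saved.
 */
public class ExamplePartitionEventPublisher {

    private final ApplicationEventPublisher publisher;

    public ExamplePartitionEventPublisher(final ApplicationEventPublisher publisher) {
        this.publisher = publisher;
    }

    public void publishSaved(
        final QualifiedName table,
        final MetacatRequestContext context,
        final List<PartitionDto> partitions,
        final PartitionsSaveResponseDto response
    ) {
        // The event wraps the partition list in an unmodifiable view, so listeners cannot mutate it.
        publisher.publishEvent(new MetacatSaveTablePartitionPostEvent(table, context, this, partitions, response));
    }
}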
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/package-info.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * This package includes metacat events. * * @author amajumdar */ package com.netflix.metacat.common.server.events;
9,972
0
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPreEvent.java
/* * * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.common.server.events; import com.netflix.metacat.common.MetacatRequestContext; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.TableDto; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.ToString; import javax.annotation.Nonnull; /** * Pre view update event. */ @Getter @EqualsAndHashCode(callSuper = true) @ToString(callSuper = true) public class MetacatUpdateMViewPreEvent extends MetacatEvent { private final TableDto table; /** * Constructor. * * @param name name * @param requestContext context * @param source The source object which threw this event * @param table table info */ public MetacatUpdateMViewPreEvent( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final MetacatRequestContext requestContext, @Nonnull @NonNull final Object source, @Nonnull @NonNull final TableDto table ) { super(name, requestContext, source); this.table = table; } }
9,973
0
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorModule.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.Cluster; import com.google.common.collect.ImmutableList; import com.google.inject.AbstractModule; import com.google.inject.Provides; import com.google.inject.Scopes; import com.google.inject.Singleton; import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService; import com.netflix.metacat.common.server.connectors.ConnectorPartitionService; import com.netflix.metacat.common.server.connectors.ConnectorTableService; import com.netflix.metacat.common.server.connectors.ConnectorUtils; import lombok.NonNull; import javax.annotation.Nonnull; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Map; /** * A Guice Module for the CassandraConnector. * * @author tgianos * @since 1.0.0 */ public class CassandraConnectorModule extends AbstractModule { private static final String CONTACT_POINTS_KEY = "cassandra.contactPoints"; private static final String PORT_KEY = "cassandra.port"; private static final String USERNAME_KEY = "cassandra.username"; private static final String PASSWORD_KEY = "cassandra.password"; private final String catalogShardName; private final Map<String, String> configuration; /** * Constructor. * * @param catalogShardName catalog shard name * @param configuration connector configuration */ CassandraConnectorModule( @Nonnull @NonNull final String catalogShardName, @Nonnull @NonNull final Map<String, String> configuration ) { this.catalogShardName = catalogShardName; this.configuration = configuration; } /** * {@inheritDoc} */ @Override protected void configure() { this.bind(CassandraTypeConverter.class).toInstance(new CassandraTypeConverter()); this.bind(CassandraExceptionMapper.class).toInstance(new CassandraExceptionMapper()); this.bind(ConnectorDatabaseService.class) .to(ConnectorUtils.getDatabaseServiceClass(this.configuration, CassandraConnectorDatabaseService.class)) .in(Scopes.SINGLETON); this.bind(ConnectorTableService.class) .to(ConnectorUtils.getTableServiceClass(this.configuration, CassandraConnectorTableService.class)) .in(Scopes.SINGLETON); this.bind(ConnectorPartitionService.class) .to(ConnectorUtils.getPartitionServiceClass(this.configuration, CassandraConnectorPartitionService.class)) .in(Scopes.SINGLETON); } /** * Creates a cluster to use for connecting to Cassandra. * * @return The cluster singleton to use within the Injector */ @Provides @Singleton Cluster provideCluster() { final Cluster.Builder builder = Cluster.builder().withClusterName(this.catalogShardName); // Contact points are required final String contactPointsString = this.configuration.get(CONTACT_POINTS_KEY); if (contactPointsString == null) { throw new IllegalArgumentException(CONTACT_POINTS_KEY + " value is missing and is required."); } final String[] contactPoints = contactPointsString.split(","); final ImmutableList.Builder<InetAddress> contactAddresses = ImmutableList.builder(); for (final String contactPoint : contactPoints) { try { contactAddresses.add(InetAddress.getByName(contactPoint)); } catch (final UnknownHostException uhe) { throw new IllegalArgumentException("Can't parse contact point " + contactPoint, uhe); } } builder.addContactPoints(contactAddresses.build()); final String port = this.configuration.get(PORT_KEY); if (port != null) { builder.withPort(Integer.parseInt(port)); } final String username = this.configuration.get(USERNAME_KEY); final String password = this.configuration.get(PASSWORD_KEY); if (username != null && password != null) { builder.withCredentials(username, password); } return builder.build(); } }
9,974
0
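The module above reads its connection settings from the catalog configuration map under the cassandra.* keys defined at the top of the class. Below is a hedged illustration of such a map; the hosts, port and credentials are placeholders.

import com.google.common.collect.ImmutableMap;

import java.util.Map;

/**
 * Hypothetical catalog configuration for the Cassandra connector; values are placeholders.
 */
public final class ExampleCassandraCatalogConfig {

    private ExampleCassandraCatalogConfig() {
    }

    public static Map<String, String> configuration() {
        return ImmutableMap.of(
            "cassandra.contactPoints", "10.0.0.1,10.0.0.2", // required, comma separated
            "cassandra.port", "9042",                       // optional
            "cassandra.username", "metacat",                // optional, used only with a password
            "cassandra.password", "secret"                  // optional
        );
    }
}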
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraService.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Session; import lombok.Getter; import lombok.NonNull; import javax.annotation.Nonnull; /** * Abstract class for common Cassandra methods based around the Cluster. * * @author tgianos * @since 1.0.0 */ @Getter abstract class CassandraService { private final Cluster cluster; private final CassandraExceptionMapper exceptionMapper; CassandraService( @Nonnull @NonNull final Cluster cluster, @Nonnull @NonNull final CassandraExceptionMapper exceptionMapper ) { this.cluster = cluster; this.exceptionMapper = exceptionMapper; } /** * Execute a query on the Cassandra cluster pointed to by the given Cluster configuration. * * @param query The query to execute * @return The query results if any * @throws com.datastax.driver.core.exceptions.NoHostAvailableException if no host in the cluster can be * contacted successfully to execute this * query. * @throws com.datastax.driver.core.exceptions.QueryExecutionException if the query triggered an execution * exception, i.e. an exception thrown by * Cassandra when it cannot execute * the query with the requested consistency * level successfully. * @throws com.datastax.driver.core.exceptions.QueryValidationException if the query if invalid (syntax error, * unauthorized or any other validation * problem). */ ResultSet executeQuery(@Nonnull @NonNull final String query) { try (final Session session = this.cluster.connect()) { // From documentation it doesn't look like ResultSet needs to be closed return session.execute(query); } } }
9,975
0
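A hedged sketch of a subclass using the executeQuery helper above; because CassandraService and executeQuery are package-private, the sketch assumes it lives in the same com.netflix.metacat.connector.cassandra package, and the CQL statement is only illustrative.

package com.netflix.metacat.connector.cassandra;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;

/**
 * Hypothetical subclass sketch: runs a simple CQL statement through executeQuery().
 */
class ExampleCassandraHealthService extends CassandraService {

    ExampleCassandraHealthService(final Cluster cluster, final CassandraExceptionMapper exceptionMapper) {
        super(cluster, exceptionMapper);
    }

    String releaseVersion() {
        // system.local is a standard Cassandra system table.
        final Row row = this.executeQuery("SELECT release_version FROM system.local;").one();
        return row == null ? null : row.getString("release_version");
    }
}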
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorTableService.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ColumnMetadata; import com.datastax.driver.core.KeyspaceMetadata; import com.datastax.driver.core.TableMetadata; import com.datastax.driver.core.exceptions.DriverException; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.Pageable; import com.netflix.metacat.common.dto.Sort; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.ConnectorTableService; import com.netflix.metacat.common.server.connectors.ConnectorUtils; import com.netflix.metacat.common.server.connectors.model.FieldInfo; import com.netflix.metacat.common.server.connectors.model.TableInfo; import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException; import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.inject.Inject; import java.util.Comparator; import java.util.List; /** * Cassandra implementation of the ConnectorTableService. * * @author tgianos * @see ConnectorTableService * @since 3.0.0 */ @Slf4j public class CassandraConnectorTableService extends CassandraService implements ConnectorTableService { private final CassandraTypeConverter typeConverter; /** * Constructor. * * @param cluster The cluster this service connects to * @param exceptionMapper The exception mapper to use * @param typeConverter The type converter to convert from CQL types to Metacat Types */ @Inject public CassandraConnectorTableService( @Nonnull @NonNull final Cluster cluster, @Nonnull @NonNull final CassandraExceptionMapper exceptionMapper, @Nonnull @NonNull final CassandraTypeConverter typeConverter ) { super(cluster, exceptionMapper); this.typeConverter = typeConverter; } /** * {@inheritDoc} */ @Override public void delete(@Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name) { final String keyspace = name.getDatabaseName(); final String table = name.getTableName(); log.debug("Attempting to delete Cassandra table {}.{} for request {}", keyspace, table, context); try { this.executeQuery("USE " + keyspace + "; DROP TABLE IF EXISTS " + table + ";"); log.debug("Successfully deleted Cassandra table {}.{} for request {}", keyspace, table, context); } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public TableInfo get(@Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name) { final String keyspace = name.getDatabaseName(); final String table = name.getTableName(); log.debug("Attempting to get metadata for Cassandra table {}.{} for request {}", keyspace, table, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { throw new DatabaseNotFoundException(name); } final TableMetadata tableMetadata = keyspaceMetadata.getTable(table); if (tableMetadata == null) { throw new TableNotFoundException(name); } final TableInfo tableInfo = this.getTableInfo(name, tableMetadata); log.debug("Successfully got metadata for Cassandra table {}.{} for request {}", keyspace, table, context); return tableInfo; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public boolean exists( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name ) { final String keyspace = name.getDatabaseName(); final String table = name.getTableName(); log.debug("Checking if Cassandra table {}.{} exists for request {}", keyspace, table, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { return false; } final boolean exists = keyspaceMetadata.getTable(table) != null; log.debug( "Cassandra table {}.{} {} for request {}", keyspace, table, exists ? "exists" : "doesn't exist", context ); return exists; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public List<TableInfo> list( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable ) { final String keyspace = name.getDatabaseName(); log.debug("Attempting to list tables in Cassandra keyspace {} for request {}", keyspace, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { throw new DatabaseNotFoundException(name); } // TODO: Should we include views? final List<TableInfo> tables = Lists.newArrayList(); for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) { if (prefix != null && !tableMetadata.getName().startsWith(prefix.getTableName())) { continue; } tables.add(this.getTableInfo(name, tableMetadata)); } // Sort if (sort != null) { final Comparator<TableInfo> tableComparator = Comparator.comparing((t) -> t.getName().getTableName()); ConnectorUtils.sort(tables, sort, tableComparator); } // Paging final List<TableInfo> pagedTables = ConnectorUtils.paginate(tables, pageable); log.debug( "Listed {} tables in Cassandra keyspace {} for request {}", pagedTables.size(), keyspace, context ); return pagedTables; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public List<QualifiedName> listNames( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable ) { final String catalog = name.getCatalogName(); final String keyspace = name.getDatabaseName(); log.debug("Attempting to list table names in Cassandra keyspace {} for request {}", keyspace, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { throw new DatabaseNotFoundException(name); } // TODO: Should we include views? final List<QualifiedName> tableNames = Lists.newArrayList(); for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) { final String tableName = tableMetadata.getName(); if (prefix != null && !tableName.startsWith(prefix.getTableName())) { continue; } tableNames.add(QualifiedName.ofTable(catalog, keyspace, tableName)); } // Sort if (sort != null) { final Comparator<QualifiedName> tableNameComparator = Comparator.comparing(QualifiedName::getTableName); ConnectorUtils.sort(tableNames, sort, tableNameComparator); } // Paging final List<QualifiedName> paged = ConnectorUtils.paginate(tableNames, pageable); log.debug("Listed {} table names in Cassandra keyspace {} for request {}", paged.size(), keyspace, context); return paged; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } private TableInfo getTableInfo( @Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final TableMetadata tableMetadata ) { final ImmutableList.Builder<FieldInfo> fieldInfoBuilder = ImmutableList.builder(); // TODO: Ignores clustering, primary key, index, etc columns. We need to rework TableInfo to support for (final ColumnMetadata column : tableMetadata.getColumns()) { final String dataType = column.getType().toString(); fieldInfoBuilder.add( FieldInfo.builder() .name(column.getName()) .sourceType(dataType) .type(this.typeConverter.toMetacatType(dataType)) .build() ); } return TableInfo.builder() .name(QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableMetadata.getName())) .fields(fieldInfoBuilder.build()) .build(); } }
9,976
0
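A hedged caller sketch for the table service above: the ConnectorRequestContext is taken as a parameter because its construction belongs to the connector framework, and the catalog name "cass" is a placeholder.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.cassandra.CassandraConnectorTableService;

import java.util.List;

/**
 * Hypothetical caller sketch for CassandraConnectorTableService.
 */
public class ExampleTableLookup {

    private final CassandraConnectorTableService tableService;

    public ExampleTableLookup(final CassandraConnectorTableService tableService) {
        this.tableService = tableService;
    }

    public TableInfo describe(final ConnectorRequestContext context, final String keyspace, final String table) {
        return this.tableService.get(context, QualifiedName.ofTable("cass", keyspace, table));
    }

    public List<QualifiedName> tableNames(final ConnectorRequestContext context, final String keyspace) {
        // Prefix, sort and pagination are all @Nullable, so nulls are passed for brevity.
        return this.tableService.listNames(context, QualifiedName.ofDatabase("cass", keyspace), null, null, null);
    }
}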
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorPartitionService.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.netflix.metacat.common.server.connectors.ConnectorPartitionService; /** * Default implementation that relies on the interface's default methods, since Cassandra doesn't have partitions. * * @author tgianos * @see ConnectorPartitionService * @since 1.0.0 */ public class CassandraConnectorPartitionService implements ConnectorPartitionService { }
9,977
0
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraExceptionMapper.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.exceptions.AlreadyExistsException; import com.datastax.driver.core.exceptions.DriverException; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.server.connectors.exception.ConnectorException; import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException; import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException; import lombok.NonNull; import javax.annotation.Nonnull; /** * Convert Cassandra driver exceptions to connector exceptions. * * @author tgianos * @see com.datastax.driver.core.exceptions.DriverException * @see com.netflix.metacat.common.server.connectors.exception.ConnectorException * @since 1.0.0 */ public class CassandraExceptionMapper { /** * Convert the given Cassandra driver exception to a corresponding ConnectorException if possible, otherwise * return a generic ConnectorException. * * @param de The Cassandra driver exception * @param name The fully qualified name of the resource which was attempting to be accessed or modified at time of * error * @return A connector exception wrapping the DriverException */ public ConnectorException toConnectorException( @Nonnull @NonNull final DriverException de, @Nonnull @NonNull final QualifiedName name ) { if (de instanceof AlreadyExistsException) { final AlreadyExistsException ae = (AlreadyExistsException) de; if (ae.wasTableCreation()) { return new TableAlreadyExistsException(name, ae); } else { return new DatabaseAlreadyExistsException(name, ae); } } else { return new ConnectorException(de.getMessage(), de); } } }
9,978
0
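The mapper above is used inside the catch blocks of the Cassandra services; a condensed, hypothetical version of that pattern is sketched below, with the Session passed in and the statement purely illustrative.

import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.DriverException;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.connector.cassandra.CassandraExceptionMapper;

/**
 * Hypothetical sketch of the catch-and-translate pattern used by the Cassandra services.
 */
public class ExampleExceptionTranslation {

    private final CassandraExceptionMapper mapper = new CassandraExceptionMapper();

    public void dropKeyspace(final Session session, final QualifiedName name) {
        try {
            session.execute("DROP KEYSPACE IF EXISTS " + name.getDatabaseName() + ";");
        } catch (final DriverException de) {
            // ConnectorException is unchecked, so callers above the connector boundary
            // only ever see connector-level exceptions.
            throw this.mapper.toConnectorException(de, name);
        }
    }
}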
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorDatabaseService.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.KeyspaceMetadata; import com.datastax.driver.core.MaterializedViewMetadata; import com.datastax.driver.core.exceptions.DriverException; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.Pageable; import com.netflix.metacat.common.dto.Sort; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService; import com.netflix.metacat.common.server.connectors.ConnectorUtils; import com.netflix.metacat.common.server.connectors.model.DatabaseInfo; import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.inject.Inject; import java.util.Comparator; import java.util.List; /** * Implementation of the database service for Cassandra. For Cassandra the {@code Keyspace} is the equivalent of a JDBC * database. * * @author tgianos * @see ConnectorDatabaseService * @since 1.0.0 */ @Slf4j public class CassandraConnectorDatabaseService extends CassandraService implements ConnectorDatabaseService { /** * Constructor. * * @param cluster The cassandra cluster connection to use * @param exceptionMapper The exception mapper to use to convert from DriverException to ConnectorException */ @Inject public CassandraConnectorDatabaseService( @Nonnull @NonNull final Cluster cluster, @Nonnull @NonNull final CassandraExceptionMapper exceptionMapper ) { super(cluster, exceptionMapper); } /** * {@inheritDoc} */ @Override public void create( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final DatabaseInfo resource ) { final String keyspace = resource.getName().getDatabaseName(); log.debug("Attempting to create a Cassandra Keyspace named {} for request {}", keyspace, context); try { // TODO: Make this take parameters for replication and the class this.executeQuery( "CREATE KEYSPACE " + keyspace + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};" ); log.debug("Successfully created Cassandra Keyspace named {} for request {}", keyspace, context); } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, resource.getName()); } } /** * {@inheritDoc} */ @Override public void delete(@Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name) { final String keyspace = name.getDatabaseName(); log.debug("Attempting to drop Cassandra keyspace {} for request {}", keyspace, context); try { this.executeQuery("DROP KEYSPACE IF EXISTS " + keyspace + ";"); log.debug("Successfully dropped {} keyspace", keyspace); } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public DatabaseInfo get( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name ) { final String keyspace = name.getDatabaseName(); log.debug("Attempting to get keyspace metadata for keyspace {} for request {}", keyspace, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { throw new DatabaseNotFoundException(name); } log.debug("Successfully found the keyspace metadata for {} for request {}", name, context); return DatabaseInfo.builder().name(name).build(); } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public List<QualifiedName> listViewNames( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName databaseName ) { final String catalogName = databaseName.getCatalogName(); final String keyspace = databaseName.getDatabaseName(); log.debug("Attempting to get materialized view names for keyspace {} due to request {}", keyspace, context); try { final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace); if (keyspaceMetadata == null) { throw new DatabaseNotFoundException(databaseName); } final ImmutableList.Builder<QualifiedName> viewsBuilder = ImmutableList.builder(); for (final MaterializedViewMetadata view : keyspaceMetadata.getMaterializedViews()) { viewsBuilder.add( QualifiedName.ofView(catalogName, keyspace, view.getBaseTable().getName(), view.getName()) ); } final List<QualifiedName> views = viewsBuilder.build(); log.debug("Successfully found {} views for keyspace {} due to request {}", views.size(), keyspace, context); return views; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, databaseName); } } /** * {@inheritDoc} */ @Override public boolean exists( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name ) { final String keyspace = name.getDatabaseName(); log.debug("Checking if keyspace {} exists for request {}", keyspace, context); try { final boolean exists = this.getCluster().getMetadata().getKeyspace(keyspace) != null; log.debug("Keyspace {} {} for request {}", keyspace, exists ? "exists" : "doesn't exist", context); return exists; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } /** * {@inheritDoc} */ @Override public List<DatabaseInfo> list( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable ) { log.debug("Attempting to list keyspaces for request {}", context); final ImmutableList.Builder<DatabaseInfo> keyspacesBuilder = ImmutableList.builder(); for (final QualifiedName keyspace : this.listNames(context, name, prefix, sort, pageable)) { keyspacesBuilder.add(DatabaseInfo.builder().name(keyspace).build()); } final List<DatabaseInfo> keyspaces = keyspacesBuilder.build(); log.debug("Successfully listed {} keyspaces for request {}", keyspaces.size(), context); return keyspaces; } /** * {@inheritDoc} */ @Override public List<QualifiedName> listNames( @Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable ) { log.debug("Attempting to list keyspaces for request {}", context); try { final List<QualifiedName> names = Lists.newArrayList(); for (final KeyspaceMetadata keyspace : this.getCluster().getMetadata().getKeyspaces()) { final String keyspaceName = keyspace.getName(); if (prefix != null && !keyspaceName.startsWith(prefix.getDatabaseName())) { continue; } names.add(QualifiedName.ofDatabase(name.getCatalogName(), keyspaceName)); } if (sort != null) { // We can only really sort by the database name at this level so ignore SortBy field final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName); ConnectorUtils.sort(names, sort, comparator); } final List<QualifiedName> results = ConnectorUtils.paginate(names, pageable); log.debug("Finished listing keyspaces for request {}", context); return results; } catch (final DriverException de) { log.error(de.getMessage(), de); throw this.getExceptionMapper().toConnectorException(de, name); } } }
9,979
0
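A hedged caller sketch for the keyspace service above; it assumes QualifiedName.ofCatalog is available for building a catalog-level name, uses "cass" as a placeholder catalog, and passes nulls for the optional prefix, sort and pagination arguments.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.connector.cassandra.CassandraConnectorDatabaseService;

import java.util.List;

/**
 * Hypothetical caller sketch for CassandraConnectorDatabaseService.
 */
public class ExampleKeyspaceLookup {

    private final CassandraConnectorDatabaseService databaseService;

    public ExampleKeyspaceLookup(final CassandraConnectorDatabaseService databaseService) {
        this.databaseService = databaseService;
    }

    public List<QualifiedName> keyspaces(final ConnectorRequestContext context) {
        return this.databaseService.listNames(context, QualifiedName.ofCatalog("cass"), null, null, null);
    }

    public List<QualifiedName> materializedViews(final ConnectorRequestContext context, final String keyspace) {
        return this.databaseService.listViewNames(context, QualifiedName.ofDatabase("cass", keyspace));
    }
}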
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorPlugin.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.netflix.metacat.common.server.connectors.ConnectorFactory; import com.netflix.metacat.common.server.connectors.ConnectorPlugin; import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter; import com.netflix.metacat.common.server.connectors.ConnectorContext; import lombok.NonNull; import javax.annotation.Nonnull; /** * Cassandra Connector Plugin. * * @author tgianos * @since 1.0.0 */ public class CassandraConnectorPlugin implements ConnectorPlugin { private static final String CONNECTOR_TYPE = "cassandra"; private static final CassandraTypeConverter TYPE_CONVERTER = new CassandraTypeConverter(); /** * {@inheritDoc} */ @Override public String getType() { return CONNECTOR_TYPE; } /** * {@inheritDoc} */ @Override public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) { return new CassandraConnectorFactory(connectorContext.getCatalogName(), connectorContext.getCatalogShardName(), connectorContext.getConfiguration()); } /** * {@inheritDoc} */ @Override public ConnectorTypeConverter getTypeConverter() { return TYPE_CONVERTER; } }
9,980
0
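A hedged bootstrap sketch for the plugin above: a hosting framework would typically select the plugin by its type string and hand it a ConnectorContext; both the class name and the selection logic here are illustrative.

import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.connector.cassandra.CassandraConnectorPlugin;

/**
 * Hypothetical selection sketch: builds a connector factory from the Cassandra plugin.
 */
public class ExamplePluginBootstrap {

    public ConnectorFactory createFactory(final ConnectorContext connectorContext) {
        final ConnectorPlugin plugin = new CassandraConnectorPlugin();
        if (!"cassandra".equals(plugin.getType())) {
            throw new IllegalStateException("Unexpected connector type " + plugin.getType());
        }
        return plugin.create(connectorContext);
    }
}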
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorFactory.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.datastax.driver.core.Cluster; import com.google.common.collect.Lists; import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory; import lombok.NonNull; import javax.annotation.Nonnull; import java.util.Map; /** * Cassandra implementation of a connector factory. * * @author tgianos * @since 1.0.0 */ class CassandraConnectorFactory extends DefaultConnectorFactory { /** * Constructor. * * @param name The catalog name * @param catalogShardName The catalog shard name * @param configuration The catalog configuration */ CassandraConnectorFactory( @Nonnull @NonNull final String name, @Nonnull @NonNull final String catalogShardName, @Nonnull @NonNull final Map<String, String> configuration ) { super(name, catalogShardName, Lists.newArrayList(new CassandraConnectorModule(catalogShardName, configuration))); } /** * {@inheritDoc} */ @Override public void stop() { super.stop(); // Stop the cassandra cluster final Cluster cluster = this.getInjector().getInstance(Cluster.class); if (cluster != null) { cluster.close(); } } }
9,981
0
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/package-info.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * Classes for connecting and getting metadata for Cassandra schemas and tables. * * @author tgianos * @since 1.0.0 */ package com.netflix.metacat.connector.cassandra;
9,982
0
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraTypeConverter.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.metacat.connector.cassandra; import com.google.common.collect.ImmutableList; import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter; import com.netflix.metacat.common.type.ArrayType; import com.netflix.metacat.common.type.BaseType; import com.netflix.metacat.common.type.DecimalType; import com.netflix.metacat.common.type.MapType; import com.netflix.metacat.common.type.RowType; import com.netflix.metacat.common.type.Type; import com.netflix.metacat.common.type.VarbinaryType; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import javax.annotation.Nonnull; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Data type converter for Cassandra. * * @see <a href="http://cassandra.apache.org/doc/latest/cql/types.html">Cassandra Data Types</a> * @author tgianos * @since 1.0.0 */ @Slf4j public class CassandraTypeConverter implements ConnectorTypeConverter { private static final Pattern TYPE_PATTERN = Pattern.compile("^\\s*?(\\w*)\\s*?(?:<\\s*?(.*)\\s*?>)?\\s*?$"); private static final int TYPE_GROUP = 1; private static final int PARAM_GROUP = 2; private static final Pattern MAP_PARAM_PATTERN = Pattern .compile("^\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?,\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?$"); private static final int MAP_KEY_GROUP = 1; private static final int MAP_VALUE_GROUP = 2; private static final Pattern TUPLE_PARAM_PATTERN = Pattern.compile("(?:(\\w[\\w\\s]+(?:<[\\w+,\\s]+>\\s*?)?),?\\s*?)"); private static final int TUPLE_GROUP = 1; /** * {@inheritDoc} */ @Override public Type toMetacatType(@Nonnull @NonNull final String type) { final Matcher matcher = TYPE_PATTERN.matcher(type.toLowerCase()); // TODO: Escape case from recursion may be needed to avoid potential infinite if (matcher.matches()) { final String cqlType = matcher.group(TYPE_GROUP); switch (cqlType) { case "ascii": return BaseType.STRING; case "bigint": return BaseType.BIGINT; case "blob": return VarbinaryType.createVarbinaryType(Integer.MAX_VALUE); case "boolean": return BaseType.BOOLEAN; case "counter": return BaseType.BIGINT; case "date": return BaseType.DATE; case "decimal": return DecimalType.createDecimalType(); case "double": return BaseType.DOUBLE; case "float": return BaseType.FLOAT; case "frozen": return this.toMetacatType(matcher.group(PARAM_GROUP)); case "int": return BaseType.INT; case "list": // The possible null for the PARAM_GROUP should be handled on recursive call throwing exception return new ArrayType(this.toMetacatType(matcher.group(PARAM_GROUP))); case "map": final Matcher mapMatcher = MAP_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP)); if (mapMatcher.matches()) { return new MapType( this.toMetacatType(mapMatcher.group(MAP_KEY_GROUP)), this.toMetacatType(mapMatcher.group(MAP_VALUE_GROUP)) ); } else { throw new IllegalArgumentException("Unable to parse map params " + matcher.group(PARAM_GROUP)); } case "smallint": return BaseType.SMALLINT; case "text": return BaseType.STRING; case "time": return BaseType.TIME; case "timestamp": return BaseType.TIMESTAMP; case "tinyint": return BaseType.TINYINT; case "tuple": if (matcher.group(PARAM_GROUP) == null) { throw new IllegalArgumentException("Empty tuple param group. Unable to parse"); } final Matcher tupleMatcher = TUPLE_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP)); final ImmutableList.Builder<RowType.RowField> tupleFields = ImmutableList.builder(); int rowFieldNumber = 0; while (tupleMatcher.find()) { tupleFields.add( new RowType.RowField( this.toMetacatType(tupleMatcher.group(TUPLE_GROUP)), "field" + rowFieldNumber++ ) ); } return new RowType(tupleFields.build()); case "varchar": return BaseType.STRING; case "varint": return BaseType.INT; case "inet": case "set": case "timeuuid": case "uuid": default: log.info("Currently unsupported type {}, returning Unknown type", cqlType); return BaseType.UNKNOWN; } } else { throw new IllegalArgumentException("Unable to parse CQL type " + type); } } /** * {@inheritDoc} */ @Override public String fromMetacatType(@Nonnull @NonNull final Type type) { switch (type.getTypeSignature().getBase()) { case ARRAY: if (!(type instanceof ArrayType)) { throw new IllegalArgumentException("Expected an ArrayType and got " + type.getClass()); } final ArrayType arrayType = (ArrayType) type; return "list<" + this.getElementTypeString(arrayType.getElementType()) + ">"; case BIGINT: return "bigint"; case BOOLEAN: return "boolean"; case CHAR: // TODO: Should we make this unsupported? return "text"; case DATE: return "date"; case DECIMAL: return "decimal"; case DOUBLE: return "double"; case FLOAT: return "float"; case INT: return "int"; case INTERVAL_DAY_TO_SECOND: throw new UnsupportedOperationException("Cassandra doesn't support intervals."); case INTERVAL_YEAR_TO_MONTH: throw new UnsupportedOperationException("Cassandra doesn't support intervals."); case JSON: throw new UnsupportedOperationException("Cassandra doesn't support JSON natively."); case MAP: if (!(type instanceof MapType)) { throw new IllegalArgumentException("Was expecting MapType instead it is " + type.getClass()); } final MapType mapType = (MapType) type; final Type keyType = mapType.getKeyType(); final Type valueType = mapType.getValueType(); return "map<" + this.getElementTypeString(keyType) + ", " + this.getElementTypeString(valueType) + ">"; case ROW: if (!(type instanceof RowType)) { throw new IllegalArgumentException("Was expecting RowType instead it is " + type.getClass()); } final RowType rowType = (RowType) type; final StringBuilder tupleBuilder = new StringBuilder(); tupleBuilder.append("tuple<"); // Tuple fields don't need to be frozen boolean putComma = false; for (final RowType.RowField field : rowType.getFields()) { if (putComma) { tupleBuilder.append(", "); } else { putComma = true; } tupleBuilder.append(this.fromMetacatType(field.getType())); } tupleBuilder.append(">"); return tupleBuilder.toString(); case SMALLINT: return "smallint"; case STRING: return "text"; case TIME: return "time"; case TIME_WITH_TIME_ZONE: throw new UnsupportedOperationException("Cassandra doesn't support time with timezone"); case TIMESTAMP: return "timestamp"; case TIMESTAMP_WITH_TIME_ZONE: throw new UnsupportedOperationException("Cassandra doesn't support time with timezone"); case TINYINT: return "tinyint"; case UNKNOWN: throw new UnsupportedOperationException("Cassandra doesn't support an unknown type"); case VARBINARY: return "blob"; case VARCHAR: return "text"; default: throw new IllegalArgumentException("Unknown type: " + type.getTypeSignature().getBase()); } } private String getElementTypeString(final Type elementType) { // Nested collections must have if (elementType instanceof MapType || elementType instanceof ArrayType) { return "frozen " + this.fromMetacatType(elementType); } else { return this.fromMetacatType(elementType); } } }
9,983
0
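For orientation, here is a minimal, hypothetical sketch of how the CassandraTypeConverter above might be exercised. The driver class, package name, and literal type strings are made up for illustration; only the two public methods visible in the converter are called, and note that nested collections come back prefixed with frozen on the way out, per getElementTypeString, so the conversion is not guaranteed to be a byte-for-byte round trip.

package com.netflix.metacat.connector.cassandra.examples;

import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.connector.cassandra.CassandraTypeConverter;

/**
 * Hypothetical driver, not part of the Metacat codebase.
 */
public final class CassandraTypeConverterSketch {
    private CassandraTypeConverterSketch() {
    }

    /**
     * Round-trips a parameterized CQL type through the converter.
     *
     * @param args unused
     */
    public static void main(final String[] args) {
        final CassandraTypeConverter converter = new CassandraTypeConverter();

        // CQL -> Metacat: the map parameters are parsed recursively into
        // MapType(STRING -> ArrayType(BIGINT)).
        final Type mapType = converter.toMetacatType("map<text, frozen<list<bigint>>>");

        // Metacat -> CQL: nested collections are emitted with a frozen prefix.
        final String cql = converter.fromMetacatType(mapType);

        System.out.println(mapType.getTypeSignature());
        System.out.println(cql);
    }
}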
Create_ds/metacat/metacat-war/src/main/java/com/netflix
Create_ds/metacat/metacat-war/src/main/java/com/netflix/metacat/MetacatWar.java
/*
 *
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.metacat;

import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;

/**
 * Servlet entry point for Spring when deployed as a WAR file.
 *
 * @author tgianos
 * @since 1.1.0
 */
@SpringBootApplication
@ComponentScan(excludeFilters = @ComponentScan.Filter(
    type = FilterType.ASPECTJ,
    pattern = "com.netflix.metacat.connector..*"))
public class MetacatWar extends SpringBootServletInitializer {

    /**
     * Constructor.
     */
    public MetacatWar() {
    }

    /**
     * Main.
     *
     * @param args Program arguments.
     */
    public static void main(final String[] args) {
        new MetacatWar()
            .configure(new SpringApplicationBuilder(MetacatWar.class))
            .run(args);
    }
}
9,984
0
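MetacatWar above doubles as the servlet-container entry point (via SpringBootServletInitializer) and as a standalone main class. As a small, hypothetical convenience for local runs, one might wrap it like the sketch below; the launcher class and the "local" profile name are made up, and only Spring Boot's public SpringApplicationBuilder API is used.

package com.netflix.metacat.examples;

import com.netflix.metacat.MetacatWar;
import org.springframework.boot.builder.SpringApplicationBuilder;

/**
 * Hypothetical local launcher, not part of the Metacat codebase.
 */
public final class LocalMetacatLauncher {
    private LocalMetacatLauncher() {
    }

    /**
     * Starts the same application context as MetacatWar, with an extra profile.
     *
     * @param args program arguments
     */
    public static void main(final String[] args) {
        new SpringApplicationBuilder(MetacatWar.class)
            .profiles("local") // made-up profile name for local development
            .run(args);
    }
}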
Create_ds/metacat/metacat-war/src/main/java/com/netflix
Create_ds/metacat/metacat-war/src/main/java/com/netflix/metacat/package-info.java
/* * * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * Top level package for boot application files. * * @author tgianos * @since 1.1.0 */ @ParametersAreNonnullByDefault package com.netflix.metacat; import javax.annotation.ParametersAreNonnullByDefault;
9,985
0
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector/polaris/package-info.java
/* * * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * Polaris connector smoke test classes. */ package com.netflix.metacat.connector.polaris;
9,986
0
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector/polaris/PolarisStoreConnectorFunctionalTest.java
package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.store.PolarisStoreConnectorTest;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;

/**
 * Test persistence operations on Database objects.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polaris_functional_test"})
@AutoConfigureDataJpa
public class PolarisStoreConnectorFunctionalTest extends PolarisStoreConnectorTest {
}
9,987
0
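The functional test above inherits every test method from PolarisStoreConnectorTest and changes nothing but the active Spring profile, so new environments are covered by thin subclasses. A hypothetical variation on that pattern is sketched below, pointing the inherited suite at a locally running database; it assumes the Spring Test version on the classpath provides @DynamicPropertySource (Spring 5.2.5+), and the class name, connection details, and local database are all invented. The property keys are the standard spring.datasource.* ones.

package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.store.PolarisStoreConnectorTest;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.DynamicPropertyRegistry;
import org.springframework.test.context.DynamicPropertySource;
import org.springframework.test.context.junit.jupiter.SpringExtension;

/**
 * Hypothetical variant that runs the inherited store tests against a
 * locally running database instead of the profile's default datasource.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polaris_functional_test"})
@AutoConfigureDataJpa
public class PolarisStoreConnectorLocalDbTest extends PolarisStoreConnectorTest {

    @DynamicPropertySource
    static void datasourceProperties(final DynamicPropertyRegistry registry) {
        // Made-up local connection details; only standard Spring Boot keys are used.
        registry.add("spring.datasource.url", () -> "jdbc:postgresql://localhost:5432/polaris");
        registry.add("spring.datasource.username", () -> "polaris");
        registry.add("spring.datasource.password", () -> "polaris");
    }
}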
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorTableServiceTest.java
package com.netflix.metacat.connector.polaris; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.Pageable; import com.netflix.metacat.common.dto.Sort; import com.netflix.metacat.common.dto.SortOrder; import com.netflix.metacat.common.server.connectors.ConnectorContext; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException; import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException; import com.netflix.metacat.common.server.connectors.model.FieldInfo; import com.netflix.metacat.common.server.connectors.model.TableInfo; import com.netflix.metacat.common.server.properties.DefaultConfigImpl; import com.netflix.metacat.common.server.properties.MetacatProperties; import com.netflix.metacat.common.server.util.ThreadServiceManager; import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter; import com.netflix.metacat.connector.hive.converters.HiveTypeConverter; import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl; import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler; import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper; import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy; import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig; import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper; import com.netflix.metacat.connector.polaris.store.PolarisStoreService; import com.netflix.spectator.api.NoopRegistry; import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mockito; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.junit.jupiter.SpringExtension; import spock.lang.Shared; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; /** * Test PolarisConnectorTableService. 
*/ @ExtendWith(SpringExtension.class) @SpringBootTest(classes = {PolarisPersistenceConfig.class}) @ActiveProfiles(profiles = {"polarisconnectortest"}) @DirtiesContext(classMode = DirtiesContext.ClassMode.BEFORE_EACH_TEST_METHOD) @AutoConfigureDataJpa public class PolarisConnectorTableServiceTest { private static final String CATALOG_NAME = "catalog_name"; private static final String DB_NAME = "db_name"; private static final QualifiedName DB_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB_NAME); @Autowired private PolarisStoreService polarisStoreService; @Shared private ConnectorRequestContext requestContext = new ConnectorRequestContext(); @Shared private ThreadServiceManager serviceManager = Mockito.mock(ThreadServiceManager.class); @Shared private ConnectorContext connectorContext; @Shared private PolarisConnectorDatabaseService polarisDBService; @Shared private PolarisConnectorTableService polarisTableService; /** * Initialization. */ @BeforeEach public void init() { connectorContext = new ConnectorContext(CATALOG_NAME, CATALOG_NAME, "polaris", new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null, Maps.newHashMap()); polarisDBService = new PolarisConnectorDatabaseService(polarisStoreService, connectorContext); polarisTableService = new PolarisConnectorTableService( polarisStoreService, CATALOG_NAME, polarisDBService, new HiveConnectorInfoConverter(new HiveTypeConverter()), new IcebergTableHandler(connectorContext, new IcebergTableCriteriaImpl(connectorContext), new IcebergTableOpWrapper(connectorContext, serviceManager), new IcebergTableOpsProxy()), new PolarisTableMapper(CATALOG_NAME), connectorContext); } /** * Test empty list tables. */ @Test public void testListTablesEmpty() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, ""); final List<QualifiedName> names = polarisTableService.listNames( requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0)); Assert.assertEquals(names, Arrays.asList()); } /** * Test table exists. */ @Test public void testTableExists() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); boolean exists = polarisTableService.exists(requestContext, qualifiedName); Assert.assertFalse(exists); polarisTableService.create(requestContext, tableInfo); exists = polarisTableService.exists(requestContext, qualifiedName); Assert.assertTrue(exists); } /** * Test table list. 
*/ @Test public void testList() { final QualifiedName name1 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final TableInfo tableInfo1 = TableInfo.builder() .name(name1) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); polarisTableService.create(requestContext, tableInfo1); final QualifiedName name2 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2"); final TableInfo tableInfo2 = TableInfo.builder() .name(name2) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc2")) .build(); polarisTableService.create(requestContext, tableInfo2); final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, ""); final List<TableInfo> tables = polarisTableService.list( requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0)); Assert.assertEquals(tables.size(), 2); Assert.assertEquals(tables.stream().map(TableInfo::getName).collect(Collectors.toSet()), ImmutableSet.of(name1, name2)); } /** * Test table creation then list tables. */ @Test public void testTableCreationAndList() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); polarisTableService.create(requestContext, tableInfo); final List<QualifiedName> names = polarisTableService.listNames( requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0)); Assert.assertEquals(names, Arrays.asList(qualifiedName)); } /** * Test multiple table creation then list tables. */ @Test public void testMultipleTableCreationAndList() { final List<QualifiedName> createdTables = new ArrayList<>(); for (int i = 0; i < 10; i++) { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table" + i); final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc" + i)) .build(); polarisTableService.create(requestContext, tableInfo); createdTables.add(qualifiedName); } final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, ""); final List<QualifiedName> names = polarisTableService.listNames( requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(20, 0)); Assert.assertEquals(names, createdTables); } /** * Test table rename. */ @Test public void testTableRename() { final QualifiedName nameOld = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final QualifiedName nameNew = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2"); final TableInfo tableInfo = TableInfo.builder() .name(nameOld) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); polarisTableService.create(requestContext, tableInfo); boolean existsOld = polarisTableService.exists(requestContext, nameOld); Assert.assertTrue(existsOld); boolean existsNew = polarisTableService.exists(requestContext, nameNew); Assert.assertFalse(existsNew); polarisTableService.rename(requestContext, nameOld, nameNew); existsOld = polarisTableService.exists(requestContext, nameOld); Assert.assertFalse(existsOld); existsNew = polarisTableService.exists(requestContext, nameNew); Assert.assertTrue(existsNew); } /** * Test delete table. 
*/ @Test public void testDeleteTable() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table"); final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); polarisTableService.create(requestContext, tableInfo); boolean exists = polarisTableService.exists(requestContext, qualifiedName); Assert.assertTrue(exists); polarisTableService.delete(requestContext, qualifiedName); exists = polarisTableService.exists(requestContext, qualifiedName); Assert.assertFalse(exists); } /** * Test get table names. */ @Test public void testGetTableNames() { final QualifiedName name1 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final TableInfo tableInfo1 = TableInfo.builder() .name(name1) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1")) .build(); polarisTableService.create(requestContext, tableInfo1); final QualifiedName name2 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2"); final TableInfo tableInfo2 = TableInfo.builder() .name(name2) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc2")) .build(); polarisTableService.create(requestContext, tableInfo2); final QualifiedName name3 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table3"); final TableInfo tableInfo3 = TableInfo.builder() .name(name3) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc3")) .build(); polarisTableService.create(requestContext, tableInfo3); final List<QualifiedName> tables = polarisTableService.getTableNames(requestContext, DB_QUALIFIED_NAME, "", -1); Assert.assertEquals(tables.size(), 3); Assert.assertEquals(tables, ImmutableList.of(name1, name2, name3)); } /** * Test get table using metadata json resource file. */ @Test public void testGetTable() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final String location = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json"; final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", location)) .build(); polarisTableService.create(requestContext, tableInfo); final TableInfo tableResult = polarisTableService.get(requestContext, qualifiedName); // check schema info correctly parsed from iceberg metadata file final List<FieldInfo> fields = tableResult.getFields(); Assert.assertEquals(fields.size(), 3); Assert.assertEquals(fields.get(0).getName(), "id"); Assert.assertEquals(fields.get(0).getComment(), "1st field"); Assert.assertEquals(fields.get(0).getSourceType(), "long"); Assert.assertEquals(fields.get(1).getName(), "data"); Assert.assertEquals(fields.get(1).getComment(), "2nd field"); Assert.assertEquals(fields.get(1).getSourceType(), "string"); Assert.assertEquals(fields.get(2).getName(), "dateint"); Assert.assertEquals(fields.get(2).getComment(), "3rd field"); Assert.assertEquals(fields.get(2).getSourceType(), "int"); } /** * Test table serde fields. 
*/ @Test public void testTableSerde() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final String location = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json"; final TableInfo tableInfo = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", location)) .build(); polarisTableService.create(requestContext, tableInfo); final TableInfo tableResult = polarisTableService.get(requestContext, qualifiedName); // check serde info Assert.assertNotNull(tableResult.getSerde()); Assert.assertEquals(tableResult.getSerde().getUri(), "src/test/resources"); Assert.assertEquals(tableResult.getSerde().getInputFormat(), "org.apache.hadoop.mapred.FileInputFormat"); Assert.assertEquals(tableResult.getSerde().getOutputFormat(), "org.apache.hadoop.mapred.FileOutputFormat"); Assert.assertEquals(tableResult.getSerde().getSerializationLib(), "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); } /** * Test update table reject cases. */ @Test public void testUpdateTableReject() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final String location0 = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json"; final String location1 = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json"; final String location2 = "src/test/resources/metadata/00002-2d6c1951-31d5-4bea-8edd-e35746b172f3.metadata.json"; final Map<String, String> metadata = new HashMap<>(); metadata.put("metadata_location", location0); final TableInfo tableInfo0 = TableInfo.builder().name(qualifiedName).metadata(metadata).build(); polarisTableService.create(requestContext, tableInfo0); final TableInfo tableResult0 = polarisTableService.get(requestContext, qualifiedName); Assert.assertEquals(tableResult0.getMetadata().get("metadata_location"), location0); // check update location without setting location fails metadata.put("previous_metadata_location", location1); metadata.remove("metadata_location"); final TableInfo tableInfo1 = TableInfo.builder().name(qualifiedName).metadata(metadata).build(); Assertions.assertThrows(InvalidMetaException.class, () -> polarisTableService.update(requestContext, tableInfo1)); // check update location to new location equals blank fails metadata.put("previous_metadata_location", location0); metadata.put("metadata_location", ""); final TableInfo tableInfo2 = TableInfo.builder().name(qualifiedName).metadata(metadata).build(); Assertions.assertThrows(InvalidMetaException.class, () -> polarisTableService.update(requestContext, tableInfo2)); // check update location existing and previous location do not match fails metadata.put("previous_metadata_location", location1); metadata.put("metadata_location", location2); final TableInfo tableInfo3 = TableInfo.builder().name(qualifiedName).metadata(metadata).build(); Assertions.assertThrows(TablePreconditionFailedException.class, () -> polarisTableService.update(requestContext, tableInfo3)); } /** * Test update table using metadata json resource file. 
*/ @Test public void testUpdateTableAccept() { final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1"); final String location0 = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json"; final TableInfo tableInfo0 = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("metadata_location", location0)) .build(); polarisTableService.create(requestContext, tableInfo0); final TableInfo tableResult0 = polarisTableService.get(requestContext, qualifiedName); Assert.assertEquals(tableResult0.getMetadata().get("metadata_location"), location0); final String location1 = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json"; final TableInfo tableInfo1 = TableInfo.builder() .name(qualifiedName) .metadata(ImmutableMap.of("previous_metadata_location", location0, "metadata_location", location1)) .build(); polarisTableService.update(requestContext, tableInfo1); final TableInfo tableResult1 = polarisTableService.get(requestContext, qualifiedName); Assert.assertEquals(tableResult1.getMetadata().get("metadata_location"), location1); } }
9,988
0
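The table-service tests above all rely on the same metadata contract: a table is created with a metadata_location entry (plus table_type=ICEBERG), and an update must carry a previous_metadata_location that matches the currently stored location. Below is a condensed, hypothetical sketch of that commit flow; the helper class, table name, and location arguments are made up, while the metadata keys and service methods are exactly the ones exercised in the tests.

package com.netflix.metacat.connector.polaris;

import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.TableInfo;

/**
 * Hypothetical helper illustrating the metadata-location commit contract.
 */
final class IcebergCommitSketch {
    private IcebergCommitSketch() {
    }

    static void commit(final PolarisConnectorTableService tableService,
                       final ConnectorRequestContext context,
                       final String currentLocation,
                       final String newLocation) {
        final QualifiedName name = QualifiedName.ofTable("catalog_name", "db_name", "example_table");
        final TableInfo update = TableInfo.builder()
            .name(name)
            .metadata(ImmutableMap.of(
                "previous_metadata_location", currentLocation,
                "metadata_location", newLocation))
            .build();
        // Per testUpdateTableReject above, this throws InvalidMetaException if the
        // new location is missing or blank, and TablePreconditionFailedException if
        // currentLocation no longer matches what is stored.
        tableService.update(context, update);
    }
}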
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorDatabaseServiceTest.java
package com.netflix.metacat.connector.polaris; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.server.connectors.ConnectorContext; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException; import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException; import com.netflix.metacat.common.server.connectors.model.DatabaseInfo; import com.netflix.metacat.common.server.properties.DefaultConfigImpl; import com.netflix.metacat.common.server.properties.MetacatProperties; import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig; import com.netflix.metacat.connector.polaris.store.PolarisStoreService; import com.netflix.spectator.api.NoopRegistry; import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.junit.jupiter.SpringExtension; import spock.lang.Shared; import java.util.List; /** * Test PolarisConnectorTableService. */ @ExtendWith(SpringExtension.class) @SpringBootTest(classes = {PolarisPersistenceConfig.class}) @ActiveProfiles(profiles = {"polarisconnectortest"}) @DirtiesContext(classMode = DirtiesContext.ClassMode.BEFORE_EACH_TEST_METHOD) @AutoConfigureDataJpa public class PolarisConnectorDatabaseServiceTest { private static final String CATALOG_NAME = "catalog_name"; private static final String DB1_NAME = "db1_name"; private static final String DB2_NAME = "db2_name"; private static final QualifiedName DB1_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB1_NAME); private static final QualifiedName DB2_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB2_NAME); @Autowired private PolarisStoreService polarisStoreService; @Shared private ConnectorContext connectorContext; @Shared private ConnectorRequestContext requestContext = new ConnectorRequestContext(); @Shared private PolarisConnectorDatabaseService polarisDBService; /** * Initialization. */ @BeforeEach public void init() { connectorContext = new ConnectorContext(CATALOG_NAME, CATALOG_NAME, "polaris", new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null, Maps.newHashMap()); polarisDBService = new PolarisConnectorDatabaseService(polarisStoreService, connectorContext); } /** * Test create database. */ @Test public void testCreateDb() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build(); polarisDBService.create(requestContext, info); Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); } /** * Test get database that exists. */ @Test public void testGetDb() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri").build(); polarisDBService.create(requestContext, info); final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME); Assert.assertEquals(info, result); } /** * Test get database not found. 
*/ @Test public void testGetDbNotFound() { Assertions.assertThrows(DatabaseNotFoundException.class, () -> polarisDBService.get(requestContext, DB1_QUALIFIED_NAME)); } /** * Test create database that already exists. */ @Test public void testCreateDbAlreadyExists() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build(); polarisDBService.create(requestContext, info); Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); Assertions.assertThrows(DatabaseAlreadyExistsException.class, () -> polarisDBService.create(requestContext, info)); } /** * Test create database with no uri set should fallback to default uri. */ @Test public void testCreateDbDefaultUri() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build(); polarisDBService.create(requestContext, info); final DatabaseInfo infoExpected = DatabaseInfo.builder() .name(DB1_QUALIFIED_NAME).uri("db1_name.db").build(); final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME); Assert.assertEquals(infoExpected, result); } /** * Test update database. */ @Test public void testUpdateDb() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri").build(); polarisDBService.create(requestContext, info); Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); polarisDBService.update(requestContext, info); final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME); Assert.assertEquals(info, result); } /** * Test delete database. */ @Test public void testDeleteDb() { final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build(); polarisDBService.create(requestContext, info); Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); polarisDBService.delete(requestContext, DB1_QUALIFIED_NAME); Assert.assertFalse(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); } /** * Test list databases. */ @Test public void testListDb() { final DatabaseInfo db1 = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri1").build(); final DatabaseInfo db2 = DatabaseInfo.builder().name(DB2_QUALIFIED_NAME).uri("uri2").build(); polarisDBService.create(requestContext, db1); polarisDBService.create(requestContext, db2); Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME)); Assert.assertTrue(polarisDBService.exists(requestContext, DB2_QUALIFIED_NAME)); final List<QualifiedName> dbNames = polarisDBService.listNames(requestContext, QualifiedName.ofCatalog(CATALOG_NAME), null, null, null); Assert.assertEquals(Sets.newHashSet(dbNames), Sets.newHashSet(DB1_QUALIFIED_NAME, DB2_QUALIFIED_NAME)); final List<DatabaseInfo> dbs = polarisDBService.list(requestContext, QualifiedName.ofCatalog(CATALOG_NAME), null, null, null); Assert.assertEquals(Sets.newHashSet(dbs), Sets.newHashSet(db1, db2)); } }
9,989
0
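testCreateDbDefaultUri above shows the fallback location a database gets when no URI is supplied; the prefix comes from the polaris.db-default-location configuration entry read by PolarisConnectorDatabaseService (shown later in this section). The sketch below makes that wiring explicit; the prefix value and helper class are made up, and the ConnectorContext constructor call simply mirrors the one used in these tests.

package com.netflix.metacat.connector.polaris;

import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.properties.DefaultConfigImpl;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.spectator.api.NoopRegistry;

/**
 * Hypothetical helper showing how the default database location is derived.
 */
final class DefaultDbLocationSketch {
    private DefaultDbLocationSketch() {
    }

    static void createWithDefaultLocation(final PolarisStoreService store,
                                          final ConnectorRequestContext context) {
        final ConnectorContext connectorContext = new ConnectorContext(
            "catalog_name", "catalog_name", "polaris",
            new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null,
            // Made-up warehouse prefix, keyed by the same property the service reads.
            ImmutableMap.of("polaris.db-default-location", "s3://warehouse/"));
        final PolarisConnectorDatabaseService service =
            new PolarisConnectorDatabaseService(store, connectorContext);
        // With no uri set, the stored location should come out as
        // "s3://warehouse/db1_name.db" (prefix + database name + ".db").
        service.create(context, DatabaseInfo.builder()
            .name(QualifiedName.ofDatabase("catalog_name", "db1_name"))
            .build());
    }
}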
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorPartitionServiceTest.java
package com.netflix.metacat.connector.polaris; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.dto.Pageable; import com.netflix.metacat.common.dto.Sort; import com.netflix.metacat.common.server.connectors.ConnectorContext; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.model.PartitionInfo; import com.netflix.metacat.common.server.connectors.model.PartitionListRequest; import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest; import com.netflix.metacat.common.server.connectors.model.StorageInfo; import com.netflix.metacat.common.server.connectors.model.TableInfo; import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import java.util.Arrays; import java.util.Collections; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.same; public class PolarisConnectorPartitionServiceTest { private QualifiedName tableName; private QualifiedName partitionName; private QualifiedName partitionName2; private TableInfo tableInfo; private PartitionListRequest partitionsListRequest; private PartitionsSaveRequest partitionsSaveRequest; private List<String> partitionNames; private List<String> uris; private PartitionInfo partitionInfo; private PartitionInfo partitionInfo2; private Sort sort; @Mock private ConnectorRequestContext requestContext; @Mock private ConnectorContext connectorContext; @Mock private IcebergTableHandler icebergTableHandler; @Mock private PolarisConnectorTableService tableService; private PolarisConnectorPartitionService polaris; @Before public void setup() { MockitoAnnotations.initMocks(this); tableName = QualifiedName.fromString("catalog/db/table"); partitionName = QualifiedName.fromString("catalog/db/table/dateint=20230101"); partitionName2 = QualifiedName.fromString("catalog/db/table/dateint=20230102"); tableInfo = TableInfo.builder().name(tableName).build(); partitionsListRequest = new PartitionListRequest(); partitionsSaveRequest = new PartitionsSaveRequest(); partitionNames = Arrays.asList("p1", "p2"); uris = Arrays.asList("u1", "u2"); partitionInfo = new PartitionInfo(); partitionInfo2 = new PartitionInfo(); sort = new Sort(); polaris = new PolarisConnectorPartitionService(connectorContext, icebergTableHandler, tableService); } @Test public void getPartitions() { partitionsListRequest.setFilter("filter"); partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101")); partitionsListRequest.setPageable(new Pageable(1, 0)); partitionsListRequest.setSort(sort); doReturn(Arrays.asList(partitionInfo, partitionInfo2)) .when(icebergTableHandler).getPartitions( same(tableInfo), same(connectorContext), eq("filter"), eq(Collections.singletonList("dateint=20230101")), same(sort)); final List<PartitionInfo> partitions = polaris.getPartitions(requestContext, tableName, partitionsListRequest, tableInfo); assertThat(partitions).isEqualTo(Collections.singletonList(partitionInfo)); } @Test public void getPartitionKeys() { partitionInfo.setName(partitionName); partitionInfo2.setName(partitionName2); partitionsListRequest.setFilter("filter"); 
partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101")); partitionsListRequest.setPageable(new Pageable(2, 0)); partitionsListRequest.setSort(sort); doReturn(Arrays.asList(partitionInfo, partitionInfo2)) .when(icebergTableHandler).getPartitions( same(tableInfo), same(connectorContext), eq("filter"), eq(Collections.singletonList("dateint=20230101")), same(sort)); final List<String> partitionKeys = polaris.getPartitionKeys(requestContext, tableName, partitionsListRequest, tableInfo); assertThat(partitionKeys).isEqualTo(Arrays.asList("dateint=20230101", "dateint=20230102")); } @Test public void get() { partitionInfo.setName(partitionName); partitionInfo2.setName(partitionName2); doReturn(tableInfo).when(tableService) .get(requestContext, QualifiedName.ofTable("catalog", "db", "table")); doReturn(Arrays.asList(partitionInfo, partitionInfo2)) .when(icebergTableHandler).getPartitions( same(tableInfo), same(connectorContext), eq(null), eq(Collections.singletonList("dateint=20230101")), eq(null)); final PartitionInfo partition = polaris.get(requestContext, partitionName); assertThat(partition).isSameAs(partitionInfo); } @Test public void getPartitionNames() { assertThatThrownBy(() -> polaris.getPartitionNames(requestContext, uris, true)) .isInstanceOf(UnsupportedOperationException.class); } @Test public void getPartitionUris() { partitionInfo.setName(partitionName); partitionInfo2.setName(partitionName2); partitionInfo.setSerde(StorageInfo.builder().uri("uri1").build()); partitionInfo2.setSerde(StorageInfo.builder().uri("uri2").build()); doReturn(Arrays.asList(partitionInfo, partitionInfo2)) .when(icebergTableHandler).getPartitions( same(tableInfo), same(connectorContext), eq("filter"), eq(Collections.singletonList("dateint=20230101")), same(sort)); partitionsListRequest.setFilter("filter"); partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101")); partitionsListRequest.setPageable(new Pageable(1, 1)); partitionsListRequest.setSort(sort); final List<String> partitionUris = polaris.getPartitionUris(requestContext, tableName, partitionsListRequest, tableInfo); assertThat(partitionUris).isEqualTo(Collections.singletonList("uri2")); } @Test public void getPartitionCount() { doReturn(Arrays.asList(partitionInfo, partitionInfo2)) .when(icebergTableHandler).getPartitions( same(tableInfo), same(connectorContext), eq(null), eq(null), eq(null)); assertThat(polaris.getPartitionCount(requestContext, tableName, tableInfo)).isEqualTo(2); } @Test public void create() { assertThatThrownBy(() -> polaris.create(requestContext, partitionInfo)) .isInstanceOf(UnsupportedOperationException.class); } @Test public void update() { assertThatThrownBy(() -> polaris.update(requestContext, partitionInfo)) .isInstanceOf(UnsupportedOperationException.class); } @Test public void delete() { assertThatThrownBy(() -> polaris.delete(requestContext, partitionName)) .isInstanceOf(UnsupportedOperationException.class); } @Test public void savePartitions() { assertThatThrownBy(() -> polaris.savePartitions(requestContext, tableName, partitionsSaveRequest)) .isInstanceOf(UnsupportedOperationException.class); } @Test public void deletePartitions() { assertThatThrownBy(() -> polaris.deletePartitions(requestContext, tableName, partitionNames, tableInfo)) .isInstanceOf(UnsupportedOperationException.class); } }
9,990
0
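The partition-service tests above lean on ConnectorUtils.paginate together with Pageable: getPartitions with Pageable(1, 0) keeps only the first partition, while getPartitionUris with Pageable(1, 1) keeps only the second URI, which suggests a (limit, offset) reading of the constructor. A tiny, hypothetical sketch of that behaviour follows; the partition names are invented and only classes already used elsewhere in this section appear.

package com.netflix.metacat.connector.polaris;

import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;

import java.util.List;

/**
 * Hypothetical demonstration of the pagination semantics assumed by the tests.
 */
final class PageableSketch {
    private PageableSketch() {
    }

    public static void main(final String[] args) {
        final List<String> names =
            ImmutableList.of("dateint=20230101", "dateint=20230102", "dateint=20230103");
        // Under the (limit, offset) reading this prints [dateint=20230102].
        System.out.println(ConnectorUtils.paginate(names, new Pageable(1, 1)));
    }
}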
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/package-info.java
/* * * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * Polaris test classes. */ package com.netflix.metacat.connector.polaris;
9,991
0
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/store/PolarisStoreConnectorTest.java
package com.netflix.metacat.connector.polaris.store; import com.netflix.metacat.connector.polaris.common.PolarisUtils; import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig; import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity; import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity; import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository; import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository; import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.dao.DataAccessException; import org.springframework.dao.OptimisticLockingFailureException; import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.junit.jupiter.SpringExtension; import java.util.List; import java.util.Optional; import java.util.Random; /** * Test persistence operations on Database objects. */ @ExtendWith(SpringExtension.class) @SpringBootTest(classes = {PolarisPersistenceConfig.class}) @ActiveProfiles(profiles = {"polaristest"}) @AutoConfigureDataJpa public class PolarisStoreConnectorTest { private static final String DB_NAME_FOO = "foo"; private static final String TBL_NAME_BAR = "bar"; private static final String DEFAULT_METACAT_USER = "metacat_user"; private static Random random = new Random(System.currentTimeMillis()); @Autowired private PolarisDatabaseRepository repo; @Autowired private PolarisTableRepository tblRepo; @Autowired private PolarisStoreConnector polarisConnector; private static String generateDatabaseName() { return DB_NAME_FOO + "_" + random.nextLong(); } private static String generateTableName() { return TBL_NAME_BAR + "_" + random.nextLong(); } private PolarisDatabaseEntity createDB(final String dbName) { final String location = "file://temp"; final PolarisDatabaseEntity entity = polarisConnector.createDatabase(dbName, location, "metacat_user"); // assert that database exists, post-creation. 
Assert.assertTrue(polarisConnector.databaseExistsById(entity.getDbId())); Assert.assertTrue(polarisConnector.databaseExists(dbName)); Assert.assertEquals(0L, entity.getVersion().longValue()); Assert.assertTrue(entity.getDbId().length() > 0); Assert.assertEquals(dbName, entity.getDbName()); Assert.assertEquals(location, entity.getLocation()); Assert.assertEquals(DEFAULT_METACAT_USER, entity.getAudit().getCreatedBy()); final Optional<PolarisDatabaseEntity> fetchedEntity = polarisConnector.getDatabase(dbName); Assert.assertTrue(fetchedEntity.isPresent()); Assert.assertEquals(entity, fetchedEntity.get()); return entity; } private PolarisTableEntity createTable(final String dbName, final String tblName) { final PolarisTableEntity entity = polarisConnector.createTable(dbName, tblName, "loc", PolarisUtils.DEFAULT_METACAT_USER); Assert.assertTrue(polarisConnector.tableExistsById(entity.getTblId())); Assert.assertTrue(polarisConnector.tableExists(dbName, tblName)); Assert.assertTrue(entity.getTblId().length() > 0); Assert.assertTrue(entity.getVersion() >= 0); Assert.assertEquals(dbName, entity.getDbName()); Assert.assertEquals(tblName, entity.getTblName()); final Optional<PolarisTableEntity> fetchedEntity = polarisConnector.getTable(dbName, tblName); Assert.assertTrue(fetchedEntity.isPresent()); Assert.assertEquals(entity, fetchedEntity.get()); return entity; } /** * Test Database object creation and persistence. */ @Test public void testCreateDB() { final PolarisDatabaseEntity savedEntity = createDB(generateDatabaseName()); } /** * Test that a table cannot be created if database is absent. */ @Test public void testTableCreationFailIfDatabaseIsAbsent() { Assertions.assertThrows(DataAccessException.class, () -> polarisConnector.createTable(generateDatabaseName(), generateTableName(), "loc", PolarisUtils.DEFAULT_METACAT_USER)); } /** * Test table creation if database exists. * Verify table deletion */ @Test public void testTableCreationAndDeletion() { final String dbName = generateDatabaseName(); final String tblName = generateTableName(); final PolarisDatabaseEntity dbEntity = createDB(dbName); final PolarisTableEntity tblEntity = createTable(dbName, tblName); polarisConnector.deleteTable(dbName, tblName); Assert.assertFalse(polarisConnector.tableExistsById(tblEntity.getTblId())); } /** * Test to verify that table names fetch works. */ @Test public void testPaginatedFetch() { final String dbName = generateDatabaseName(); final PolarisDatabaseEntity dbEntity = createDB(dbName); List<String> tblNames = polarisConnector.getTables(dbName, ""); Assert.assertEquals(0, tblNames.size()); final String tblNameA = "A_" + generateTableName(); final String tblNameB = "B_" + generateTableName(); final String tblNameC = "C_" + generateTableName(); createTable(dbName, tblNameA); createTable(dbName, tblNameB); createTable(dbName, tblNameC); tblNames = polarisConnector.getTables(dbName, ""); Assert.assertEquals(3, tblNames.size()); Assert.assertEquals(tblNameA, tblNames.get(0)); Assert.assertEquals(tblNameB, tblNames.get(1)); Assert.assertEquals(tblNameC, tblNames.get(2)); } /** * Test to verify that table name can be updated. 
*/ @Test public void testTableUpdate() { // Create Table Entity in DB final String dbName = generateDatabaseName(); final String tblName = generateTableName(); final PolarisDatabaseEntity dbEntity = createDB(dbName); final PolarisTableEntity tblEntity = createTable(dbName, tblName); // Update table name final String newTblName = generateTableName(); tblEntity.setTblName(newTblName); final PolarisTableEntity updatedTblEntity = polarisConnector.saveTable(tblEntity); Assert.assertEquals(newTblName, updatedTblEntity.getTblName()); } /** * Test to validate that the table can be created via a PolarisTableEntity parameter. * Also tests that metadata_location is getting stored. */ @Test public void createTableWithSaveApi() { final String dbName = generateDatabaseName(); createDB(dbName); final String tblName = generateTableName(); final String metadataLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/foo"; final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser"); e.setMetadataLocation(metadataLocation); final PolarisTableEntity savedEntity = polarisConnector.saveTable(e); Assert.assertEquals(metadataLocation, savedEntity.getMetadataLocation()); } /** * Test to verify that compare-and-swap update of the metadata location works as expected. */ @Test public void updateMetadataLocation() { final String dbName = generateDatabaseName(); createDB(dbName); final String tblName = generateTableName(); final String metadataLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/foo"; final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser"); e.setMetadataLocation(metadataLocation); final PolarisTableEntity savedEntity = polarisConnector.saveTable(e); final String newLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/bar"; // update should fail since the expected location is not going to match. boolean updatedSuccess = polarisConnector.updateTableMetadataLocation( dbName, tblName, "unexpected_location", newLocation, PolarisUtils.DEFAULT_METACAT_USER); Assert.assertFalse(updatedSuccess); // successful update should happen. updatedSuccess = polarisConnector.updateTableMetadataLocation(dbName, tblName, metadataLocation, newLocation, "new_user"); Assert.assertTrue(updatedSuccess); final PolarisTableEntity updatedEntity = polarisConnector. getTable(dbName, tblName).orElseThrow(() -> new RuntimeException("Expected to find saved entity")); Assert.assertEquals(updatedEntity.getPreviousMetadataLocation(), metadataLocation); // after the successful update, the same call should fail, since the current metadataLocation has changed. updatedSuccess = polarisConnector.updateTableMetadataLocation(dbName, tblName, metadataLocation, newLocation, PolarisUtils.DEFAULT_METACAT_USER); Assert.assertFalse(updatedSuccess); } /** * Test updateLocation(...) while save(...) is called in interleaved fashion. */ @Test public void updateMetadataLocationWithInterleavedSave() { final String dbName = generateDatabaseName(); createDB(dbName); final String tblName = generateTableName(); final String location0 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location0"; final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser"); e.setMetadataLocation(location0); final PolarisTableEntity savedEntity = polarisConnector.saveTable(e); final String location1 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location1"; // update the metadata location. 
final boolean updatedSuccess = polarisConnector.updateTableMetadataLocation(dbName, tblName, location0, location1, "new_user"); Assert.assertTrue(updatedSuccess); final String location2 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location2"; // At this point, savedEntity is stale, and any updates to savedEntity should not be allowed // to persist. savedEntity.setMetadataLocation(location2); Assertions.assertThrows(OptimisticLockingFailureException.class, () -> { polarisConnector.saveTable(savedEntity); }); } }
9,992
0
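updateMetadataLocation above verifies the store's compare-and-swap primitive: updateTableMetadataLocation only succeeds when the caller's expected location still matches the stored one. Here is a hypothetical sketch of how a caller might drive it with a simple retry loop; the helper class and retry policy are made up, and only PolarisStoreConnector methods exercised in this test are called.

package com.netflix.metacat.connector.polaris.store;

import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;

/**
 * Hypothetical caller of the compare-and-swap metadata-location update.
 */
final class MetadataLocationCasSketch {
    private MetadataLocationCasSketch() {
    }

    static boolean commitWithRetry(final PolarisStoreConnector connector,
                                   final String dbName,
                                   final String tblName,
                                   final String newLocation,
                                   final String user,
                                   final int maxAttempts) {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            final String current = connector.getTable(dbName, tblName)
                .map(PolarisTableEntity::getMetadataLocation)
                .orElse(null);
            if (current == null) {
                return false; // table no longer exists
            }
            // Succeeds only if "current" still matches the stored metadata location.
            if (connector.updateTableMetadataLocation(dbName, tblName, current, newLocation, user)) {
                return true;
            }
            // Lost a race: re-read the current location and try again.
        }
        return false;
    }
}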
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/store/package-info.java
/* * * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * Polaris connector test classes. */ package com.netflix.metacat.connector.polaris.store;
9,993
0
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorPartitionService.java
package com.netflix.metacat.connector.polaris; import com.netflix.metacat.common.QualifiedName; import com.netflix.metacat.common.server.connectors.ConnectorContext; import com.netflix.metacat.common.server.connectors.ConnectorPartitionService; import com.netflix.metacat.common.server.connectors.ConnectorRequestContext; import com.netflix.metacat.common.server.connectors.ConnectorUtils; import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException; import com.netflix.metacat.common.server.connectors.model.PartitionInfo; import com.netflix.metacat.common.server.connectors.model.PartitionListRequest; import com.netflix.metacat.common.server.connectors.model.TableInfo; import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler; import lombok.NonNull; import lombok.RequiredArgsConstructor; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; /** * Partition service for Iceberg tables in Polaris. * * Currently, supports read-only methods with the exception of getPartitionNames. */ @RequiredArgsConstructor public class PolarisConnectorPartitionService implements ConnectorPartitionService { private final ConnectorContext context; private final IcebergTableHandler icebergTableHandler; private final PolarisConnectorTableService tableService; /** * {@inheritDoc}. */ @Override public List<PartitionInfo> getPartitions(@NonNull final ConnectorRequestContext requestContext, @NonNull final QualifiedName tableName, @NonNull final PartitionListRequest partitionsRequest, @NonNull final TableInfo tableInfo) { return ConnectorUtils.paginate( icebergTableHandler.getPartitions( tableInfo, context, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(), partitionsRequest.getSort() ), partitionsRequest.getPageable() ); } /** * {@inheritDoc}. */ @Override public List<String> getPartitionKeys(@NonNull final ConnectorRequestContext requestContext, @NonNull final QualifiedName tableName, @NonNull final PartitionListRequest partitionsRequest, @NonNull final TableInfo tableInfo) { return getPartitions(requestContext, tableName, partitionsRequest, tableInfo).stream() .map(info -> info.getName().getPartitionName()) .collect(Collectors.toList()); } /** * {@inheritDoc}. */ @Override public int getPartitionCount(@NonNull final ConnectorRequestContext requestContext, @NonNull final QualifiedName table, @NonNull final TableInfo tableInfo) { return icebergTableHandler.getPartitions( tableInfo, context, null, // filer expression null, // partition ids null // sort ).size(); } /** * {@inheritDoc}. */ @Override public List<String> getPartitionUris(@NonNull final ConnectorRequestContext requestContext, @NonNull final QualifiedName table, @NonNull final PartitionListRequest partitionsRequest, @NonNull final TableInfo tableInfo) { return getPartitions(requestContext, table, partitionsRequest, tableInfo).stream() .map(partitionInfo -> partitionInfo.getSerde().getUri()) .collect(Collectors.toList()); } /** * {@inheritDoc}. 
*/ @Override public PartitionInfo get(@NonNull final ConnectorRequestContext requestContext, @NonNull final QualifiedName partitionName) { final QualifiedName tableName = QualifiedName.ofTable( partitionName.getCatalogName(), partitionName.getDatabaseName(), partitionName.getTableName() ); final TableInfo tableInfo = tableService.get(requestContext, tableName); final List<PartitionInfo> partitions = icebergTableHandler.getPartitions( tableInfo, context, null, Collections.singletonList(partitionName.getPartitionName()), null ); return partitions.stream() .filter(partitionInfo -> partitionInfo.getName().equals(partitionName)) .findFirst() .orElseThrow(() -> new PartitionNotFoundException(tableName, partitionName.getPartitionName())); } }
9,994
0
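PolarisConnectorPartitionService above is read-only: listing, counting, and URI lookups all go through IcebergTableHandler.getPartitions, while the write operations are left unsupported (as the unit tests earlier in this section assert). A hypothetical caller sketch follows; the catalog, database, table, filter string, and page size are made up, and only methods defined on the two services in this section are used.

package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.TableInfo;

import java.util.List;

/**
 * Hypothetical read-only caller of the partition service.
 */
final class PartitionReadSketch {
    private PartitionReadSketch() {
    }

    static List<String> firstPageOfKeys(final PolarisConnectorPartitionService partitionService,
                                        final PolarisConnectorTableService tableService,
                                        final ConnectorRequestContext context) {
        final QualifiedName tableName = QualifiedName.ofTable("catalog", "db", "table");
        final TableInfo tableInfo = tableService.get(context, tableName);
        final PartitionListRequest request = new PartitionListRequest();
        request.setFilter("dateint==20230101"); // made-up filter expression
        request.setPageable(new Pageable(100, 0)); // first 100 partition names
        return partitionService.getPartitionKeys(context, tableName, request, tableInfo);
    }
}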
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorDatabaseService.java
package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.polaris.common.PolarisUtils;
import com.netflix.metacat.connector.polaris.mappers.PolarisDatabaseMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.DataIntegrityViolationException;

import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

/**
 * database service for polaris connector.
 */
@Slf4j
public class PolarisConnectorDatabaseService implements ConnectorDatabaseService {
    private static final String DEFAULT_LOCATION_SUFFIX = ".db";
    private static final String DB_DEFAULT_LOCATION = "polaris.db-default-location";
    private final String defaultLocationPrefix;
    private final PolarisStoreService polarisStoreService;

    /**
     * Constructor.
     *
     * @param polarisStoreService polaris connector
     * @param connectorContext    connector context
     */
    public PolarisConnectorDatabaseService(
        final PolarisStoreService polarisStoreService,
        final ConnectorContext connectorContext
    ) {
        this.polarisStoreService = polarisStoreService;
        this.defaultLocationPrefix = connectorContext.getConfiguration().getOrDefault(DB_DEFAULT_LOCATION, "");
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void create(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
        final QualifiedName name = databaseInfo.getName();
        final String createdBy = PolarisUtils.getUserOrDefault(context);
        // check exists then create in non-transactional optimistic manner
        if (exists(context, name)) {
            throw new DatabaseAlreadyExistsException(name);
        }
        try {
            final String location = databaseInfo.getUri() == null
                ? this.defaultLocationPrefix + name.getDatabaseName() + DEFAULT_LOCATION_SUFFIX
                : databaseInfo.getUri();
            this.polarisStoreService.createDatabase(name.getDatabaseName(), location, createdBy);
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed creating polaris database %s", name), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void delete(final ConnectorRequestContext context, final QualifiedName name) {
        // check exists then delete in non-transactional optimistic manner
        if (!exists(context, name)) {
            throw new DatabaseNotFoundException(name);
        }
        try {
            this.polarisStoreService.deleteDatabase(name.getDatabaseName());
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed deleting polaris database %s", name), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
        final QualifiedName name = databaseInfo.getName();
        try {
            final PolarisDatabaseEntity db = polarisStoreService.getDatabase(name.getDatabaseName())
                .orElseThrow(() -> new DatabaseNotFoundException(name));
            // currently db objects have no mutable fields so this is noop
            db.getAudit().setLastModifiedBy(PolarisUtils.getUserOrDefault(context));
            polarisStoreService.saveDatabase(db.toBuilder().build());
        } catch (DatabaseNotFoundException exception) {
            log.error(String.format("Not found exception for polaris database %s", name), exception);
            throw exception;
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed updating polaris database %s", databaseInfo.getName()), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public DatabaseInfo get(final ConnectorRequestContext context, final QualifiedName name) {
        try {
            final PolarisDatabaseMapper mapper = new PolarisDatabaseMapper(name.getCatalogName());
            final PolarisDatabaseEntity db = polarisStoreService.getDatabase(name.getDatabaseName())
                .orElseThrow(() -> new DatabaseNotFoundException(name));
            return mapper.toInfo(db);
        } catch (DatabaseNotFoundException exception) {
            log.error(String.format("Not found exception for polaris database %s", name), exception);
            throw exception;
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed get polaris database %s", name), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        try {
            return polarisStoreService.getDatabase(name.getDatabaseName()).isPresent();
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed exists polaris database %s", name), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public List<QualifiedName> listNames(
        final ConnectorRequestContext context,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        try {
            List<QualifiedName> qualifiedNames = polarisStoreService.getAllDatabases().stream()
                .map(d -> QualifiedName.ofDatabase(name.getCatalogName(), d.getDbName()))
                .collect(Collectors.toCollection(ArrayList::new));
            if (prefix != null) {
                qualifiedNames = qualifiedNames.stream()
                    .filter(n -> n.startsWith(prefix))
                    .collect(Collectors.toCollection(ArrayList::new));
            }
            if (sort != null) {
                ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
            }
            return ConnectorUtils.paginate(qualifiedNames, pageable);
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed databases list names polaris prefix %s", prefix), exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public List<DatabaseInfo> list(
        final ConnectorRequestContext context,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        try {
            final PolarisDatabaseMapper mapper = new PolarisDatabaseMapper(name.getCatalogName());
            List<PolarisDatabaseEntity> dbs = polarisStoreService.getAllDatabases();
            if (prefix != null) {
                dbs = dbs.stream()
                    .filter(n -> QualifiedName.ofDatabase(name.getCatalogName(), n.getDbName()).startsWith(prefix))
                    .collect(Collectors.toCollection(ArrayList::new));
            }
            if (sort != null) {
                ConnectorUtils.sort(dbs, sort, Comparator.comparing(p -> p.getDbName()));
            }
            return ConnectorUtils.paginate(dbs, pageable).stream()
                .map(d -> mapper.toInfo(d)).collect(Collectors.toList());
        } catch (Exception exception) {
            throw new ConnectorException(
                String.format("Failed databases list polaris prefix %s", prefix), exception);
        }
    }
}
9,995
0
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorFactory.java
package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.polaris.configs.PolarisConnectorConfig;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import org.springframework.core.env.MapPropertySource;

import java.util.Collections;

/**
 * Connector Factory for Polaris.
 */
class PolarisConnectorFactory extends SpringConnectorFactory {
    /**
     * Constructor.
     *
     * @param infoConverter    info converter
     * @param connectorContext connector config
     */
    PolarisConnectorFactory(
        final ConnectorInfoConverter infoConverter,
        final ConnectorContext connectorContext
    ) {
        super(infoConverter, connectorContext);
        super.registerClazz(PolarisConnectorConfig.class, PolarisPersistenceConfig.class);
        super.addEnvProperties(new MapPropertySource(
            "polaris_connector", Collections.unmodifiableMap(connectorContext.getConfiguration())));
        super.refresh();
    }

    @Override
    public ConnectorPartitionService getPartitionService() {
        return ctx.getBean(PolarisConnectorPartitionService.class);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorDatabaseService getDatabaseService() {
        return this.ctx.getBean(PolarisConnectorDatabaseService.class);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTableService getTableService() {
        return this.ctx.getBean(PolarisConnectorTableService.class);
    }
}
9,996
0
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisExceptionMapper.java
package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.NonNull;

import javax.annotation.Nonnull;

/**
 * Class to convert Iceberg client exceptions to connector exceptions.
 */
public class PolarisExceptionMapper {
    /**
     * Convert the given Iceberg exception to a ConnectorException.
     *
     * @param e    The Iceberg client exception
     * @param name The fully qualified name of the resource attempted to be accessed or modified at time of error
     * @return A connector exception wrapping the given exception
     */
    public ConnectorException toConnectorException(
        @Nonnull @NonNull final Exception e,
        @Nonnull @NonNull final QualifiedName name
    ) {
        // TODO: add handling for more exception types
        return new ConnectorException(e.getMessage());
    }
}
9,997
0
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorTableService.java
package com.netflix.metacat.connector.polaris;

import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.polaris.common.PolarisUtils;
import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.dao.DataIntegrityViolationException;

import javax.annotation.Nullable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * table service for polaris connector.
 */
@Slf4j
public class PolarisConnectorTableService implements ConnectorTableService {
    protected final PolarisStoreService polarisStoreService;
    protected final PolarisConnectorDatabaseService polarisConnectorDatabaseService;
    protected final HiveConnectorInfoConverter connectorConverter;
    protected final ConnectorContext connectorContext;
    protected final IcebergTableHandler icebergTableHandler;
    protected final PolarisTableMapper polarisTableMapper;
    protected final String catalogName;

    /**
     * Constructor.
     *
     * @param polarisStoreService             polaris connector
     * @param catalogName                     catalog name
     * @param polarisConnectorDatabaseService connector database service
     * @param connectorConverter              converter
     * @param icebergTableHandler             iceberg table handler
     * @param polarisTableMapper              polaris table polarisTableMapper
     * @param connectorContext                the connector context
     */
    public PolarisConnectorTableService(
        final PolarisStoreService polarisStoreService,
        final String catalogName,
        final PolarisConnectorDatabaseService polarisConnectorDatabaseService,
        final HiveConnectorInfoConverter connectorConverter,
        final IcebergTableHandler icebergTableHandler,
        final PolarisTableMapper polarisTableMapper,
        final ConnectorContext connectorContext
    ) {
        this.polarisStoreService = polarisStoreService;
        this.polarisConnectorDatabaseService = polarisConnectorDatabaseService;
        this.connectorConverter = connectorConverter;
        this.connectorContext = connectorContext;
        this.icebergTableHandler = icebergTableHandler;
        this.polarisTableMapper = polarisTableMapper;
        this.catalogName = catalogName;
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
        final QualifiedName name = tableInfo.getName();
        final String createdBy = PolarisUtils.getUserOrDefault(requestContext);
        // check exists then create in non-transactional optimistic manner
        if (exists(requestContext, name)) {
            throw new TableAlreadyExistsException(name);
        }
        try {
            final PolarisTableEntity entity = polarisTableMapper.toEntity(tableInfo);
            polarisStoreService.createTable(entity.getDbName(), entity.getTblName(),
                entity.getMetadataLocation(), createdBy);
        } catch (DataIntegrityViolationException | InvalidMetaException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            final String msg = String.format("Failed creating polaris table %s", name);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void rename(
        final ConnectorRequestContext context,
        final QualifiedName oldName,
        final QualifiedName newName
    ) {
        // check exists then rename in non-transactional optimistic manner
        if (exists(context, newName)) {
            throw new TableAlreadyExistsException(newName);
        }
        try {
            final String lastModifiedBy = PolarisUtils.getUserOrDefault(context);
            final PolarisTableEntity table = polarisStoreService
                .getTable(oldName.getDatabaseName(), oldName.getTableName())
                .orElseThrow(() -> new TableNotFoundException(oldName));
            table.getAudit().setLastModifiedBy(lastModifiedBy);
            polarisStoreService.saveTable(table.toBuilder().tblName(newName.getTableName()).build());
        } catch (TableNotFoundException exception) {
            log.error(String.format("Not found exception for polaris table %s", oldName), exception);
            throw exception;
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(oldName, exception);
        } catch (Exception exception) {
            final String msg = String.format("Failed renaming polaris table %s", oldName);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
        try {
            final PolarisTableEntity polarisTableEntity = polarisStoreService
                .getTable(name.getDatabaseName(), name.getTableName())
                .orElseThrow(() -> new TableNotFoundException(name));
            final TableInfo info = polarisTableMapper.toInfo(polarisTableEntity);
            final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
            // Return the iceberg table with just the metadata location included if requested.
            if (connectorContext.getConfig().shouldFetchOnlyMetadataLocationEnabled()
                && requestContext.isIncludeMetadataLocationOnly()) {
                return TableInfo.builder()
                    .metadata(Maps.newHashMap(info.getMetadata()))
                    .fields(Collections.emptyList())
                    .build();
            }
            return getIcebergTable(name, tableLoc, info,
                requestContext.isIncludeMetadata(), connectorContext.getConfig().isIcebergCacheEnabled());
        } catch (TableNotFoundException | IllegalArgumentException exception) {
            log.error(String.format("Not found exception for polaris table %s", name), exception);
            throw exception;
        } catch (ConnectorException connectorException) {
            log.error("Encountered connector exception for polaris table {}. {}", name, connectorException);
            throw connectorException;
        } catch (Exception exception) {
            final String msg = String.format("Failed getting polaris table %s", name);
            log.error(msg, exception);
            throw exception;
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public List<QualifiedName> listNames(
        final ConnectorRequestContext requestContext,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        try {
            final List<QualifiedName> qualifiedNames = Lists.newArrayList();
            final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : "";
            for (String tableName : polarisStoreService.getTables(name.getDatabaseName(), tableFilter)) {
                final QualifiedName qualifiedName =
                    QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
                if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                    continue;
                }
                qualifiedNames.add(qualifiedName);
            }
            if (sort != null) {
                ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
            }
            return ConnectorUtils.paginate(qualifiedNames, pageable);
        } catch (Exception exception) {
            final String msg = String.format("Failed polaris list table names %s using prefix %s", name, prefix);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
        final QualifiedName name = tableInfo.getName();
        final Config conf = connectorContext.getConfig();
        final String lastModifiedBy = PolarisUtils.getUserOrDefault(requestContext);
        icebergTableHandler.update(tableInfo);
        try {
            final Map<String, String> newTableMetadata = tableInfo.getMetadata();
            if (MapUtils.isEmpty(newTableMetadata)) {
                log.warn("No parameters defined for iceberg table {}, no data update needed", name);
                return;
            }
            final String prevLoc = newTableMetadata.get(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION);
            final String newLoc = newTableMetadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
            if (StringUtils.isBlank(prevLoc)) {
                log.info("Provided previous {} empty for {} with new {}, treating as no location update needed.",
                    prevLoc, name, newLoc);
                return;
            }
            if (StringUtils.isBlank(newLoc)) {
                final String message = String.format(
                    "Invalid metadata for %s. Provided previous %s or new %s location is empty.",
                    name, prevLoc, newLoc);
                log.error(message);
                throw new InvalidMetaException(name, message, null);
            }
            if (conf.isIcebergPreviousMetadataLocationCheckEnabled()
                && !icebergTableHandler.doesMetadataLocationExist(name, prevLoc)) {
                final String message = String.format(
                    "Provided previous metadata location: %s for table: %s does not exist.", name, prevLoc);
                log.error(message);
                throw new InvalidMetaException(name, message, null);
            }
            // optimistically attempt to update metadata location
            final boolean updated = polarisStoreService.updateTableMetadataLocation(
                name.getDatabaseName(), name.getTableName(), prevLoc, newLoc, lastModifiedBy);
            // if succeeded then done, else try to figure out why and throw corresponding exception
            if (updated) {
                requestContext.setIgnoreErrorsAfterUpdate(true);
                log.warn("Success servicing Iceberg commit request for table: {}, "
                    + "previousLocation: {}, newLocation: {}", tableInfo.getName(), prevLoc, newLoc);
                return;
            }
            final PolarisTableEntity table = polarisStoreService
                .getTable(name.getDatabaseName(), name.getTableName())
                .orElseThrow(() -> new TableNotFoundException(name));
            final String existingLoc = table.getMetadataLocation();
            log.warn("Error servicing Iceberg commit request for tableId: {}, "
                    + "previousLocation: {}, existingLocation: {}, newLocation: {}",
                table.getTblId(), prevLoc, existingLoc, newLoc);
            if (StringUtils.isBlank(existingLoc)) {
                final String message = String.format(
                    "Invalid metadata location for %s existing location is empty.", name);
                log.error(message);
                throw new TablePreconditionFailedException(name, message, existingLoc, prevLoc);
            }
            if (StringUtils.equalsIgnoreCase(existingLoc, newLoc)) {
                log.warn("Existing metadata location is the same as new. Existing: {}, New: {}",
                    existingLoc, newLoc);
                return;
            }
            if (!Objects.equals(existingLoc, prevLoc)) {
                final String message = String.format(
                    "Invalid metadata location for %s expected: %s, provided: %s", name, existingLoc, prevLoc);
                log.error(message);
                throw new TablePreconditionFailedException(name, message, existingLoc, prevLoc);
            }
        } catch (TableNotFoundException | InvalidMetaException | TablePreconditionFailedException exception) {
            throw exception;
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            final String msg = String.format("Failed updating polaris table %s", tableInfo.getName());
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
        try {
            return polarisStoreService.tableExists(name.getDatabaseName(), name.getTableName());
        } catch (Exception exception) {
            final String msg = String.format("Failed exists polaris table %s", name);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
        // check exists then delete in non-transactional optimistic manner
        if (!exists(requestContext, name)) {
            throw new TableNotFoundException(name);
        }
        try {
            polarisStoreService.deleteTable(name.getDatabaseName(), name.getTableName());
        } catch (DataIntegrityViolationException exception) {
            throw new InvalidMetaException(name, exception);
        } catch (Exception exception) {
            final String msg = String.format("Failed deleting polaris table %s", name);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public List<TableInfo> list(
        final ConnectorRequestContext requestContext,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        try {
            final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : "";
            final List<PolarisTableEntity> tbls =
                polarisStoreService.getTableEntities(name.getDatabaseName(), tableFilter);
            if (sort != null) {
                ConnectorUtils.sort(tbls, sort, Comparator.comparing(t -> t.getTblName()));
            }
            return ConnectorUtils.paginate(tbls, pageable).stream()
                .map(t -> polarisTableMapper.toInfo(t)).collect(Collectors.toList());
        } catch (Exception exception) {
            final String msg = String.format("Failed polaris list tables %s using prefix %s", name, prefix);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }

    /**
     * Return the table metadata from cache if exists else make the iceberg call and refresh it.
     *
     * @param tableName             table name
     * @param tableMetadataLocation table metadata location
     * @param info                  table info stored in hive metastore
     * @param includeInfoDetails    if true, will include more details like the manifest file content
     * @param useCache              true, if table can be retrieved from cache
     * @return TableInfo
     */
    @Cacheable(key = "'iceberg.table.' + #includeInfoDetails + '.' + #tableMetadataLocation", condition = "#useCache")
    public TableInfo getIcebergTable(final QualifiedName tableName,
                                     final String tableMetadataLocation,
                                     final TableInfo info,
                                     final boolean includeInfoDetails,
                                     final boolean useCache) {
        final IcebergTableWrapper icebergTable =
            this.icebergTableHandler.getIcebergTable(tableName, tableMetadataLocation, includeInfoDetails);
        return connectorConverter.fromIcebergTableToTableInfo(tableName, icebergTable, tableMetadataLocation, info);
    }

    @Override
    public List<QualifiedName> getTableNames(
        final ConnectorRequestContext context,
        final QualifiedName name,
        final String filter,
        @Nullable final Integer limit) {
        try {
            if (!Strings.isNullOrEmpty(filter)) {
                // workaround for trino issue, hive param filters not supported on iceberg tables
                log.warn(String.format("Calling Polaris getTableNames with nonempty filter %s", filter));
            }
            final List<String> databaseNames = name.isDatabaseDefinition()
                ? ImmutableList.of(name.getDatabaseName())
                : polarisStoreService.getAllDatabases().stream().map(d -> d.getDbName()).collect(Collectors.toList());
            int limitSize = limit == null || limit < 0 ? Integer.MAX_VALUE : limit;
            final List<QualifiedName> result = Lists.newArrayList();
            for (int i = 0; i < databaseNames.size() && limitSize > 0; i++) {
                final String databaseName = databaseNames.get(i);
                // list tables under the current database being iterated
                final List<String> tableNames = polarisStoreService.getTables(databaseName, "");
                result.addAll(tableNames.stream()
                    .map(n -> QualifiedName.ofTable(name.getCatalogName(), databaseName, n))
                    .limit(limitSize)
                    .collect(Collectors.toList()));
                limitSize = limitSize - tableNames.size();
            }
            return result;
        } catch (Exception exception) {
            final String msg = String.format("Failed polaris get table names using %s", name);
            log.error(msg, exception);
            throw new ConnectorException(msg, exception);
        }
    }
}
9,998
0
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorPlugin.java
package com.netflix.metacat.connector.polaris;

import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import lombok.NonNull;

import javax.annotation.Nonnull;

/**
 * Polaris Connector Plugin.
 */
public class PolarisConnectorPlugin implements ConnectorPlugin {

    private static final String CONNECTOR_TYPE = "polaris";
    private static final HiveTypeConverter TYPE_CONVERTER = new HiveTypeConverter();
    private static final HiveConnectorInfoConverter INFO_CONVERTER =
        new HiveConnectorInfoConverter(TYPE_CONVERTER);

    /**
     * {@inheritDoc}
     */
    @Override
    public String getType() {
        return CONNECTOR_TYPE;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
        return new PolarisConnectorFactory(INFO_CONVERTER, connectorContext);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTypeConverter getTypeConverter() {
        return TYPE_CONVERTER;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorInfoConverter getInfoConverter() {
        return INFO_CONVERTER;
    }
}
9,999