code (stringlengths 73-34.1k) | label (stringclasses 1 value)
---|---|
public BeanId getFirstReference(final String propertyName) {
List<BeanId> references = getReference(propertyName);
if (references == null || references.isEmpty()) {
return null;
}
return references.get(0);
} | java |
public static Optional<CacheManager> lookup() {
CacheManager manager = lookup.lookup(CacheManager.class);
if (manager != null) {
return Optional.of(manager);
} else {
return Optional.absent();
}
} | java |
public static String percentEncode(String source) throws AuthException {
try {
return URLEncoder.encode(source, "UTF-8")
.replace("+", "%20")
.replace("*", "%2A")
.replace("%7E", "~");
} catch (UnsupportedEncodingException ex) {
throw new AuthException("cannot encode value '" + source + "'", ex);
}
} | java |
public static String percentDecode(String source) throws AuthException {
try {
return URLDecoder.decode(source, "UTF-8");
} catch (java.io.UnsupportedEncodingException ex) {
throw new AuthException("cannot decode value '" + source + "'", ex);
}
} | java |
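The two helpers above implement RFC 3986 style percent-encoding on top of `URLEncoder`/`URLDecoder`: the three `replace` calls undo the form-encoding quirks (space as `+`, unescaped `*`, escaped `~`). A minimal standalone sketch of the same round trip, outside the `AuthException` wrapper (the sample input is hypothetical):

```java
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;

public class PercentEncodingSketch {
    public static void main(String[] args) throws UnsupportedEncodingException {
        String source = "a b*c~d";
        // URLEncoder produces application/x-www-form-urlencoded output; adjust it to RFC 3986 rules.
        String encoded = URLEncoder.encode(source, "UTF-8")
                .replace("+", "%20")
                .replace("*", "%2A")
                .replace("%7E", "~");
        System.out.println(encoded); // a%20b%2Ac~d
        // URLDecoder already understands %20 and %2A, so a plain decode restores the original.
        String decoded = URLDecoder.decode(encoded, "UTF-8");
        System.out.println(decoded.equals(source)); // true
    }
}
```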
public static <T> T wrapTempFileList(T original, com.aoindustries.io.TempFileList tempFileList, Wrapper<T> wrapper) {
if(tempFileList != null) {
return wrapper.call(original, tempFileList);
} else {
// Warn once
synchronized(tempFileWarningLock) {
if(!tempFileWarned) {
if(logger.isLoggable(Level.WARNING)) {
logger.log(
Level.WARNING,
"TempFileContext not initialized: refusing to automatically create temp files for large buffers. "
+ "Additional heap space may be used for large requests. "
+ "Please add the " + TempFileContext.class.getName() + " filter to your web.xml file.",
new Throwable("Stack Trace")
);
}
tempFileWarned = true;
}
}
return original;
}
} | java |
public List<Method> listMethods( final Class<?> classObj,
final String methodName )
{
//
// Get the array of methods for my classname.
//
Method[] methods = classObj.getMethods();
List<Method> methodSignatures = new ArrayList<Method>();
//
// Loop through all the methods and collect those matching the requested name.
//
for ( int ii = 0; ii < methods.length; ++ii )
{
if ( methods[ii].getName().equals( methodName ) )
{
methodSignatures.add( methods[ii] );
}
}
return methodSignatures;
} | java |
private Map<Integer, Double> predict(final double[] x) {
Map<Integer, Double> result = new HashMap<>();
for (int i = 0; i < model.weights.length; i++) {
double y = VectorUtils.dotProduct(x, model.weights[i]);
y += model.bias[i];
result.put(i, y);
}
return result;
} | java |
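Here `predict` scores each class index as the dot product of the input vector with that class's weight row, plus the class bias. A minimal standalone sketch of the same scoring step, with plain arrays standing in for `model` and `VectorUtils` (all numbers are hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

public class LinearScoreSketch {
    // Plain dot product, standing in for VectorUtils.dotProduct.
    static double dot(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            sum += a[i] * b[i];
        }
        return sum;
    }

    public static void main(String[] args) {
        double[][] weights = {{1.0, -0.5}, {-1.0, 2.0}}; // one weight row per class
        double[] bias = {0.1, -0.2};
        double[] x = {0.3, 0.7};

        Map<Integer, Double> scores = new HashMap<>();
        for (int i = 0; i < weights.length; i++) {
            scores.put(i, dot(x, weights[i]) + bias[i]);
        }
        System.out.println(scores); // approximately {0=0.05, 1=0.9}
    }
}
```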
public void onlineTrain(final double[] x, final int labelIndex) {
Map<Integer, Double> result = predict(x);
Map.Entry<Integer, Double> maxResult = result.entrySet().stream().max((e1, e2) -> e1.getValue().compareTo(e2.getValue())).orElse(null);
if (maxResult.getKey() != labelIndex) {
double e_correction_d = 1;
model.weights[labelIndex] = reweight(x, model.weights[labelIndex], e_correction_d);
model.bias[labelIndex] = e_correction_d;
double w_correction_d = -1;
model.weights[maxResult.getKey()] = reweight(x, model.weights[maxResult.getKey()], w_correction_d);
model.bias[maxResult.getKey()] = w_correction_d;
}
if (LOG.isDebugEnabled()) {
LOG.debug("New bias: " + Arrays.toString(model.bias));
LOG.debug("New weight: " + Arrays.stream(model.weights).map(Arrays::toString).reduce((wi, wii) -> wi + ", " + wii).get());
}
} | java |
@Override
public Map<String, Double> predict(Tuple predict) {
Map<Integer, Double> indexResult = predict(predict.vector.getVector());
return indexResult.entrySet().stream()
.map(e -> new ImmutablePair<>(model.labelIndexer.getLabel(e.getKey()), VectorUtils.sigmoid.apply(e.getValue()))) // Only do sigmoid here!
.collect(Collectors.toMap(ImmutablePair::getLeft, ImmutablePair::getRight));
} | java |
private boolean isTrimEnabled() {
String contentType = response.getContentType();
// If the contentType is the same string (by identity), return the previously determined value.
// This assumes the same string instance is returned by the response when content type not changed between calls.
if(contentType!=isTrimEnabledCacheContentType) {
isTrimEnabledCacheResult =
contentType==null
|| contentType.equals("application/xhtml+xml")
|| contentType.startsWith("application/xhtml+xml;")
|| contentType.equals("text/html")
|| contentType.startsWith("text/html;")
|| contentType.equals("application/xml")
|| contentType.startsWith("application/xml;")
|| contentType.equals("text/xml")
|| contentType.startsWith("text/xml;")
;
isTrimEnabledCacheContentType = contentType;
}
return isTrimEnabledCacheResult;
} | java |
private boolean processChar(char c) {
if(inTextArea) {
if(
c==TrimFilterWriter.textarea_close[readCharMatchCount]
|| c==TrimFilterWriter.TEXTAREA_CLOSE[readCharMatchCount]
) {
readCharMatchCount++;
if(readCharMatchCount>=TrimFilterWriter.textarea_close.length) {
inTextArea=false;
readCharMatchCount=0;
}
} else {
readCharMatchCount=0;
}
return true;
} else if(inPre) {
if(
c==TrimFilterWriter.pre_close[preReadCharMatchCount]
|| c==TrimFilterWriter.PRE_CLOSE[preReadCharMatchCount]
) {
preReadCharMatchCount++;
if(preReadCharMatchCount>=TrimFilterWriter.pre_close.length) {
inPre=false;
preReadCharMatchCount=0;
}
} else {
preReadCharMatchCount=0;
}
return true;
} else {
if(c=='\r') {
readCharMatchCount = 0;
preReadCharMatchCount = 0;
// Carriage return only output when no longer at the beginning of the line
return !atLineStart;
} else if(c=='\n') {
readCharMatchCount = 0;
preReadCharMatchCount = 0;
// Newline only output when no longer at the beginning of the line
if(!atLineStart) {
atLineStart = true;
return true;
} else {
return false;
}
} else if(c==' ' || c=='\t') {
readCharMatchCount = 0;
preReadCharMatchCount = 0;
// Space and tab only output when no longer at the beginning of the line
return !atLineStart;
} else {
atLineStart = false;
if(
c==TrimFilterWriter.textarea[readCharMatchCount]
|| c==TrimFilterWriter.TEXTAREA[readCharMatchCount]
) {
readCharMatchCount++;
if(readCharMatchCount>=TrimFilterWriter.textarea.length) {
inTextArea=true;
readCharMatchCount=0;
}
} else {
readCharMatchCount=0;
}
if(
c==TrimFilterWriter.pre[preReadCharMatchCount]
|| c==TrimFilterWriter.PRE[preReadCharMatchCount]
) {
preReadCharMatchCount++;
if(preReadCharMatchCount>=TrimFilterWriter.pre.length) {
inPre=true;
preReadCharMatchCount=0;
}
} else {
preReadCharMatchCount=0;
}
return true;
}
}
} | java |
public int set( final int flags )
{
for (;;)
{
int current = _flags.get();
int newValue = current | flags;
if ( _flags.compareAndSet( current, newValue ) )
{
return current;
}
}
} | java |
public int unset( final int flags )
{
for (;;)
{
int current = _flags.get();
int newValue = current & ~flags;
if ( _flags.compareAndSet( current, newValue ) )
{
return current;
}
}
} | java |
public int change( final int add,
final int remove )
{
for (;;)
{
int current = _flags.get();
int newValue = ( current | add ) & ~remove;
if ( _flags.compareAndSet( current, newValue ) )
{
return current;
}
}
} | java |
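The three methods above share one lock-free pattern: read the current flag word, compute the new word (`|` to set, `& ~` to clear), and retry `compareAndSet` until it succeeds, returning the previous value. A small standalone sketch of the same pattern, assuming `_flags` is an `AtomicInteger` as the calls suggest:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class FlagWordSketch {
    private final AtomicInteger flags = new AtomicInteger();

    /** Sets the given bits and returns the previous flag word. */
    int set(int bits) {
        for (;;) {
            int current = flags.get();
            if (flags.compareAndSet(current, current | bits)) {
                return current;
            }
        }
    }

    /** Clears the given bits and returns the previous flag word. */
    int unset(int bits) {
        for (;;) {
            int current = flags.get();
            if (flags.compareAndSet(current, current & ~bits)) {
                return current;
            }
        }
    }

    public static void main(String[] args) {
        FlagWordSketch f = new FlagWordSketch();
        System.out.println(f.set(0b0101));   // 0 (previous value)
        System.out.println(f.unset(0b0001)); // 5 (previous value)
        System.out.println(f.flags.get());   // 4
    }
}
```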
public static <E> String message(Response<E> response) {
return Optional.ofNullable(response).map(Response::getMessage).orElse(StringUtils.EMPTY);
} | java |
public static DTree convertTreeBankToCoNLLX(final String constituentTree) {
Tree tree = Tree.valueOf(constituentTree);
SemanticHeadFinder headFinder = new SemanticHeadFinder(false); // keep copula verbs as head
Collection<TypedDependency> dependencies = new EnglishGrammaticalStructure(tree, string -> true, headFinder).typedDependencies();
List<CoreLabel> tokens = tree.taggedLabeledYield();
StanfordParser.tagLemma(tokens);
return StanfordTreeBuilder.generate(tokens, dependencies, null);
} | java |
private String addLocale(Locale locale, String url, String encodedParamName, String encoding) {
// Split the anchor
int poundPos = url.lastIndexOf('#');
String beforeAnchor;
String anchor;
if(poundPos==-1) {
beforeAnchor = url;
anchor = null;
} else {
anchor = url.substring(poundPos);
beforeAnchor = url.substring(0, poundPos);
}
// Only add for non-excluded file types
if(isLocalizedPath(beforeAnchor)) {
int questionPos = beforeAnchor.lastIndexOf('?');
// Only rewrite a URL that does not already contain a paramName parameter.
if(
questionPos == -1
|| (
!beforeAnchor.startsWith("?"+encodedParamName+"=", questionPos)
&& beforeAnchor.indexOf("&"+encodedParamName+"=", questionPos + 1) == -1
)
) {
try {
beforeAnchor += (questionPos == -1 ? '?' : '&') + encodedParamName + '=' + URLEncoder.encode(toLocaleString(locale), encoding);
} catch(UnsupportedEncodingException e) {
// Should never happen with standard supported encoding
throw new WrappedException(e);
}
}
return
(anchor != null)
? (beforeAnchor + anchor)
: beforeAnchor
;
} else {
// Unmodified
return url;
}
} | java |
public static Map<String,Locale> getEnabledLocales(ServletRequest request) {
@SuppressWarnings("unchecked")
Map<String,Locale> enabledLocales = (Map<String,Locale>)request.getAttribute(ENABLED_LOCALES_REQUEST_ATTRIBUTE_KEY);
if(enabledLocales==null) throw new IllegalStateException("Not in request filtered by LocaleFilter, unable to get enabled locales.");
return enabledLocales;
} | java |
protected boolean isLocalizedPath(String url) {
int questionPos = url.lastIndexOf('?');
String lowerPath = (questionPos==-1 ? url : url.substring(0, questionPos)).toLowerCase(Locale.ROOT);
return
// Matches SessionResponseWrapper
// Matches NoSessionFilter
!lowerPath.endsWith(".bmp")
&& !lowerPath.endsWith(".css")
&& !lowerPath.endsWith(".exe")
&& !lowerPath.endsWith(".gif")
&& !lowerPath.endsWith(".ico")
&& !lowerPath.endsWith(".jpeg")
&& !lowerPath.endsWith(".jpg")
&& !lowerPath.endsWith(".js")
&& !lowerPath.endsWith(".png")
&& !lowerPath.endsWith(".svg")
&& !lowerPath.endsWith(".txt")
&& !lowerPath.endsWith(".zip")
;
} | java |
protected String toLocaleString(Locale locale) {
String language = locale.getLanguage();
if(language.isEmpty()) return "";
String country = locale.getCountry();
if(country.isEmpty()) return language;
String variant = locale.getVariant();
if(variant.isEmpty()) {
return language + '-' + country;
} else {
return language + '-' + country + '-' + variant;
}
} | java |
@Override
public void loadModel(InputStream modelIs) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
IOUtils.copy(modelIs, baos);
} catch (IOException e) {
LOG.error("Load model err.", e);
}
InputStream isForSVMLoad = new ByteArrayInputStream(baos.toByteArray());
try (ZipInputStream zipInputStream = new ZipInputStream(isForSVMLoad)) {
ZipEntry entry;
while ((entry = zipInputStream.getNextEntry()) != null) {
if (entry.getName().endsWith(".model")) {
BufferedReader br = new BufferedReader(new InputStreamReader(zipInputStream, Charset.defaultCharset()));
this.model = svm.svm_load_model(br);
}
}
} catch (IOException e) {
LOG.error("Err in load SVM model.", e);
}
modelIs = new ByteArrayInputStream(baos.toByteArray());
try (ZipInputStream zipInputStream = new ZipInputStream(modelIs)) {
ZipEntry entry;
while ((entry = zipInputStream.getNextEntry()) != null) {
if (entry.getName().endsWith(".lbindexer")) {
String lbIndexer = IOUtils.toString(zipInputStream, Charset.defaultCharset());
this.labelIndexer = new LabelIndexer(new ArrayList<>());
this.labelIndexer.readFromSerializedString(lbIndexer);
}
}
} catch (IOException e) {
LOG.error("Err in load LabelIndexer", e);
}
} | java |
static String getJavaScriptUnicodeEscapeString(char ch) {
int chInt = (int)ch;
if(chInt>=ENCODE_RANGE_1_START && chInt<ENCODE_RANGE_1_END) {
return javaScriptUnicodeEscapeStrings1[chInt - ENCODE_RANGE_1_START];
}
if(chInt>=ENCODE_RANGE_2_START && chInt<ENCODE_RANGE_2_END) {
return javaScriptUnicodeEscapeStrings2[chInt - ENCODE_RANGE_2_START];
}
if(chInt>=ENCODE_RANGE_3_START && chInt<ENCODE_RANGE_3_END) {
return javaScriptUnicodeEscapeStrings3[chInt - ENCODE_RANGE_3_START];
}
// No encoding needed
return null;
} | java |
@Override
public Map<String, Double> predict(Tuple predict) {
Map<Integer, Double> labelProb = new HashMap<>();
for (Integer labelIndex : model.labelIndexer.getIndexSet()) {
double likelihood = 1.0D;
for (int i = 0; i < predict.vector.getVector().length; i++) {
double fi = predict.vector.getVector()[i];
likelihood = likelihood * VectorUtils.gaussianPDF(model.meanVectors[labelIndex][i], model.varianceVectors[labelIndex][i], fi);
}
double posterior = model.labelPrior.get(labelIndex) * likelihood; // prior*likelihood, This is numerator of posterior
labelProb.put(labelIndex, posterior);
}
double evidence = labelProb.values().stream().reduce((e1, e2) -> e1 + e2).orElse(-1D);
if (evidence == -1) {
LOG.error("Evidence is Empty!");
return new HashMap<>();
}
labelProb.replaceAll((labelIndex, numerator) -> numerator / evidence); // Normalize by the evidence (the denominator of the posterior)
Map<String, Double> result = model.labelIndexer.convertMapKey(labelProb);
if (predict.label == null || predict.label.isEmpty()) { // Just for write to predict tuple.
predict.label = result.entrySet().stream().max((e1, e2) -> e1.getValue().compareTo(e2.getValue())).map(Entry::getKey).orElse(StringUtils.EMPTY);
}
return result;
} | java |
public static void splitData(final String originalTrainingDataFile) {
List<Tuple> trainingData = NaiveBayesClassifier.readTrainingData(originalTrainingDataFile, "\\s");
List<Tuple> wrongData = new ArrayList<>();
int lastTrainingDataSize;
int iterCount = 0;
do {
System.out.println("Iteration:\t" + (++iterCount));
lastTrainingDataSize = trainingData.size();
NaiveBayesClassifier nbc = new NaiveBayesClassifier();
nbc.train(trainingData);
Iterator<Tuple> trainingDataIter = trainingData.iterator();
while (trainingDataIter.hasNext()) {
Tuple t = trainingDataIter.next();
String actual = nbc.predictLabel(t);
if (!t.label.equals(actual) && !t.label.equals("1")) { // preserve 1 since too few.
wrongData.add(t);
trainingDataIter.remove();
}
}
Iterator<Tuple> wrongDataIter = wrongData.iterator();
while (wrongDataIter.hasNext()) {
Tuple t = wrongDataIter.next();
String actual = nbc.predictLabel(t);
if (t.label.equals(actual)) {
trainingData.add(t);
wrongDataIter.remove();
}
}
} while (trainingData.size() != lastTrainingDataSize);
writeToFile(trainingData, originalTrainingDataFile + ".aligned");
writeToFile(wrongData, originalTrainingDataFile + ".wrong");
} | java |
public void addFilePart(final String fieldName, final InputStream stream, final String contentType)
throws IOException
{
addFilePart(fieldName, stream, null, contentType);
} | java |
public void addFilePart(final String fieldName, final URL urlToUploadFile)
throws IOException
{
//
// Maybe try and extract a filename from the last part of the url?
// Or have the user pass it in?
// Or just leave it blank as I have already done?
//
addFilePart(fieldName,
urlToUploadFile.openStream(),
null,
URLConnection.guessContentTypeFromName(urlToUploadFile.toString()));
} | java |
public void addHeaderField(final String name, final String value)
{
writer.append(name + ": " + value).append(LINE_FEED);
writer.flush();
} | java |
public HttpResponse finish() throws IOException
{
writer.append("--" + boundary + "--").append(LINE_FEED);
writer.flush();
try {
return doFinish();
} finally {
writer.close();
}
} | java |
public double update(final double units) {
final double speed;
lock.lock();
try {
final long currentTime = System.nanoTime();
final long timeDifference = (currentTime - lastUpdateTime) / C1; // nanoseconds to micros
if (timeDifference >= averagingPeriod) {
speed = units / averagingPeriod;
cachedSpeed = speed;
lastUpdateTime = currentTime;
elapsedTime = ZERO_TIME;
quantity = ZERO_UNITS;
} else {
if (timeDifference > ZERO_TIME) {
lastUpdateTime = currentTime;
elapsedTime += timeDifference;
}
if (units != ZERO_UNITS) {
quantity += units;
}
if (elapsedTime >= averagingPeriod) {
speed = quantity / elapsedTime;
cachedSpeed = speed;
elapsedTime = ZERO_TIME;
quantity = ZERO_UNITS;
} else {
speed = (cachedSpeed * (averagingPeriod - elapsedTime) + quantity) / averagingPeriod;
}
}
} finally {
lock.unlock();
}
return speed * C0; // units per micro to units per second
} | java |
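For intuition, with hypothetical numbers: if `averagingPeriod` is 1,000 microseconds, `cachedSpeed` is 2.0 units/microsecond, 250 microseconds have elapsed, and 300 units have accumulated, the blended estimate is (2.0 * (1000 - 250) + 300) / 1000 = 1.8 units/microsecond, which the final line then scales by `C0` into units per second.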
public static <T> Optional<T> toBean(String json, Class<T> clazz) {
if (StringUtils.isBlank(json)) {
log.warn("json is blank. ");
return Optional.empty();
}
try {
OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
return Optional.of(OBJECT_MAPPER.readValue(json, clazz));
} catch (Exception e) {
log.error(e.getMessage(), e);
return Optional.empty();
}
} | java |
public static <T> String toJson(T t) {
if (Objects.isNull(t)) {
log.warn("t is blank. ");
return "";
}
try {
return OBJECT_MAPPER.writeValueAsString(t);
} catch (Exception e) {
log.error(e.getMessage(), e);
return "";
}
} | java |
@RequestMapping(produces = MediaType.APPLICATION_JSON_VALUE, value = "/parse", method = RequestMethod.GET)
public String parse(@RequestParam("sentence") String sentence, HttpServletRequest request) {
if (sentence == null || sentence.trim().isEmpty()) {
return StringUtils.EMPTY;
}
sentence = sentence.trim();
LOGGER.info("Parse [" + sentence + "]");
DTree tree = PARSER.parse(sentence);
DTreeEntity parseTreeEntity = new DTreeEntity(tree, "SAMPLE_AUTHOR");
dNodeEntityRepository.save(parseTreeEntity.dNodeEntities);
dTreeEntityRepository.save(parseTreeEntity);
return "[" + toJSON(tree.get(0)) + "]";
} | java |
public static <T> T resolveValue(ValueExpression expression, Class<T> type, ELContext elContext) {
if(expression == null) {
return null;
} else {
return type.cast(expression.getValue(elContext));
}
} | java |
public static <T> T resolveValue(Object value, Class<T> type, ELContext elContext) {
if(value == null) {
return null;
} else if(value instanceof ValueExpression) {
return resolveValue((ValueExpression)value, type, elContext);
} else {
return type.cast(value);
}
} | java |
public static MediaValidator getMediaValidator(MediaType contentType, Writer out) throws MediaException {
// If the existing out is already validating for this type, use it.
// This occurs when one validation validates to a set of characters that are a subset of the requested validator.
// For example, a URL is always valid TEXT.
if(out instanceof MediaValidator) {
MediaValidator inputValidator = (MediaValidator)out;
if(inputValidator.isValidatingMediaInputType(contentType)) return inputValidator;
}
// Add filter if needed for the given type
switch(contentType) {
case JAVASCRIPT:
case JSON:
case LD_JSON:
return new JavaScriptValidator(out, contentType);
case SH:
return new ShValidator(out);
case MYSQL:
return new MysqlValidator(out);
case PSQL:
return new PsqlValidator(out);
case TEXT:
return new TextValidator(out);
case URL:
return new UrlValidator(out);
case XHTML:
return new XhtmlValidator(out);
case XHTML_ATTRIBUTE:
return new XhtmlAttributeValidator(out);
default:
throw new MediaException(ApplicationResources.accessor.getMessage("MediaValidator.unableToFindValidator", contentType.getContentType()));
}
} | java |
@Override
public boolean getAllowRobots(ServletContext servletContext, HttpServletRequest request, HttpServletResponse response, Page page) {
return false;
} | java |
public Map<String, Double> convertMapKey(Map<Integer, Double> probs) {
Map<String, Double> stringKeyProb = new HashMap<>();
probs.entrySet().forEach(e -> stringKeyProb.put(getLabel(e.getKey()), e.getValue()));
return stringKeyProb;
} | java |
public static List<ValidationMessage> validateMediaType(TagData data, List<ValidationMessage> messages) {
Object o = data.getAttribute("type");
if(
o != null
&& o != TagData.REQUEST_TIME_VALUE
&& !(o instanceof MediaType)
) {
String type = Coercion.toString(o);
try {
// First allow shortcuts (matching enum names)
MediaType mediaType = MediaType.getMediaTypeByName(type);
if(mediaType == null) {
mediaType = MediaType.getMediaTypeForContentType(type);
}
// Value is OK
} catch(MediaException err) {
messages = MinimalList.add(
messages,
new ValidationMessage(
data.getId(),
err.getMessage()
)
);
}
}
return messages;
} | java |
public static List<ValidationMessage> validateScope(TagData data, List<ValidationMessage> messages) {
Object o = data.getAttribute("scope");
if(
o != null
&& o != TagData.REQUEST_TIME_VALUE
) {
String scope = Coercion.toString(o);
try {
Scope.getScopeId(scope);
// Value is OK
} catch(JspTagException err) {
messages = MinimalList.add(
messages,
new ValidationMessage(
data.getId(),
err.getMessage()
)
);
}
}
return messages;
} | java |
public static double distanceKms(BigDecimal lat1, BigDecimal lng1, BigDecimal lat2, BigDecimal lng2 ){
return new GeoCoordinate(lat1, lng1).distanceTo(new GeoCoordinate(lat2, lng2));
} | java |
public static <K, V> MapBuilder<K, V> map(Map<K, V> instance) {
return new MapBuilder<>(instance);
} | java |
public void run(final List<Tuple> data) {
List<Tuple> dataCopy = new ArrayList<>(data);
this.labels = data.parallelStream().map(x -> x.label).collect(Collectors.toSet());
if (shuffleData) {
Collections.shuffle(dataCopy);
}
int chunkSize = dataCopy.size() / nfold;
int remainder = dataCopy.size() % nfold;
// The tail that does not fit evenly into the folds is excluded from testing.
for (int i = dataCopy.size() - 1; i > dataCopy.size() - 1 - remainder; i--) {
LOG.info("Dropping the tail id: " + dataCopy.get(i).id);
}
for (int i = 0; i < nfold; i++) {
System.err.println("Cross validation round " + (i + 1) + "/" + nfold);
// Each fold starts at a chunk boundary.
int start = i * chunkSize;
List<Tuple> testing = new ArrayList<>(dataCopy.subList(start, start + chunkSize));
List<Tuple> training = new ArrayList<>(dataCopy.subList(0, start));
training.addAll(dataCopy.subList(start + chunkSize, dataCopy.size()));
eval(training, testing, i);
}
} | java |
private void eval(List<Tuple> training, List<Tuple> testing, int nfold) {
classifier.train(training);
for (Tuple tuple : testing) {
String actual = classifier.predict(tuple).entrySet().stream()
.max((e1, e2) -> e1.getValue().compareTo(e2.getValue()))
.map(Map.Entry::getKey).orElse(StringUtils.EMPTY);
updateScore(tuple, actual, nfold);
}
} | java |
private static String filter(String s) {
int len = s.length();
StringBuilder filtered = new StringBuilder(len);
int pos = 0;
while(pos < len) {
char ch1 = s.charAt(pos++);
if(Character.isHighSurrogate(ch1)) {
// Handle surrogates
if(pos < len) {
char ch2 = s.charAt(pos++);
if(Character.isLowSurrogate(ch2)) {
if(isValidCharacter(Character.toCodePoint(ch1, ch2))) {
filtered.append(ch1).append(ch2);
}
} else {
// High surrogate not followed by low surrogate, invalid
}
} else {
// High surrogate at end of string, invalid
}
} else {
// Not surrogates
if(isValidCharacter(ch1)) {
filtered.append(ch1);
}
}
}
assert filtered.length() <= len;
return filtered.length() != len ? filtered.toString() : s;
} | java |
protected void doTag(Writer out) throws JspException, IOException {
JspFragment body = getJspBody();
if(body!=null) {
// Check for JspWriter to avoid a JspWriter wrapping a JspWriter
body.invoke(
(out instanceof JspWriter)
? null
: out
);
}
} | java |
private static BinRelation extractPolar(DTree tree) {
// TODO: HERE.
DNode rootVerb = tree.getRoots().get(0);
// rootVerb.getChildren().
BinRelation binRelation = new BinRelation();
return binRelation;
} | java |
private Event createEvent(String obs) {
int lastSpace = obs.lastIndexOf(StringUtils.SPACE);
Event event = null;
if (lastSpace != -1) {
String label = obs.substring(lastSpace + 1);
String[] contexts = obs.substring(0, lastSpace).split("\\s+");
// Split name and value
float[] values = RealValueFileEventStream.parseContexts(contexts);
// Label, feature name, feature value
event = new Event(label, contexts, values);
}
return event;
} | java |
public static List<CoreLabel> stanfordTokenize(String str) {
TokenizerFactory<? extends HasWord> tf = PTBTokenizer.coreLabelFactory();
// ptb3Escaping=false -> '(' is not converted to '-LRB-'. Do not feed these tokens to the parser; it will cause dependency resolution errors.
Tokenizer<? extends HasWord> originalWordTokenizer = tf.getTokenizer(new StringReader(str), "ptb3Escaping=false");
Tokenizer<? extends HasWord> tokenizer = tf.getTokenizer(new StringReader(str));
List<? extends HasWord> originalTokens = originalWordTokenizer.tokenize();
List<? extends HasWord> tokens = tokenizer.tokenize();
// Curse you Stanford!
List<CoreLabel> coreLabels = new ArrayList<>(tokens.size());
for (int i = 0; i < tokens.size(); i++) {
CoreLabel coreLabel = new CoreLabel();
coreLabel.setWord(tokens.get(i).word());
coreLabel.setOriginalText(originalTokens.get(i).word());
coreLabel.setValue(tokens.get(i).word());
coreLabel.setBeginPosition(((CoreLabel) tokens.get(i)).beginPosition());
coreLabel.setEndPosition(((CoreLabel) tokens.get(i)).endPosition());
coreLabels.add(coreLabel);
}
return coreLabels;
} | java |
public void tagPOS(List<CoreLabel> tokens) {
if (posTagger == null) {
if (POS_TAGGER_MODEL_PATH == null) {
LOG.warn("Default POS Tagger model");
POS_TAGGER_MODEL_PATH = StanfordConst.STANFORD_DEFAULT_POS_EN_MODEL;
}
posTagger = new MaxentTagger(POS_TAGGER_MODEL_PATH);
}
List<TaggedWord> posList = posTagger.tagSentence(tokens);
for (int i = 0; i < tokens.size(); i++) {
String pos = posList.get(i).tag();
tokens.get(i).setTag(pos);
}
} | java |
public static void tagLemma(List<CoreLabel> tokens) {
// Not sure if this can be static.
Morphology morpha = new Morphology();
for (CoreLabel token : tokens) {
String lemma;
String pos = token.tag();
if (pos.equals(LangLib.POS_NNPS)) {
pos = LangLib.POS_NNS;
}
if (pos.length() > 0) {
String phrasalVerb = phrasalVerb(morpha, token.word(), pos);
if (phrasalVerb == null) {
lemma = morpha.lemma(token.word(), pos);
} else {
lemma = phrasalVerb;
}
} else {
lemma = morpha.stem(token.word());
}
// LGLibEn.convertUnI only accepts a capital I.
if (lemma.equals("i")) {
lemma = "I";
}
token.setLemma(lemma);
}
} | java |
public synchronized void tagNamedEntity(List<CoreLabel> tokens) {
boolean isPOSTagged = tokens.stream().noneMatch(x -> x.tag() == null);
if (!isPOSTagged) {
throw new RuntimeException("Please Run POS Tagger before Named Entity tagger.");
}
if (ners != null) {
try {
ners.stream().forEach(ner -> ner.classify(tokens));
} catch (Exception e) {
/* edu.stanford.nlp.util.RuntimeInterruptedException: java.lang.InterruptedException
at edu.stanford.nlp.util.HashIndex.addToIndex(HashIndex.java:173) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher$BranchStates.newBid(SequenceMatcher.java:902) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher$MatchedStates.<init>(SequenceMatcher.java:1288) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.getStartStates(SequenceMatcher.java:709) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.findMatchStartBacktracking(SequenceMatcher.java:488) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.findMatchStart(SequenceMatcher.java:449) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.find(SequenceMatcher.java:341) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.findNextNonOverlapping(SequenceMatcher.java:365) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ling.tokensregex.SequenceMatcher.find(SequenceMatcher.java:437) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.NumberNormalizer.findNumbers(NumberNormalizer.java:452) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.NumberNormalizer.findAndMergeNumbers(NumberNormalizer.java:721) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.time.TimeExpressionExtractorImpl.extractTimeExpressions(TimeExpressionExtractorImpl.java:184) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.time.TimeExpressionExtractorImpl.extractTimeExpressions(TimeExpressionExtractorImpl.java:178) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.time.TimeExpressionExtractorImpl.extractTimeExpressionCoreMaps(TimeExpressionExtractorImpl.java:116) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.time.TimeExpressionExtractorImpl.extractTimeExpressionCoreMaps(TimeExpressionExtractorImpl.java:104) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.regexp.NumberSequenceClassifier.runSUTime(NumberSequenceClassifier.java:340) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.regexp.NumberSequenceClassifier.classifyWithSUTime(NumberSequenceClassifier.java:138) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.regexp.NumberSequenceClassifier.classifyWithGlobalInformation(NumberSequenceClassifier.java:101) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.NERClassifierCombiner.recognizeNumberSequences(NERClassifierCombiner.java:267) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.NERClassifierCombiner.classifyWithGlobalInformation(NERClassifierCombiner.java:231) ~[stanford-corenlp-3.5.2.jar:3.5.2]
at edu.stanford.nlp.ie.NERClassifierCombiner.classify(NERClassifierCombiner.java:218) ~[stanford-corenlp-3.5.2.jar:3.5.2]
*/
LOG.warn("NER Classifier err for: " + tokens.stream().map(CoreLabel::word).collect(Collectors.joining(StringUtils.SPACE)));
}
}
} | java |
private Tuple<String,String> getOverrideEntry( final String key )
{
for ( String prefix : _overrides )
{
String override = prefix + "." + key;
String value = getPropertyValue( override );
if ( value != null )
{
return new Tuple<String,String>( override, value );
}
}
return null;
} | java |
private Tuple<String,String> getEntry( final String key )
{
Tuple<String,String> override = getOverrideEntry( key );
if ( override == null )
{
String value = getPropertyValue( key );
if ( value != null )
{
return new Tuple<String,String>( key, value );
}
}
return override;
} | java |
private Tuple<String,String> getEntry( final String key,
final Collection<String> prefixes )
{
if ( CollectionUtils.isEmpty( prefixes ) )
{
return getEntry( key );
}
for( String prefix : prefixes )
{
String prefixedKey = prefix + "." + key;
Tuple<String,String> override = getOverrideEntry( prefixedKey );
if ( override != null )
{
return override;
}
//
// Above we were checking overrides of the override. Here,
// just check for the first override. If that doesn't work,
// then we need to just pass it on and ignore the specified override.
//
String value = getPropertyValue( prefixedKey );
if ( value != null )
{
return new Tuple<String,String>( prefixedKey, value );
}
}
//
// No prefixed overrides were found, so drop back to using
// the standard, non-prefixed version
//
return getEntry( key );
} | java |
@Bean
public LocalContainerEntityManagerFactoryBean entityManagerFactory() {
HibernateJpaVendorAdapter jpaVendorAdapter = new HibernateJpaVendorAdapter();
jpaVendorAdapter.setDatabase(Database.H2);
jpaVendorAdapter.setGenerateDdl(true);
LocalContainerEntityManagerFactoryBean entityManagerFactory = new LocalContainerEntityManagerFactoryBean();
entityManagerFactory.setPackagesToScan(this.getClass().getPackage().getName());
entityManagerFactory.setJpaVendorAdapter(jpaVendorAdapter);
entityManagerFactory.setDataSource(dataSource());
return entityManagerFactory;
} | java |
@Bean
public ServletRegistrationBean h2servletRegistration() {
ServletRegistrationBean servletRegistrationBean = new ServletRegistrationBean(new WebServlet());
servletRegistrationBean.addUrlMappings("/h2/*");
return servletRegistrationBean;
} | java |
public static void setAttribute(PageContext pageContext, String scope, String name, Object value) throws JspTagException {
pageContext.setAttribute(name, value, Scope.getScopeId(scope));
} | java |
public static Object findObject(PageContext pageContext, String scope, String name, String property, boolean beanRequired, boolean valueRequired) throws JspTagException {
try {
// Check the name
if(name==null) throw new AttributeRequiredException("name");
// Find the bean
Object bean;
if(scope==null) bean = pageContext.findAttribute(name);
else bean = pageContext.getAttribute(name, Scope.getScopeId(scope));
// Check required
if(bean==null) {
if(beanRequired) {
// null and required
if(scope==null) throw new LocalizedJspTagException(ApplicationResources.accessor, "PropertyUtils.bean.required.nullScope", name);
else throw new LocalizedJspTagException(ApplicationResources.accessor, "PropertyUtils.bean.required.scope", name, scope);
} else {
// null and not required
return null;
}
} else {
if(property==null) {
// No property lookup, use the bean directly
return bean;
} else {
// Find the property
Object value = org.apache.commons.beanutils.PropertyUtils.getProperty(bean, property);
if(valueRequired && value==null) {
// null and required
if(scope==null) throw new LocalizedJspTagException(ApplicationResources.accessor, "PropertyUtils.value.required.nullScope", property, name);
else throw new LocalizedJspTagException(ApplicationResources.accessor, "PropertyUtils.value.required.scope", property, name, scope);
}
return value;
}
}
} catch(IllegalAccessException | InvocationTargetException | NoSuchMethodException err) {
throw new JspTagException(err);
}
} | java |
public static int getScopeId(String scope) throws JspTagException {
if(scope==null || PAGE.equals(scope)) return PageContext.PAGE_SCOPE;
else if(REQUEST.equals(scope)) return PageContext.REQUEST_SCOPE;
else if(SESSION.equals(scope)) return PageContext.SESSION_SCOPE;
else if(APPLICATION.equals(scope)) return PageContext.APPLICATION_SCOPE;
else throw new LocalizedJspTagException(ApplicationResources.accessor, "Scope.scope.invalid", scope);
} | java |
public static Map<String,String> bundleToStringMap( final ResourceBundle bundle,
final String suffix )
{
if ( bundle == null )
{
return Collections.<String,String>emptyMap();
}
String theSuffix;
if ( StringUtils.isEmpty( suffix ) )
{
theSuffix = "";
}
else
{
theSuffix = suffix + ".";
}
Map<String,String> map = new LinkedHashMap<String,String>();
Enumeration<String> keys = bundle.getKeys();
while( keys.hasMoreElements() )
{
String key = keys.nextElement();
Object value = bundle.getObject( key );
String strValue = ( value != null ) ? value.toString() : null;
map.put( theSuffix + key, strValue );
}
return map;
} | java |
@Override
public Map<String, Double> predict(Tuple predict) {
KNNEngine engine = new KNNEngine(predict, trainingData, k);
if (mode == 1) {
engine.getDistance(engine.chebyshevDistance);
} else if (mode == 2) {
engine.getDistance(engine.manhattanDistance);
} else {
engine.getDistance(engine.euclideanDistance);
}
predict.label = engine.getResult();
Map<String, Double> outputMap = new ConcurrentHashMap<>();
trainingData.parallelStream().forEach(x -> outputMap.put(String.valueOf(x.id), (Double) x.getExtra().get(DISTANCE)));
return outputMap;
} | java |
public void purge()
{
WeakElement<?> element;
while( ( element = (WeakElement<?>) _queue.poll() ) != null )
{
_set.remove( element );
}
} | java |
@Override
public ISeqClassifier train(List<SequenceTuple> trainingData) {
if (trainingData == null || trainingData.size() == 0) {
LOG.warn("Training data is empty.");
return this;
}
if (modelPath == null) {
try {
modelPath = Files.createTempDirectory("crfsuite").toAbsolutePath().toString();
} catch (IOException e) {
LOG.error("Create temp directory failed.", e);
e.printStackTrace();
}
}
Pair<List<ItemSequence>, List<StringList>> crfCompatibleTrainingData = loadTrainingData(trainingData);
Trainer trainer = new Trainer();
String algorithm = (String) props.getOrDefault("algorithm", DEFAULT_ALGORITHM);
props.remove("algorithm");
String graphicalModelType = (String) props.getOrDefault("graphicalModelType", DEFAULT_GRAPHICAL_MODEL_TYPE);
props.remove("graphicalModelType");
trainer.select(algorithm, graphicalModelType);
// Set parameters
props.entrySet().forEach(pair -> trainer.set((String) pair.getKey(), (String) pair.getValue()));
// Add training data into the trainer
for (int i = 0; i < trainingData.size(); i++) {
// Use group id = 0 but the API doesn't say what it is used for :(
trainer.append(crfCompatibleTrainingData.getLeft().get(i), crfCompatibleTrainingData.getRight().get(i), 0);
}
// Start training without hold-outs. trainer.message()
// will be called to report the training process
trainer.train(modelPath, -1);
return this;
} | java |
public static String getSignatureBaseString(String requestMethod, String requestUrl,
Map<String, String> protocolParameters) throws AuthException {
StringBuilder sb = new StringBuilder();
sb.append(requestMethod.toUpperCase()).append("&")
.append(AuthUtils.percentEncode(normalizeUrl(requestUrl))).append("&")
.append(AuthUtils.percentEncode(normalizeParameters(requestUrl, protocolParameters)));
return sb.toString();
} | java |
public static List<Tuple> createBalancedTrainingData(final List<Tuple> trainingData) {
Map<String, Long> tagCount = trainingData.parallelStream()
.map(x -> new AbstractMap.SimpleImmutableEntry<>(x.label, 1))
.collect(Collectors.groupingBy(Map.Entry::getKey, Collectors.counting()));
Map.Entry<String, Long> minCountEntry = tagCount.entrySet().stream()
.min(Comparator.comparing(Map.Entry::getValue))
.orElse(null);
tagCount.clear();
List<Tuple> newData = new ArrayList<>();
for (Tuple t : trainingData) {
String label = t.label;
if (!tagCount.containsKey(label)) {
tagCount.put(t.label, 0L);
}
if (tagCount.get(label) < minCountEntry.getValue()) {
tagCount.put(label, tagCount.get(label) + 1);
newData.add(t);
}
}
return newData;
} | java |
public static Pair<List<Tuple>, List<Tuple>> splitData(final List<Tuple> trainingData, double proportion) {
if (proportion < 0 || proportion > 1) {
throw new RuntimeException("Proportion should between 0.0 - 1.0");
}
if (proportion > 0.5) {
proportion = 1 - proportion;
}
List<Tuple> smallList = new ArrayList<>();
List<Tuple> largeList = new ArrayList<>();
int smallListSize = (int) Math.floor(proportion * trainingData.size());
int ct = 0;
Set<Integer> indices = new HashSet<>();
while (ct < smallListSize && trainingData.size() > indices.size()) {
int index = (int) (Math.random() * trainingData.size());
while (indices.contains(index)) {
index = (int) (Math.random() * trainingData.size());
}
indices.add(index);
ct++;
}
smallList.addAll(indices.stream().map(trainingData::get).collect(Collectors.toList()));
IntStream.range(0, trainingData.size())
.filter(x -> !indices.contains(x))
.forEach(i -> largeList.add(trainingData.get(i)));
return new ImmutablePair<>(smallList, largeList);
} | java |
public void calculateLabelPrior() {
double prior = 1D / model.labelIndexer.getLabelSize();
model.labelIndexer.getIndexSet().forEach(labelIndex -> model.labelPrior.put(labelIndex, prior));
} | java |
public static void writeWithMarkup(Object value, MarkupType markupType, MediaEncoder encoder, Writer out) throws IOException {
if(encoder==null) {
writeWithMarkup(value, markupType, out);
} else {
if(value!=null) {
if(
value instanceof Writable
&& !((Writable)value).isFastToString()
) {
// Avoid intermediate String from Writable
Coercion.write(value, encoder, out);
} else {
String str = Coercion.toString(value);
BundleLookupMarkup lookupMarkup;
BundleLookupThreadContext threadContext = BundleLookupThreadContext.getThreadContext(false);
if(threadContext!=null) {
lookupMarkup = threadContext.getLookupMarkup(str);
} else {
lookupMarkup = null;
}
if(lookupMarkup!=null) lookupMarkup.appendPrefixTo(markupType, encoder, out);
encoder.write(str, out);
if(lookupMarkup!=null) lookupMarkup.appendSuffixTo(markupType, encoder, out);
}
}
}
} | java |
public void buildModel(String wordFileName) throws IOException {
try (BufferedReader br = new BufferedReader(new FileReader(new File(wordFileName)))) {
String str;
while ((str = br.readLine()) != null) {
List<String> tokens = StanfordParser.stanfordTokenize(str).stream().map(CoreLabel::originalText).collect(Collectors.toList());
for (String word : tokens) {
double count = model.wordProbability.containsKey(word) ? model.wordProbability.get(word) : 0;
count++;
model.wordProbability.put(word, count);
}
}
}
// Remove Empty prob.
model.wordProbability.remove(StringUtils.EMPTY);
model.normalizeWordProbability();
} | java |
public static int ipv4ToInt( final Inet4Address addr )
{
int value = 0;
for( byte chunk : addr.getAddress() )
{
value <<= 8;
value |= chunk & 0xff;
}
return value;
} | java |
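`ipv4ToInt` packs the four address bytes big-endian into one `int`, masking each byte with `0xff` to undo Java's sign extension. A standalone round-trip sketch; the `intToIpv4` inverse is written here purely for illustration and is not part of the original class:

```java
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.UnknownHostException;

public class Ipv4IntSketch {
    static int ipv4ToInt(Inet4Address addr) {
        int value = 0;
        for (byte chunk : addr.getAddress()) {
            value <<= 8;
            value |= chunk & 0xff; // mask so bytes >= 0x80 do not sign-extend
        }
        return value;
    }

    /** Illustrative inverse: unpack the int back into dotted-quad form. */
    static String intToIpv4(int value) {
        return ((value >>> 24) & 0xff) + "." + ((value >>> 16) & 0xff) + "."
                + ((value >>> 8) & 0xff) + "." + (value & 0xff);
    }

    public static void main(String[] args) throws UnknownHostException {
        Inet4Address addr = (Inet4Address) InetAddress.getByName("192.168.1.10");
        int packed = ipv4ToInt(addr);
        System.out.println(packed);            // -1062731510 (0xC0A8010A as a signed int)
        System.out.println(intToIpv4(packed)); // 192.168.1.10
    }
}
```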
public static <E> List<E> empty(List<E> list) {
return Optional.ofNullable(list).orElse(newArrayList());
} | java |
public static <E> List<E> clean(List<E> list) {
if (iterable(list)) {
return list.stream().filter(e -> {
if (Objects.isNull(e)) {
return false;
}
if (e instanceof Nullable) {
return !((Nullable) e).isNull();
}
return true;
}).collect(Collectors.toList());
}
return list;
} | java |
public ChainWriter encodeJavaScriptStringInXmlAttribute(Object value) throws IOException {
// Two stage encoding:
// 1) Text -> JavaScript (with quotes added)
// 2) JavaScript -> XML Attribute
if(
value instanceof Writable
&& !((Writable)value).isFastToString()
) {
// Avoid unnecessary toString calls
textInJavaScriptEncoder.writePrefixTo(javaScriptInXhtmlAttributeWriter);
Coercion.write(value, textInJavaScriptEncoder, javaScriptInXhtmlAttributeWriter);
textInJavaScriptEncoder.writeSuffixTo(javaScriptInXhtmlAttributeWriter);
} else {
String str = Coercion.toString(value);
BundleLookupMarkup lookupMarkup;
BundleLookupThreadContext threadContext = BundleLookupThreadContext.getThreadContext(false);
if(threadContext!=null) {
lookupMarkup = threadContext.getLookupMarkup(str);
} else {
lookupMarkup = null;
}
if(lookupMarkup!=null) lookupMarkup.appendPrefixTo(MarkupType.JAVASCRIPT, javaScriptInXhtmlAttributeWriter);
textInJavaScriptEncoder.writePrefixTo(javaScriptInXhtmlAttributeWriter);
textInJavaScriptEncoder.write(str, javaScriptInXhtmlAttributeWriter);
textInJavaScriptEncoder.writeSuffixTo(javaScriptInXhtmlAttributeWriter);
if(lookupMarkup!=null) lookupMarkup.appendSuffixTo(MarkupType.JAVASCRIPT, javaScriptInXhtmlAttributeWriter);
}
return this;
} | java |
@Deprecated
public ChainWriter printEU(String value) {
int len = value.length();
for (int c = 0; c < len; c++) {
char ch = value.charAt(c);
if (ch == ' ') out.print('+');
else {
if ((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z')) out.print(ch);
else {
out.print('%');
out.print(getHex(ch >>> 4));
out.print(getHex(ch));
}
}
}
return this;
} | java |
public static void writeHtmlImagePreloadJavaScript(String url, Appendable out) throws IOException {
out.append("<script type='text/javascript'>\n"
+ " var img=new Image();\n"
+ " img.src=\"");
// Escape for javascript
StringBuilder javascript = new StringBuilder(url.length());
encodeTextInJavaScript(url, javascript);
// Encode for XML attribute
encodeJavaScriptInXhtmlAttribute(javascript, out);
out.append("\";\n"
+ "</script>");
} | java |
public Map<String, String> split(final CharSequence source) {
java.util.Objects.requireNonNull(source, "source");
Map<String, String> parameters = new HashMap<>();
Iterator<String> i = new StringIterator(source, pairSeparator);
while (i.hasNext()) {
String keyValue = i.next();
int keyValueSeparatorPosition = keyValueSeparatorStart(keyValue);
if (keyValueSeparatorPosition == 0 || keyValue.length() == 0) {
continue;
}
if (keyValueSeparatorPosition < 0) {
parameters.put(keyValue, null);
continue;
}
int keyStart = 0;
int keyEnd = keyValueSeparatorPosition;
while (keyStart < keyEnd && keyTrimMatcher.matches(keyValue.charAt(keyStart))) {
keyStart++;
}
while (keyStart < keyEnd && keyTrimMatcher.matches(keyValue.charAt(keyEnd - 1))) {
keyEnd--;
}
int valueStart = keyValueSeparatorPosition + keyValueSeparator.length();
int valueEnd = keyValue.length();
while (valueStart < valueEnd && valueTrimMatcher.matches(keyValue.charAt(valueStart))) {
valueStart++;
}
while (valueStart < valueEnd && valueTrimMatcher.matches(keyValue.charAt(valueEnd - 1))) {
valueEnd--;
}
String key = keyValue.substring(keyStart, keyEnd);
String value = keyValue.substring(valueStart, valueEnd);
parameters.put(key, value);
}
return parameters;
} | java |
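`split` walks each pair-separated chunk, trims the key and value with the configured matchers, skips chunks with an empty key, and maps a bare key (no separator) to a null value. A standalone sketch of that contract on a fixed input, using plain `String` operations as stand-ins for the `StringIterator` and `Matcher` helpers that are not shown here:

```java
import java.util.HashMap;
import java.util.Map;

public class SplitSketch {
    /** Parse "k=v" pairs separated by ';', trimming whitespace; a bare key maps to null. */
    static Map<String, String> split(String source) {
        Map<String, String> parameters = new HashMap<>();
        for (String keyValue : source.split(";")) {
            if (keyValue.isEmpty()) {
                continue;
            }
            int sep = keyValue.indexOf('=');
            if (sep == 0) {
                continue; // empty key: skipped, as in the method above
            }
            if (sep < 0) {
                parameters.put(keyValue, null); // bare key, no value
            } else {
                parameters.put(keyValue.substring(0, sep).trim(), keyValue.substring(sep + 1).trim());
            }
        }
        return parameters;
    }

    public static void main(String[] args) {
        System.out.println(split("a = 1; b=2 ;flag"));
        // e.g. {a=1, b=2, flag=null} (HashMap iteration order may vary)
    }
}
```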
public Splitter trim(char c) {
Matcher matcher = new CharMatcher(c);
return new Splitter(pairSeparator, keyValueSeparator, matcher, matcher);
} | java |
public Splitter trim(char[] chars) {
Matcher matcher = new CharsMatcher(chars);
return new Splitter(pairSeparator, keyValueSeparator, matcher, matcher);
} | java |
public void close() {
super.clear();
if (indexMap != null){
indexMap.clear();
indexMap = null;
}
if (this.indexStore != null){
getIndexStore().close();
this.indexStore = null ;
}
if (this.cacheStore != null){
getCacheStore().close();
this.cacheStore = null ;
}
} | java |
private double gaussianUpdate(int predicate, int oid, double correctionConstant) {
double param = params[predicate].getParameters()[oid];
double x0 = 0.0;
double modelValue = modelExpects[0][predicate].getParameters()[oid];
double observedValue = observedExpects[predicate].getParameters()[oid];
for (int i = 0; i < 50; i++) {
double tmp = modelValue * Math.exp(correctionConstant * x0);
double f = tmp + (param + x0) / sigma - observedValue;
double fp = tmp * correctionConstant + 1 / sigma;
if (fp == 0) {
break;
}
double x = x0 - f / fp;
if (Math.abs(x - x0) < 0.000001) {
x0 = x;
break;
}
x0 = x;
}
return x0;
} | java |
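The loop in `gaussianUpdate` is a Newton-Raphson solve: each step moves `x0` by `-f/f'`, where `f(x) = modelValue * exp(correctionConstant * x) + (param + x) / sigma - observedValue`, stopping when the derivative vanishes or the step drops below 1e-6. A generic standalone sketch of the same iteration pattern, applied to a simple hypothetical function:

```java
import java.util.function.DoubleUnaryOperator;

public class NewtonSketch {
    /** Newton-Raphson: iterate x <- x - f(x)/f'(x) until the step is tiny or iterations run out. */
    static double solve(DoubleUnaryOperator f, DoubleUnaryOperator fPrime, double x0) {
        for (int i = 0; i < 50; i++) {
            double fp = fPrime.applyAsDouble(x0);
            if (fp == 0) {
                break; // flat derivative: give up and keep the current estimate
            }
            double x = x0 - f.applyAsDouble(x0) / fp;
            if (Math.abs(x - x0) < 1e-6) {
                return x;
            }
            x0 = x;
        }
        return x0;
    }

    public static void main(String[] args) {
        // Solve x^2 - 2 = 0, i.e. approximate sqrt(2), starting from x = 1.
        double root = solve(x -> x * x - 2, x -> 2 * x, 1.0);
        System.out.println(root); // approximately 1.4142135623731
    }
}
```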
@Override
final public void writeSuffixTo(Appendable out) throws IOException {
writeSuffix(buffer, out);
buffer.setLength(0);
} | java |
public static <T> T nullIfEmpty(T value) throws IOException {
return isEmpty(value) ? null : value;
} | java |
public static String toCapCase( final String string )
{
if ( string == null )
{
return null;
}
if( string.length() == 1 )
{
return string.toUpperCase();
}
return Character.toUpperCase( string.charAt( 0 ) ) + string.substring( 1 ).toLowerCase();
} | java |
public static void appendToBuffer( final StringBuffer buffer,
final String string,
final String delimiter )
{
if ( string == null )
{
return;
}
//
// Only append the delimiter in front if the buffer isn't empty.
//
if ( buffer.length() == 0 || delimiter == null )
{
buffer.append( string );
}
else
{
buffer.append( delimiter ).append( string );
}
} | java |
@SafeVarargs
public static <T> T coalesce( final T ... things )
{
if( things == null || things.length == 0 )
{
return null;
}
for( T thing : things )
{
if( thing != null )
{
return thing;
}
}
return null;
} | java |
@SafeVarargs
public static <T> T coalesceNonEmpty( final T ... things )
{
if( things == null || things.length == 0 )
{
return null;
}
for( T thing : things )
{
if( thing instanceof CharSequence )
{
if( ! StringUtils.isBlank( (CharSequence) thing ) )
{
return thing;
}
}
else if( thing != null )
{
return thing;
}
}
return null;
} | java |
@Override
public BeanInfo[] getAdditionalBeanInfo() {
try {
return new BeanInfo[] {
Introspector.getBeanInfo(InputTag.class.getSuperclass())
};
} catch(IntrospectionException err) {
throw new AssertionError(err);
}
} | java |
public static String fileNameFromString( final String text )
{
String value = text.replace( ' ', '_' );
if ( value.length() < 48 )
{
return value;
}
return value.substring( 0, 47 );
} | java |
public static int executeToFile( final String[] command,
final File file )
throws
IOException,
InterruptedException
{
return executeToFile( command, file, System.err );
} | java |
public static int executeToFile( final String[] command,
final File file,
final OutputStream stderr )
throws
IOException,
InterruptedException
{
return executeToStreams( command, new FileOutputStream( file ), stderr );
} | java |
public static void makeSecurityCheck( final File file,
final File base )
{
if( ! file.getAbsolutePath().startsWith( base.getAbsolutePath() ) )
{
throw new IllegalArgumentException( "Illegal file path [" + file + "]" );
}
} | java |
public void setLink(Link link) {
setHref(link.getHref());
setHrefAbsolute(link.getHrefAbsolute());
HttpParameters linkParams = link.getParams();
if(linkParams != null) {
for(Map.Entry<String,List<String>> entry : linkParams.getParameterMap().entrySet()) {
String paramName = entry.getKey();
for(String paramValue : entry.getValue()) {
addParam(paramName, paramValue);
}
}
}
this.addLastModified = link.getAddLastModified();
setHreflang(link.getHreflang());
setRel(link.getRel());
setType(link.getType());
setMedia(link.getMedia());
setTitle(link.getTitle());
} | java |
protected static void update(String[] ec, Set<String> predicateSet, Map<String, Integer> counter, int cutoff) {
for (String s : ec) {
Integer val = counter.get(s);
val = val == null ? 1 : val + 1;
counter.put(s, val);
if (!predicateSet.contains(s) && counter.get(s) >= cutoff) {
predicateSet.add(s);
}
}
} | java |
private GrammaticalStructure tagDependencies(List<? extends HasWord> taggedWords) {
GrammaticalStructure gs = nndepParser.predict(taggedWords);
return gs;
} | java |
public String[] getFeatNames() {
String[] namesArray = new String[nameIndexMap.size()];
for (Map.Entry<String, Integer> entry : nameIndexMap.entrySet()) {
namesArray[entry.getValue()] = entry.getKey();
}
return namesArray;
} | java |
public void tagPOS(List<CoreLabel> tokens, Tree tree) {
try {
List<TaggedWord> posList = tree.getChild(0).taggedYield();
for (int i = 0; i < tokens.size(); i++) {
String pos = posList.get(i).tag();
tokens.get(i).setTag(pos);
}
} catch (Exception e) {
tagPOS(tokens); // At least gives you something.
LOG.warn("POS Failed:\n" + tree.pennString());
}
} | java |
public Pair<CoreMap, GrammaticalStructure> parseForCoref(String sentence) {
List<CoreLabel> tokens = stanfordTokenize(sentence);
Tree tree = parser.parse(tokens);
GrammaticalStructure gs = tagDependencies(tree, true);
tagPOS(tokens);
tagLemma(tokens);
tagNamedEntity(tokens);
CoreMap result = new ArrayCoreMap();
result.set(CoreAnnotations.TokensAnnotation.class, tokens);
result.set(TreeCoreAnnotations.TreeAnnotation.class, tree);
GrammaticalStructure.Extras extras = GrammaticalStructure.Extras.NONE;
SemanticGraph deps = SemanticGraphFactory.generateCollapsedDependencies(gs, extras);
SemanticGraph uncollapsedDeps = SemanticGraphFactory.generateUncollapsedDependencies(gs, extras);
SemanticGraph ccDeps = SemanticGraphFactory.generateCCProcessedDependencies(gs, extras);
result.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, deps);
result.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, uncollapsedDeps);
result.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
return new ImmutablePair<>(result, gs);
} | java |
public static boolean causedBy( final Throwable ex,
final Class<? extends Throwable> exceptionClass )
{
Throwable cause = ex;
while( cause != null && ! exceptionClass.isInstance( cause ) )
{
cause = cause.getCause();
}
return cause != null;
} | java |
public static <T extends Throwable> T getCause( final Throwable ex,
final Class<T> exceptionClass )
{
Throwable cause = ex;
while( cause != null && ! exceptionClass.isInstance( cause ) )
{
cause = cause.getCause();
}
return ( cause == null ) ? null : exceptionClass.cast( cause );
} | java |
public static String getMessage( final Throwable ex )
{
String message = ex.getMessage();
//
// It *appears* as though the SQLException hasn't been
// converted to nest? It seems like it has its own
// method of nesting, not sure why. I don't know
// why Sun wouldn't have converted it. Maybe they
// did, but just left these getNextException methods
// on for compatibility? In my java source code, they
// aren't deprecated, though.
//
if ( ex instanceof SQLException )
{
String sqlMessage = getSqlExceptionMessage( (SQLException) ex );
if ( ! StringUtils.isBlank( sqlMessage ) )
{
if ( ! StringUtils.isBlank( message ) )
{
message += "\n" + sqlMessage;
}
else
{
message = sqlMessage;
}
}
}
Throwable cause = ex.getCause();
if ( ex instanceof SamSixException
&& ((SamSixException) ex).isShowThisCauseOnly() )
{
return message;
}
if ( cause != null )
{
String causeMessage = ExceptionUtils.getMessage( cause );
if ( ! StringUtils.isBlank( causeMessage ) )
{
if ( ! StringUtils.isBlank( message ) )
{
//
// ALWAYS use "broadest first" when showing error messages.
// Otherwise, error messages end up with some non-human readable thing at the top,
// confusing users with deeply technical details. This is especially important for user errors.
//
// broadest/non-broadest should be used for stack traces only.
//
message = message + "\n" + causeMessage;
}
else
{
message = causeMessage;
}
}
}
if ( ! StringUtils.isBlank( message ) )
{
return message;
}
return ex.getClass().getName() + ": An error has been detected.";
} | java |