code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
---|---|---|---|---|---|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import SharedHelpers.EventRecordingReporter
import scala.concurrent.{Future, ExecutionContext}
class AsyncWordSpecLikeSpec extends org.scalatest.FunSpec {
describe("AsyncWordSpecLike") {
it("can be used for tests that return Future") {
class ExampleSpec extends AsyncWordSpecLike {
implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global
type FixtureParam = String
def withAsyncFixture(test: OneArgAsyncTest): Future[Outcome] =
test("testing")
val a = 1
"test 1" in { fixture =>
Future {
assert(a == 1)
}
}
"test 2" in { fixture =>
Future {
assert(a == 2)
}
}
"test 3" in { fixture =>
Future {
pending
}
}
"test 4" in { fixture =>
Future {
cancel
}
}
"test 5" ignore { fixture =>
Future {
cancel
}
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
status.waitUntilCompleted()
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
it("can be used for tests that did not return Future") {
class ExampleSpec extends AsyncWordSpecLike {
implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global
type FixtureParam = String
def withAsyncFixture(test: OneArgAsyncTest): Future[Outcome] =
test("testing")
val a = 1
"test 1" in { fixture =>
assert(a == 1)
}
"test 2" in { fixture =>
assert(a == 2)
}
"test 3" in { fixture =>
pending
}
"test 4" in { fixture =>
cancel
}
"test 5" ignore { fixture =>
cancel
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
status.waitUntilCompleted()
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
}
} | SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/fixture/AsyncWordSpecLikeSpec.scala | Scala | apache-2.0 | 4,208 |
package akashic.storage.patch
import akashic.storage.backend.NodePath
import akashic.storage.service.{Acl, Location, Versioning}
case class Bucket(root: NodePath) extends Patch {
val acl = Acl.makeCache(root("acl"))
val versioning = Versioning.makeCache(root("versioning"))
val location = Location.makeCache(root("location"))
val keys: NodePath = root("keys")
def keyPath(name: String): NodePath = keys(name)
def init {
keys.makeDirectory
}
def findKey(name: String): Option[Key] = {
val path = keys(name)
if (path.exists)
Some(Key(this, path))
else
None
}
def listKeys: Iterable[Key] = keys.listDir.map(Key(this, _))
// Since there is no API to change the location of an existing bucket,
// and the location file is always created with the bucket,
// we can use the location file's creation time as the creation time of the bucket.
def creationTime: Long = location.root.getAttr.creationTime
}
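// Editor's illustrative sketch (not part of the original file): how the Bucket API above
// might be driven, given a backend-provided root NodePath. Only members defined in this
// file (listKeys, findKey, creationTime) are used; nothing beyond them is assumed.
object BucketUsageSketch {
  def describe(root: NodePath): String = {
    val bucket = Bucket(root)
    val keyCount = bucket.listKeys.size                    // number of keys under keys/
    val hasReadme = bucket.findKey("readme").isDefined     // lookup by key name
    s"created=${bucket.creationTime} keys=$keyCount readme=$hasReadme"
  }
}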
| akiradeveloper/akashic-storage | src/main/scala/akashic/storage/patch/Bucket.scala | Scala | apache-2.0 | 933 |
// Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package org.scalamock.test.specs2
import org.scalamock.specs2.MockContext
import org.scalamock.test.mockable.TestTrait
import org.specs2.mutable.Specification
/**
* Tests for mocks defined in fixture-contexts
*
* Tests for issue #25
*/
class FixtureContextTest extends Specification {
trait TestSetup extends MockContext {
val mockedTrait = mock[TestTrait]
val input = 1
val output = "one"
}
trait TestSetupWithExpectationsPredefined extends TestSetup {
(mockedTrait.oneParamMethod _).expects(input).returning(output)
}
trait TestSetupWithHandlerCalledDuringInitialization extends TestSetupWithExpectationsPredefined {
mockedTrait.oneParamMethod(input) must_== output
}
"Specs2 suite" should {
"allow to use mock defined in fixture-context" in new TestSetup {
(mockedTrait.oneParamMethod _).expects(input).returning(output)
(mockedTrait.oneParamMethod _).expects(2).returning("two")
mockedTrait.oneParamMethod(input) must_== output
mockedTrait.oneParamMethod(2) must_== "two"
}
"allow to use mock defined in fixture-context with expecations predefined" in new TestSetupWithExpectationsPredefined {
(mockedTrait.oneParamMethod _).expects(2).returning("two")
mockedTrait.oneParamMethod(input) must_== output
mockedTrait.oneParamMethod(2) must_== "two"
}
"allow mock defined in fixture-context to be used during context initialization" in new TestSetupWithHandlerCalledDuringInitialization {
(mockedTrait.oneParamMethod _).expects(2).returning("two")
mockedTrait.oneParamMethod(2) must_== "two"
}
}
}
| hypertino/ScalaMock | frameworks/specs2/shared/src/test/scala/org/scalamock/test/specs2/FixtureContextTest.scala | Scala | mit | 2,821 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Alexander Spoon
*/
package scala
package tools.nsc
package interpreter
import scala.language.{ implicitConversions, existentials }
import scala.annotation.tailrec
import Predef.{ println => _, _ }
import interpreter.session._
import StdReplTags._
import scala.reflect.api.{Mirror, Universe, TypeCreator}
import scala.util.Properties.{ jdkHome, javaVersion, versionString, javaVmName }
import scala.tools.nsc.util.{ ClassPath, Exceptional, stringFromWriter, stringFromStream }
import scala.reflect.{ClassTag, classTag}
import scala.reflect.internal.util.{ BatchSourceFile, ScalaClassLoader }
import ScalaClassLoader._
import scala.reflect.io.{ File, Directory }
import scala.tools.util._
import scala.collection.generic.Clearable
import scala.concurrent.{ ExecutionContext, Await, Future, future }
import ExecutionContext.Implicits._
import java.io.{ BufferedReader, FileReader }
/** The Scala interactive shell. It provides a read-eval-print loop
* around the Interpreter class.
* After instantiation, clients should call the main() method.
*
* If no in0 is specified, then input will come from the console, and
* the class will attempt to provide input editing feature such as
* input history.
*
* @author Moez A. Abdel-Gawad
* @author Lex Spoon
* @version 1.2
*/
class SparkILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
extends AnyRef
with LoopCommands
{
def this(in0: BufferedReader, out: JPrintWriter) = this(Some(in0), out)
def this() = this(None, new JPrintWriter(Console.out, true))
//
// @deprecated("Use `intp` instead.", "2.9.0") def interpreter = intp
// @deprecated("Use `intp` instead.", "2.9.0") def interpreter_= (i: Interpreter): Unit = intp = i
var in: InteractiveReader = _ // the input stream from which commands come
var settings: Settings = _
var intp: SparkIMain = _
var globalFuture: Future[Boolean] = _
protected def asyncMessage(msg: String) {
if (isReplInfo || isReplPower)
echoAndRefresh(msg)
}
def initializeSpark() {
intp.beQuietDuring {
command( """
@transient val sc = {
val _sc = org.apache.spark.repl.Main.createSparkContext()
println("Spark context available as sc.")
_sc
}
""")
command( """
@transient val sqlContext = {
val _sqlContext = org.apache.spark.repl.Main.createSQLContext()
println("SQL context available as sqlContext.")
_sqlContext
}
""")
command("import org.apache.spark.SparkContext._")
command("import sqlContext.implicits._")
command("import sqlContext.sql")
command("import org.apache.spark.sql.functions._")
}
}
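// Illustrative transcript (editor's note, not in the original source) of what the bindings
// created above give a repl user once the shell is up:
//   scala> sc.parallelize(1 to 10).count()
//   res0: Long = 10
//   scala> sqlContext.sql("SELECT 1 AS one").collect()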
/** Print a welcome message */
def printWelcome() {
import org.apache.spark.SPARK_VERSION
echo("""Welcome to
____ __
/ __/__ ___ _____/ /__
_\\ \\/ _ \\/ _ `/ __/ '_/
/___/ .__/\\_,_/_/ /_/\\_\\ version %s
/_/
""".format(SPARK_VERSION))
val welcomeMsg = "Using Scala %s (%s, Java %s)".format(
versionString, javaVmName, javaVersion)
echo(welcomeMsg)
echo("Type in expressions to have them evaluated.")
echo("Type :help for more information.")
}
override def echoCommandMessage(msg: String) {
intp.reporter printUntruncatedMessage msg
}
// lazy val power = new Power(intp, new StdReplVals(this))(tagOfStdReplVals, classTag[StdReplVals])
def history = in.history
// classpath entries added via :cp
var addedClasspath: String = ""
/** A reverse list of commands to replay if the user requests a :replay */
var replayCommandStack: List[String] = Nil
/** A list of commands to replay if the user requests a :replay */
def replayCommands = replayCommandStack.reverse
/** Record a command for replay should the user request a :replay */
def addReplay(cmd: String) = replayCommandStack ::= cmd
def savingReplayStack[T](body: => T): T = {
val saved = replayCommandStack
try body
finally replayCommandStack = saved
}
def savingReader[T](body: => T): T = {
val saved = in
try body
finally in = saved
}
/** Close the interpreter and set the var to null. */
def closeInterpreter() {
if (intp ne null) {
intp.close()
intp = null
}
}
class SparkILoopInterpreter extends SparkIMain(settings, out) {
outer =>
override lazy val formatting = new Formatting {
def prompt = SparkILoop.this.prompt
}
override protected def parentClassLoader =
settings.explicitParentLoader.getOrElse( classOf[SparkILoop].getClassLoader )
}
/** Create a new interpreter. */
def createInterpreter() {
if (addedClasspath != "")
settings.classpath append addedClasspath
intp = new SparkILoopInterpreter
}
/** print a friendly help message */
def helpCommand(line: String): Result = {
if (line == "") helpSummary()
else uniqueCommand(line) match {
case Some(lc) => echo("\n" + lc.help)
case _ => ambiguousError(line)
}
}
private def helpSummary() = {
val usageWidth = commands map (_.usageMsg.length) max
val formatStr = "%-" + usageWidth + "s %s"
echo("All commands can be abbreviated, e.g. :he instead of :help.")
commands foreach { cmd =>
echo(formatStr.format(cmd.usageMsg, cmd.help))
}
}
private def ambiguousError(cmd: String): Result = {
matchingCommands(cmd) match {
case Nil => echo(cmd + ": no such command. Type :help for help.")
case xs => echo(cmd + " is ambiguous: did you mean " + xs.map(":" + _.name).mkString(" or ") + "?")
}
Result(keepRunning = true, None)
}
private def matchingCommands(cmd: String) = commands filter (_.name startsWith cmd)
private def uniqueCommand(cmd: String): Option[LoopCommand] = {
// this lets us add commands willy-nilly and only requires enough command to disambiguate
matchingCommands(cmd) match {
case List(x) => Some(x)
// exact match OK even if otherwise appears ambiguous
case xs => xs find (_.name == cmd)
}
}
/** Show the history */
lazy val historyCommand = new LoopCommand("history", "show the history (optional num is commands to show)") {
override def usage = "[num]"
def defaultLines = 20
def apply(line: String): Result = {
if (history eq NoHistory)
return "No history available."
val xs = words(line)
val current = history.index
val count = try xs.head.toInt catch { case _: Exception => defaultLines }
val lines = history.asStrings takeRight count
val offset = current - lines.size + 1
for ((line, index) <- lines.zipWithIndex)
echo("%3d %s".format(index + offset, line))
}
}
// Called when you know you are most likely breaking into the middle
// of a line being typed. This softens the blow.
protected def echoAndRefresh(msg: String) = {
echo("\\n" + msg)
in.redrawLine()
}
protected def echo(msg: String) = {
out println msg
out.flush()
}
/** Search the history */
def searchHistory(_cmdline: String) {
val cmdline = _cmdline.toLowerCase
val offset = history.index - history.size + 1
for ((line, index) <- history.asStrings.zipWithIndex ; if line.toLowerCase contains cmdline)
echo("%d %s".format(index + offset, line))
}
private val currentPrompt = Properties.shellPromptString
/** Prompt to print when awaiting input */
def prompt = currentPrompt
import LoopCommand.{ cmd, nullary }
/** Standard commands **/
lazy val standardCommands = List(
cmd("cp", "<path>", "add a jar or directory to the classpath", addClasspath),
cmd("edit", "<id>|<line>", "edit history", editCommand),
cmd("help", "[command]", "print this summary or command-specific help", helpCommand),
historyCommand,
cmd("h?", "<string>", "search the history", searchHistory),
cmd("imports", "[name name ...]", "show import history, identifying sources of names", importsCommand),
//cmd("implicits", "[-v]", "show the implicits in scope", intp.implicitsCommand),
cmd("javap", "<path|class>", "disassemble a file or class name", javapCommand),
cmd("line", "<id>|<line>", "place line(s) at the end of history", lineCommand),
cmd("load", "<path>", "interpret lines in a file", loadCommand),
cmd("paste", "[-raw] [path]", "enter paste mode or paste a file", pasteCommand),
// nullary("power", "enable power user mode", powerCmd),
nullary("quit", "exit the interpreter", () => Result(keepRunning = false, None)),
nullary("replay", "reset execution and replay all previous commands", replay),
nullary("reset", "reset the repl to its initial state, forgetting all session entries", resetCommand),
cmd("save", "<path>", "save replayable session to a file", saveCommand),
shCommand,
cmd("settings", "[+|-]<options>", "+enable/-disable flags, set compiler options", changeSettings),
nullary("silent", "disable/enable automatic printing of results", verbosity),
// cmd("type", "[-v] <expr>", "display the type of an expression without evaluating it", typeCommand),
// cmd("kind", "[-v] <expr>", "display the kind of expression's type", kindCommand),
nullary("warnings", "show the suppressed warnings from the most recent line which had any", warningsCommand)
)
/** Power user commands */
// lazy val powerCommands: List[LoopCommand] = List(
// cmd("phase", "<phase>", "set the implicit phase for power commands", phaseCommand)
// )
private def importsCommand(line: String): Result = {
val tokens = words(line)
val handlers = intp.languageWildcardHandlers ++ intp.importHandlers
handlers.filterNot(_.importedSymbols.isEmpty).zipWithIndex foreach {
case (handler, idx) =>
val (types, terms) = handler.importedSymbols partition (_.name.isTypeName)
val imps = handler.implicitSymbols
val found = tokens filter (handler importsSymbolNamed _)
val typeMsg = if (types.isEmpty) "" else types.size + " types"
val termMsg = if (terms.isEmpty) "" else terms.size + " terms"
val implicitMsg = if (imps.isEmpty) "" else imps.size + " are implicit"
val foundMsg = if (found.isEmpty) "" else found.mkString(" // imports: ", ", ", "")
val statsMsg = List(typeMsg, termMsg, implicitMsg) filterNot (_ == "") mkString ("(", ", ", ")")
intp.reporter.printMessage("%2d) %-30s %s%s".format(
idx + 1,
handler.importString,
statsMsg,
foundMsg
))
}
}
private def findToolsJar() = PathResolver.SupplementalLocations.platformTools
private def addToolsJarToLoader() = {
val cl = findToolsJar() match {
case Some(tools) => ScalaClassLoader.fromURLs(Seq(tools.toURL), intp.classLoader)
case _ => intp.classLoader
}
if (Javap.isAvailable(cl)) {
repldbg(":javap available.")
cl
}
else {
repldbg(":javap unavailable: no tools.jar at " + jdkHome)
intp.classLoader
}
}
//
// protected def newJavap() =
// JavapClass(addToolsJarToLoader(), new IMain.ReplStrippingWriter(intp), Some(intp))
//
// private lazy val javap = substituteAndLog[Javap]("javap", NoJavap)(newJavap())
// Still todo: modules.
// private def typeCommand(line0: String): Result = {
// line0.trim match {
// case "" => ":type [-v] <expression>"
// case s => intp.typeCommandInternal(s stripPrefix "-v " trim, verbose = s startsWith "-v ")
// }
// }
// private def kindCommand(expr: String): Result = {
// expr.trim match {
// case "" => ":kind [-v] <expression>"
// case s => intp.kindCommandInternal(s stripPrefix "-v " trim, verbose = s startsWith "-v ")
// }
// }
private def warningsCommand(): Result = {
if (intp.lastWarnings.isEmpty)
"Can't find any cached warnings."
else
intp.lastWarnings foreach { case (pos, msg) => intp.reporter.warning(pos, msg) }
}
private def changeSettings(args: String): Result = {
def showSettings() = {
for (s <- settings.userSetSettings.toSeq.sorted) echo(s.toString)
}
def updateSettings() = {
// put aside +flag options
val (pluses, rest) = (args split "\\s+").toList partition (_.startsWith("+"))
val tmps = new Settings
val (ok, leftover) = tmps.processArguments(rest, processAll = true)
if (!ok) echo("Bad settings request.")
else if (leftover.nonEmpty) echo("Unprocessed settings.")
else {
// boolean flags set-by-user on tmp copy should be off, not on
val offs = tmps.userSetSettings filter (_.isInstanceOf[Settings#BooleanSetting])
val (minuses, nonbools) = rest partition (arg => offs exists (_ respondsTo arg))
// update non-flags
settings.processArguments(nonbools, processAll = true)
// also snag multi-value options for clearing, e.g. -Ylog: and -language:
for {
s <- settings.userSetSettings
if s.isInstanceOf[Settings#MultiStringSetting] || s.isInstanceOf[Settings#PhasesSetting]
if nonbools exists (arg => arg.head == '-' && arg.last == ':' && (s respondsTo arg.init))
} s match {
case c: Clearable => c.clear()
case _ =>
}
def update(bs: Seq[String], name: String=>String, setter: Settings#Setting=>Unit) = {
for (b <- bs)
settings.lookupSetting(name(b)) match {
case Some(s) =>
if (s.isInstanceOf[Settings#BooleanSetting]) setter(s)
else echo(s"Not a boolean flag: $b")
case _ =>
echo(s"Not an option: $b")
}
}
update(minuses, identity, _.tryToSetFromPropertyValue("false")) // turn off
update(pluses, "-" + _.drop(1), _.tryToSet(Nil)) // turn on
}
}
if (args.isEmpty) showSettings() else updateSettings()
}
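// Worked example (editor's note, not in the original source) of the ":settings" handling above:
// for args = "+deprecation -Ylog:typer", the initial partition on "+" yields
//   pluses = List("+deprecation")  (boolean flags, turned on via tryToSet)
//   rest   = List("-Ylog:typer")   (passed through Settings.processArguments)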
private def javapCommand(line: String): Result = {
// if (javap == null)
// ":javap unavailable, no tools.jar at %s. Set JDK_HOME.".format(jdkHome)
// else if (line == "")
// ":javap [-lcsvp] [path1 path2 ...]"
// else
// javap(words(line)) foreach { res =>
// if (res.isError) return "Failed: " + res.value
// else res.show()
// }
}
private def pathToPhaseWrapper = intp.originalPath("$r") + ".phased.atCurrent"
private def phaseCommand(name: String): Result = {
// val phased: Phased = power.phased
// import phased.NoPhaseName
//
// if (name == "clear") {
// phased.set(NoPhaseName)
// intp.clearExecutionWrapper()
// "Cleared active phase."
// }
// else if (name == "") phased.get match {
// case NoPhaseName => "Usage: :phase <expr> (e.g. typer, erasure.next, erasure+3)"
// case ph => "Active phase is '%s'. (To clear, :phase clear)".format(phased.get)
// }
// else {
// val what = phased.parse(name)
// if (what.isEmpty || !phased.set(what))
// "'" + name + "' does not appear to represent a valid phase."
// else {
// intp.setExecutionWrapper(pathToPhaseWrapper)
// val activeMessage =
// if (what.toString.length == name.length) "" + what
// else "%s (%s)".format(what, name)
//
// "Active phase is now: " + activeMessage
// }
// }
}
/** Available commands */
def commands: List[LoopCommand] = standardCommands ++ (
// if (isReplPower)
// powerCommands
// else
Nil
)
val replayQuestionMessage =
"""|That entry seems to have slain the compiler. Shall I replay
|your session? I can re-run each line except the last one.
|[y/n]
""".trim.stripMargin
private val crashRecovery: PartialFunction[Throwable, Boolean] = {
case ex: Throwable =>
val (err, explain) = (
if (intp.isInitializeComplete)
(intp.global.throwableAsString(ex), "")
else
(ex.getMessage, "The compiler did not initialize.\\n")
)
echo(err)
ex match {
case _: NoSuchMethodError | _: NoClassDefFoundError =>
echo("\\nUnrecoverable error.")
throw ex
case _ =>
def fn(): Boolean =
try in.readYesOrNo(explain + replayQuestionMessage, { echo("\nYou must enter y or n.") ; fn() })
catch { case _: RuntimeException => false }
if (fn()) replay()
else echo("\\nAbandoning crashed session.")
}
true
}
// return false if repl should exit
def processLine(line: String): Boolean = {
import scala.concurrent.duration._
Await.ready(globalFuture, 60.seconds)
(line ne null) && (command(line) match {
case Result(false, _) => false
case Result(_, Some(line)) => addReplay(line) ; true
case _ => true
})
}
private def readOneLine() = {
out.flush()
in readLine prompt
}
/** The main read-eval-print loop for the repl. It calls
* command() for each line of input, and stops when
* command() returns false.
*/
@tailrec final def loop() {
if ( try processLine(readOneLine()) catch crashRecovery )
loop()
}
/** interpret all lines from a specified file */
def interpretAllFrom(file: File) {
savingReader {
savingReplayStack {
file applyReader { reader =>
in = SimpleReader(reader, out, interactive = false)
echo("Loading " + file + "...")
loop()
}
}
}
}
/** create a new interpreter and replay the given commands */
def replay() {
reset()
if (replayCommandStack.isEmpty)
echo("Nothing to replay.")
else for (cmd <- replayCommands) {
echo("Replaying: " + cmd) // flush because maybe cmd will have its own output
command(cmd)
echo("")
}
}
def resetCommand() {
echo("Resetting interpreter state.")
if (replayCommandStack.nonEmpty) {
echo("Forgetting this session history:\\n")
replayCommands foreach echo
echo("")
replayCommandStack = Nil
}
if (intp.namedDefinedTerms.nonEmpty)
echo("Forgetting all expression results and named terms: " + intp.namedDefinedTerms.mkString(", "))
if (intp.definedTypes.nonEmpty)
echo("Forgetting defined types: " + intp.definedTypes.mkString(", "))
reset()
}
def reset() {
intp.reset()
unleashAndSetPhase()
}
def lineCommand(what: String): Result = editCommand(what, None)
// :edit id or :edit line
def editCommand(what: String): Result = editCommand(what, Properties.envOrNone("EDITOR"))
def editCommand(what: String, editor: Option[String]): Result = {
def diagnose(code: String) = {
echo("The edited code is incomplete!\\n")
val errless = intp compileSources new BatchSourceFile("<pastie>", s"object pastel {\\n$code\\n}")
if (errless) echo("The compiler reports no errors.")
}
def historicize(text: String) = history match {
case jlh: JLineHistory => text.lines foreach jlh.add ; jlh.moveToEnd() ; true
case _ => false
}
def edit(text: String): Result = editor match {
case Some(ed) =>
val tmp = File.makeTemp()
tmp.writeAll(text)
try {
val pr = new ProcessResult(s"$ed ${tmp.path}")
pr.exitCode match {
case 0 =>
tmp.safeSlurp() match {
case Some(edited) if edited.trim.isEmpty => echo("Edited text is empty.")
case Some(edited) =>
echo(edited.lines map ("+" + _) mkString "\n")
val res = intp interpret edited
if (res == IR.Incomplete) diagnose(edited)
else {
historicize(edited)
Result(lineToRecord = Some(edited), keepRunning = true)
}
case None => echo("Can't read edited text. Did you delete it?")
}
case x => echo(s"Error exit from $ed ($x), ignoring")
}
} finally {
tmp.delete()
}
case None =>
if (historicize(text)) echo("Placing text in recent history.")
else echo(f"No EDITOR defined and you can't change history, echoing your text:%n$text")
}
// if what is a number, use it as a line number or range in history
def isNum = what forall (c => c.isDigit || c == '-' || c == '+')
// except that "-" means last value
def isLast = (what == "-")
if (isLast || !isNum) {
val name = if (isLast) intp.mostRecentVar else what
val sym = intp.symbolOfIdent(name)
intp.prevRequestList collectFirst { case r if r.defines contains sym => r } match {
case Some(req) => edit(req.line)
case None => echo(s"No symbol in scope: $what")
}
} else try {
val s = what
// line 123, 120+3, -3, 120-123, 120-, note -3 is not 0-3 but (cur-3,cur)
val (start, len) =
if ((s indexOf '+') > 0) {
val (a,b) = s splitAt (s indexOf '+')
(a.toInt, b.drop(1).toInt)
} else {
(s indexOf '-') match {
case -1 => (s.toInt, 1)
case 0 => val n = s.drop(1).toInt ; (history.index - n, n)
case _ if s.last == '-' => val n = s.init.toInt ; (n, history.index - n)
case i => val n = s.take(i).toInt ; (n, s.drop(i+1).toInt - n)
}
}
import scala.collection.JavaConverters._
val index = (start - 1) max 0
val text = history match {
case jlh: JLineHistory => jlh.entries(index).asScala.take(len) map (_.value) mkString "\n"
case _ => history.asStrings.slice(index, index + len) mkString "\n"
}
edit(text)
} catch {
case _: NumberFormatException => echo(s"Bad range '$what'")
echo("Use line 123, 120+3, -3, 120-123, 120-, note -3 is not 0-3 but (cur-3,cur)")
}
}
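// Worked examples (editor's note, not in the original source) of the history range grammar
// handled above, assuming the current history index is 130:
//   :edit 123     -> (start = 123, len = 1)
//   :edit 120+3   -> (start = 120, len = 3)
//   :edit -3      -> (start = 127, len = 3)    // (cur-3, cur), not lines 0-3
//   :edit 120-123 -> (start = 120, len = 3)
//   :edit 120-    -> (start = 120, len = 10)   // from 120 up to the current index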
/** fork a shell and run a command */
lazy val shCommand = new LoopCommand("sh", "run a shell command (result is implicitly => List[String])") {
override def usage = "<command line>"
def apply(line: String): Result = line match {
case "" => showUsage()
case _ =>
val toRun = s"new ${classOf[ProcessResult].getName}(${string2codeQuoted(line)})"
intp interpret toRun
()
}
}
def withFile[A](filename: String)(action: File => A): Option[A] = {
val res = Some(File(filename)) filter (_.exists) map action
if (res.isEmpty) echo("That file does not exist") // courtesy side-effect
res
}
def loadCommand(arg: String) = {
var shouldReplay: Option[String] = None
withFile(arg)(f => {
interpretAllFrom(f)
shouldReplay = Some(":load " + arg)
})
Result(keepRunning = true, shouldReplay)
}
def saveCommand(filename: String): Result = (
if (filename.isEmpty) echo("File name is required.")
else if (replayCommandStack.isEmpty) echo("No replay commands in session")
else File(filename).printlnAll(replayCommands: _*)
)
def addClasspath(arg: String): Unit = {
val f = File(arg).normalize
if (f.exists) {
addedClasspath = ClassPath.join(addedClasspath, f.path)
val totalClasspath = ClassPath.join(settings.classpath.value, addedClasspath)
echo("Added '%s'. Your new classpath is:\\n\\"%s\\"".format(f.path, totalClasspath))
replay()
}
else echo("The path '" + f + "' doesn't seem to exist.")
}
def powerCmd(): Result = {
if (isReplPower) "Already in power mode."
else enablePowerMode(isDuringInit = false)
}
def enablePowerMode(isDuringInit: Boolean) = {
replProps.power setValue true
unleashAndSetPhase()
// asyncEcho(isDuringInit, power.banner)
}
private def unleashAndSetPhase() {
if (isReplPower) {
// power.unleash()
// Set the phase to "typer"
// intp beSilentDuring phaseCommand("typer")
}
}
def asyncEcho(async: Boolean, msg: => String) {
if (async) asyncMessage(msg)
else echo(msg)
}
def verbosity() = {
val old = intp.printResults
intp.printResults = !old
echo("Switched " + (if (old) "off" else "on") + " result printing.")
}
/** Run one command submitted by the user. Two values are returned:
* (1) whether to keep running, (2) the line to record for replay,
* if any. */
def command(line: String): Result = {
if (line startsWith ":") {
val cmd = line.tail takeWhile (x => !x.isWhitespace)
uniqueCommand(cmd) match {
case Some(lc) => lc(line.tail stripPrefix cmd dropWhile (_.isWhitespace))
case _ => ambiguousError(cmd)
}
}
else if (intp.global == null) Result(keepRunning = false, None) // Notice failure to create compiler
else Result(keepRunning = true, interpretStartingWith(line))
}
private def readWhile(cond: String => Boolean) = {
Iterator continually in.readLine("") takeWhile (x => x != null && cond(x))
}
def pasteCommand(arg: String): Result = {
var shouldReplay: Option[String] = None
def result = Result(keepRunning = true, shouldReplay)
val (raw, file) =
if (arg.isEmpty) (false, None)
else {
val r = """(-raw)?(\\s+)?([^\\-]\\S*)?""".r
arg match {
case r(flag, sep, name) =>
if (flag != null && name != null && sep == null)
echo(s"""I assume you mean "$flag $name"?""")
(flag != null, Option(name))
case _ =>
echo("usage: :paste -raw file")
return result
}
}
val code = file match {
case Some(name) =>
withFile(name)(f => {
shouldReplay = Some(s":paste $arg")
val s = f.slurp.trim
if (s.isEmpty) echo(s"File contains no code: $f")
else echo(s"Pasting file $f...")
s
}) getOrElse ""
case None =>
echo("// Entering paste mode (ctrl-D to finish)\\n")
val text = (readWhile(_ => true) mkString "\\n").trim
if (text.isEmpty) echo("\\n// Nothing pasted, nothing gained.\\n")
else echo("\\n// Exiting paste mode, now interpreting.\\n")
text
}
def interpretCode() = {
val res = intp interpret code
// if input is incomplete, let the compiler try to say why
if (res == IR.Incomplete) {
echo("The pasted code is incomplete!\\n")
// Remembrance of Things Pasted in an object
val errless = intp compileSources new BatchSourceFile("<pastie>", s"object pastel {\\n$code\\n}")
if (errless) echo("...but compilation found no error? Good luck with that.")
}
}
def compileCode() = {
val errless = intp compileSources new BatchSourceFile("<pastie>", code)
if (!errless) echo("There were compilation errors!")
}
if (code.nonEmpty) {
if (raw) compileCode() else interpretCode()
}
result
}
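// Worked examples (editor's note, not in the original source) of how the ":paste" argument
// is matched by the regex above:
//   ":paste"                -> raw = false, file = None                (interactive paste mode)
//   ":paste foo.scala"      -> raw = false, file = Some("foo.scala")   (interpreted)
//   ":paste -raw foo.scala" -> raw = true,  file = Some("foo.scala")   (compiled, not interpreted)
//   ":paste -rawfoo.scala"  -> prints the "I assume you mean ..." hint, then proceeds as -raw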
private object paste extends Pasted {
val ContinueString = " | "
val PromptString = "scala> "
def interpret(line: String): Unit = {
echo(line.trim)
intp interpret line
echo("")
}
def transcript(start: String) = {
echo("\\n// Detected repl transcript paste: ctrl-D to finish.\\n")
apply(Iterator(start) ++ readWhile(_.trim != PromptString.trim))
}
}
import paste.{ ContinueString, PromptString }
/** Interpret expressions starting with the first line.
* Read lines until a complete compilation unit is available
* or until a syntax error has been seen. If a full unit is
* read, go ahead and interpret it. Return the full string
* to be recorded for replay, if any.
*/
def interpretStartingWith(code: String): Option[String] = {
// signal to the completion object that non-completion input has been received
in.completion.resetVerbosity()
def reallyInterpret = {
val reallyResult = intp.interpret(code)
(reallyResult, reallyResult match {
case IR.Error => None
case IR.Success => Some(code)
case IR.Incomplete =>
if (in.interactive && code.endsWith("\n\n")) {
echo("You typed two blank lines. Starting a new command.")
None
}
else in.readLine(ContinueString) match {
case null =>
// we know compilation is going to fail since we're at EOF and the
// parser thinks the input is still incomplete, but since this is
// a file being read non-interactively we want to fail. So we send
// it straight to the compiler for the nice error message.
intp.compileString(code)
None
case line => interpretStartingWith(code + "\n" + line)
}
})
}
/** Here we place ourselves between the user and the interpreter and examine
* the input they are ostensibly submitting. We intervene in several cases:
*
* 1) If the line starts with "scala> " it is assumed to be an interpreter paste.
* 2) If the line starts with "." (but not ".." or "./") it is treated as an invocation
* on the previous result.
* 3) If the Completion object's execute returns Some(_), we inject that value
* and avoid the interpreter, as it's likely not valid scala code.
*/
if (code == "") None
else if (!paste.running && code.trim.startsWith(PromptString)) {
paste.transcript(code)
None
}
else if (Completion.looksLikeInvocation(code) && intp.mostRecentVar != "") {
interpretStartingWith(intp.mostRecentVar + code)
}
else if (code.trim startsWith "//") {
// line comment, do nothing
None
}
else
reallyInterpret._2
}
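// Illustration (editor's note, not in the original source) of case 2 above: after
//   scala> List(1, 2, 3)
//   res0: List[Int] = List(1, 2, 3)
// typing ".sum" is rewritten to "res0.sum", i.e. an invocation on the most recent result.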
// runs :load `file` on any files passed via -i
def loadFiles(settings: Settings) = settings match {
case settings: GenericRunnerSettings =>
for (filename <- settings.loadfiles.value) {
val cmd = ":load " + filename
command(cmd)
addReplay(cmd)
echo("")
}
case _ =>
}
/** Tries to create a JLineReader, falling back to SimpleReader:
* unless settings or properties are such that it should start
* with SimpleReader.
*/
def chooseReader(settings: Settings): InteractiveReader = {
if (settings.Xnojline || Properties.isEmacsShell)
SimpleReader()
else try new JLineReader(
if (settings.noCompletion) NoCompletion
else new SparkJLineCompletion(intp)
)
catch {
case ex @ (_: Exception | _: NoClassDefFoundError) =>
echo("Failed to created JLineReader: " + ex + "\\nFalling back to SimpleReader.")
SimpleReader()
}
}
protected def tagOfStaticClass[T: ClassTag]: u.TypeTag[T] =
u.TypeTag[T](
m,
new TypeCreator {
def apply[U <: Universe with Singleton](m: Mirror[U]): U # Type =
m.staticClass(classTag[T].runtimeClass.getName).toTypeConstructor.asInstanceOf[U # Type]
})
private def loopPostInit() {
// Bind intp somewhere out of the regular namespace where
// we can get at it in generated code.
intp.quietBind(NamedParam[SparkIMain]("$intp", intp)(tagOfStaticClass[SparkIMain], classTag[SparkIMain]))
// Auto-run code via some setting.
( replProps.replAutorunCode.option
flatMap (f => io.File(f).safeSlurp())
foreach (intp quietRun _)
)
// classloader and power mode setup
intp.setContextClassLoader()
if (isReplPower) {
// replProps.power setValue true
// unleashAndSetPhase()
// asyncMessage(power.banner)
}
// SI-7418 Now, and only now, can we enable TAB completion.
in match {
case x: JLineReader => x.consoleReader.postInit
case _ =>
}
}
def process(settings: Settings): Boolean = savingContextLoader {
this.settings = settings
createInterpreter()
// sets in to some kind of reader depending on environmental cues
in = in0.fold(chooseReader(settings))(r => SimpleReader(r, out, interactive = true))
globalFuture = future {
intp.initializeSynchronous()
loopPostInit()
!intp.reporter.hasErrors
}
import scala.concurrent.duration._
Await.ready(globalFuture, 10 seconds)
printWelcome()
initializeSpark()
loadFiles(settings)
try loop()
catch AbstractOrMissingHandler()
finally closeInterpreter()
true
}
@deprecated("Use `process` instead", "2.9.0")
def main(settings: Settings): Unit = process(settings) //used by sbt
}
object SparkILoop {
implicit def loopToInterpreter(repl: SparkILoop): SparkIMain = repl.intp
// Designed primarily for use by test code: takes a String with a
// bunch of code and prints out a transcript of what it would look
// like if you'd just typed it into the repl.
def runForTranscript(code: String, settings: Settings): String = {
import java.io.{ BufferedReader, StringReader, OutputStreamWriter }
stringFromStream { ostream =>
Console.withOut(ostream) {
val output = new JPrintWriter(new OutputStreamWriter(ostream), true) {
override def write(str: String) = {
// completely skip continuation lines
if (str forall (ch => ch.isWhitespace || ch == '|')) ()
else super.write(str)
}
}
val input = new BufferedReader(new StringReader(code.trim + "\n")) {
override def readLine(): String = {
val s = super.readLine()
// helping out by printing the line being interpreted.
if (s != null)
output.println(s)
s
}
}
val repl = new SparkILoop(input, output)
if (settings.classpath.isDefault)
settings.classpath.value = sys.props("java.class.path")
repl process settings
}
}
}
/** Creates an interpreter loop with default settings and feeds
* the given code to it as input.
*/
def run(code: String, sets: Settings = new Settings): String = {
import java.io.{ BufferedReader, StringReader, OutputStreamWriter }
stringFromStream { ostream =>
Console.withOut(ostream) {
val input = new BufferedReader(new StringReader(code))
val output = new JPrintWriter(new OutputStreamWriter(ostream), true)
val repl = new SparkILoop(input, output)
if (sets.classpath.isDefault)
sets.classpath.value = sys.props("java.class.path")
repl process sets
}
}
}
def run(lines: List[String]): String = run(lines map (_ + "\n") mkString)
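/** Editor's illustrative sketch (not part of the original file): feeds a couple of lines
 *  to a fresh repl and returns everything it printed. Needs a full Spark repl classpath
 *  at runtime to actually run. */
def runSumDemo(): String = run("val xs = 1 to 5" :: "xs.sum" :: Nil)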
}
| hengyicai/OnlineAggregationUCAS | repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala | Scala | apache-2.0 | 34,765 |
package sjc.delta.std
import org.scalatest.{Matchers, FreeSpec}
import sjc.delta.Delta.DeltaOps
import sjc.delta.std.set.{deltaSet, SetPatch}
class SetSpec extends FreeSpec with Matchers {
"set" in {
Set(1, 2).delta(Set(2, 3)) shouldBe SetPatch(removed = Set(1), added = Set(3))
}
} | stacycurl/delta | core/src/test/scala/sjc/delta/std/SetSpec.scala | Scala | apache-2.0 | 293 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.TimeUnit
import kafka.api.FetchResponsePartitionData
import kafka.api.PartitionFetchInfo
import kafka.common.UnknownTopicOrPartitionException
import kafka.common.NotLeaderForPartitionException
import kafka.common.TopicAndPartition
import kafka.metrics.KafkaMetricsGroup
import scala.collection._
case class FetchPartitionStatus(startOffsetMetadata: LogOffsetMetadata, fetchInfo: PartitionFetchInfo) {
override def toString = "[startOffsetMetadata: " + startOffsetMetadata + ", " +
"fetchInfo: " + fetchInfo + "]"
}
/**
* The fetch metadata maintained by the delayed fetch operation
*/
case class FetchMetadata(fetchMinBytes: Int,
fetchOnlyLeader: Boolean,
fetchOnlyCommitted: Boolean,
isFromFollower: Boolean,
fetchPartitionStatus: Map[TopicAndPartition, FetchPartitionStatus]) {
override def toString = "[minBytes: " + fetchMinBytes + ", " +
"onlyLeader:" + fetchOnlyLeader + ", "
"onlyCommitted: " + fetchOnlyCommitted + ", "
"partitionStatus: " + fetchPartitionStatus + "]"
}
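// Illustrative construction (editor's note, not in the original source); the shapes of
// PartitionFetchInfo(offset, fetchSize) and TopicAndPartition(topic, partition) are assumed
// from their usage elsewhere in kafka.*, so this is a hedged sketch rather than canonical code:
//   val status = FetchPartitionStatus(LogOffsetMetadata.UnknownOffsetMetadata, PartitionFetchInfo(0L, 4096))
//   val metadata = FetchMetadata(fetchMinBytes = 1, fetchOnlyLeader = true, fetchOnlyCommitted = true,
//     isFromFollower = false, fetchPartitionStatus = Map(TopicAndPartition("topic", 0) -> status))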
/**
* A delayed fetch operation that can be created by the replica manager and watched
* in the fetch operation purgatory
*/
class DelayedFetch(delayMs: Long,
fetchMetadata: FetchMetadata,
replicaManager: ReplicaManager,
responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit)
extends DelayedOperation(delayMs) {
/**
* The operation can be completed if:
*
* Case A: This broker is no longer the leader for some partitions it tries to fetch
* Case B: This broker does not know of some partitions it tries to fetch
* Case C: The fetch offset locates not on the last segment of the log
* Case D: The accumulated bytes from all the fetching partitions exceeds the minimum bytes
*
* Upon completion, should return whatever data is available for each valid partition
*/
override def tryComplete() : Boolean = {
var accumulatedSize = 0
fetchMetadata.fetchPartitionStatus.foreach {
case (topicAndPartition, fetchStatus) =>
val fetchOffset = fetchStatus.startOffsetMetadata
try {
if (fetchOffset != LogOffsetMetadata.UnknownOffsetMetadata) {
val replica = replicaManager.getLeaderReplicaIfLocal(topicAndPartition.topic, topicAndPartition.partition)
val endOffset =
if (fetchMetadata.fetchOnlyCommitted)
replica.highWatermark
else
replica.logEndOffset
// Go directly to the check for Case D if the message offsets are the same. If the log segment
// has just rolled, then the high watermark offset will remain the same but be on the old segment,
// which would incorrectly be seen as an instance of Case C.
if (endOffset.messageOffset != fetchOffset.messageOffset) {
if (endOffset.onOlderSegment(fetchOffset)) {
// Case C, this can happen when the new fetch operation is on a truncated leader
debug("Satisfying fetch %s since it is fetching later segments of partition %s.".format(fetchMetadata, topicAndPartition))
return forceComplete()
} else if (fetchOffset.onOlderSegment(endOffset)) {
// Case C, this can happen when the fetch operation is falling behind the current segment
// or the partition has just rolled a new segment
debug("Satisfying fetch %s immediately since it is fetching older segments.".format(fetchMetadata))
return forceComplete()
} else if (fetchOffset.messageOffset < endOffset.messageOffset) {
// we need to take the partition fetch size as an upper bound when accumulating the bytes
accumulatedSize += math.min(endOffset.positionDiff(fetchOffset), fetchStatus.fetchInfo.fetchSize)
}
}
}
} catch {
case utpe: UnknownTopicOrPartitionException => // Case B
debug("Broker no longer know of %s, satisfy %s immediately".format(topicAndPartition, fetchMetadata))
return forceComplete()
case nle: NotLeaderForPartitionException => // Case A
debug("Broker is no longer the leader of %s, satisfy %s immediately".format(topicAndPartition, fetchMetadata))
return forceComplete()
}
}
// Case D
if (accumulatedSize >= fetchMetadata.fetchMinBytes)
forceComplete()
else
false
}
override def onExpiration() {
if (fetchMetadata.isFromFollower)
DelayedFetchMetrics.followerExpiredRequestMeter.mark()
else
DelayedFetchMetrics.consumerExpiredRequestMeter.mark()
}
/**
* Upon completion, read whatever data is available and pass to the complete callback
*/
override def onComplete() {
val logReadResults = replicaManager.readFromLocalLog(fetchMetadata.fetchOnlyLeader,
fetchMetadata.fetchOnlyCommitted,
fetchMetadata.fetchPartitionStatus.mapValues(status => status.fetchInfo))
val fetchPartitionData = logReadResults.mapValues(result =>
FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet))
responseCallback(fetchPartitionData)
}
}
object DelayedFetchMetrics extends KafkaMetricsGroup {
private val FetcherTypeKey = "fetcherType"
val followerExpiredRequestMeter = newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS, tags = Map(FetcherTypeKey -> "follower"))
val consumerExpiredRequestMeter = newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS, tags = Map(FetcherTypeKey -> "consumer"))
}
| racker/kafka | core/src/main/scala/kafka/server/DelayedFetch.scala | Scala | apache-2.0 | 6,665 |
package com.datawizards.sparklocal.rdd
import org.apache.spark.Partitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import scala.collection.{GenIterable, GenMap, Map}
import scala.reflect.ClassTag
trait PairRDDFunctionsAPI[K, V] {
protected lazy val spark: SparkSession = SparkSession.builder().getOrCreate()
protected def parallelize[That: ClassTag](d: Seq[That]): RDD[That] = spark.sparkContext.parallelize(d)
protected def parallelize[That: ClassTag](d: GenIterable[That]): RDD[That] = parallelize(d.toList)
def mapValues[U: ClassTag](f: (V) => U): RDDAPI[(K, U)]
def keys: RDDAPI[K]
def values: RDDAPI[V]
def flatMapValues[U: ClassTag](f: (V) => TraversableOnce[U]): RDDAPI[(K, U)]
def countByKey(): GenMap[K, Long]
def reduceByKey(func: (V, V) => V): RDDAPI[(K, V)]
def reduceByKey(func: (V, V) => V, numPartitions: Int): RDDAPI[(K, V)]
def reduceByKey(partitioner: Partitioner, func: (V, V) => V): RDDAPI[(K, V)]
def reduceByKeyLocally(func: (V, V) => V): Map[K, V]
def groupByKey(): RDDAPI[(K, GenIterable[V])]
def groupByKey(numPartitions: Int): RDDAPI[(K, GenIterable[V])]
def groupByKey(partitioner: Partitioner): RDDAPI[(K, GenIterable[V])]
def foldByKey(zeroValue: V)(func: (V, V) => V): RDDAPI[(K, V)]
def foldByKey(zeroValue: V, numPartitions: Int)(func: (V, V) => V): RDDAPI[(K, V)]
def foldByKey(zeroValue: V, partitioner: Partitioner)(func: (V, V) => V): RDDAPI[(K, V)]
def join[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, W))]
def join[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, W))]
def join[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, W))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, Option[W]))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, Option[W]))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, Option[W]))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], W))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], W))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], W))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], Option[W]))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], Option[W]))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], Option[W]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def collectAsMap(): GenMap[K, V]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, V)]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, V)]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], p: Partitioner): RDDAPI[(K, V)]
def aggregateByKey[U: ClassTag](zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def aggregateByKey[U: ClassTag](zeroValue: U, partitioner: Partitioner)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def aggregateByKey[U: ClassTag](zeroValue: U, numPartitions: Int)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def partitionBy(partitioner: Partitioner): RDDAPI[(K, V)]
}
| piotr-kalanski/spark-local | src/main/scala/com/datawizards/sparklocal/rdd/PairRDDFunctionsAPI.scala | Scala | apache-2.0 | 4,889 |
package com.temportalist.weepingangels.common.generation
import java.util.Random
import com.temportalist.origin.api.common.lib.{BlockState, LogHelper, V3O}
import com.temportalist.weepingangels.common.WeepingAngels
import com.temportalist.weepingangels.common.init.WABlocks
import com.temportalist.weepingangels.common.tile.TEStatue
import cpw.mods.fml.common.IWorldGenerator
import net.minecraft.block._
import net.minecraft.init.Blocks
import net.minecraft.tileentity.{TileEntityChest, TileEntityMobSpawner}
import net.minecraft.util.{AxisAlignedBB, WeightedRandomChestContent}
import net.minecraft.world.World
import net.minecraft.world.chunk.IChunkProvider
import net.minecraftforge.common.ChestGenHooks
import net.minecraftforge.common.util.ForgeDirection
/**
*
*
* @author TheTemportalist
*/
object VaultGenerator extends IWorldGenerator {
val rarity: Int = 2500
val lowestY: Int = 30
val highestY: Int = 5
val ladderRarity: Int = 10
override def generate(random: Random, chunkX: Int, chunkZ: Int, world: World,
chunkGenerator: IChunkProvider, chunkProvider: IChunkProvider): Unit = {
if (world.provider.dimensionId != 0) return
if (random.nextInt(this.rarity) != 0) {
return
}
val x: Int = chunkX * random.nextInt(16)
val z: Int = chunkZ * random.nextInt(16)
val topY: Int = this.getTopY(world, x, z)
if (topY < 0) {
return
}
val tubeLength: Int = random.nextInt(lowestY - highestY + 1) + lowestY
val y: Int = topY - 6 - tubeLength
val centerPos: V3O = new V3O(x, y, z)
this.clearArea(world, centerPos)
this.makeWalls(world, centerPos, random)
this.makeEntrance(world, centerPos, random)
this.makeFeatures(world, centerPos, random)
this.makeTube(world, centerPos, random, tubeLength)
//LogHelper.info("", centerX + ":" + centerY + ":" + centerZ)
}
def getTopY(world: World, x: Int, z: Int): Int = {
val pos: V3O = new V3O(x, 128, z)
while (pos.y >= 20) {
val block: Block = pos.getBlock(world)
if (block != Blocks.air) {
val box: AxisAlignedBB = AxisAlignedBB.getBoundingBox(
block.getBlockBoundsMinX,
block.getBlockBoundsMinY,
block.getBlockBoundsMinZ,
block.getBlockBoundsMaxX,
block.getBlockBoundsMaxY,
block.getBlockBoundsMaxZ
)
if (box != null && this.isSameScaleAABB(
box, AxisAlignedBB.getBoundingBox(0.0D, 0.0D, 0.0D, 1.0D, 1.0D, 1.0D)
)) {
return pos.y_i()
}
}
pos.down()
}
-1
}
def isSameScaleAABB(box1: AxisAlignedBB, box2: AxisAlignedBB): Boolean = {
V3O.from(box1) == V3O.from(box2)
}
def clearArea(world: World, pos: V3O): Unit = {
for (x <- pos.x_i() - 4 to pos.x_i() + 4) {
for (z <- pos.z_i() - 4 to pos.z_i() + 10) {
for (y <- pos.y_i() + 1 to pos.y_i() + 6) {
world.setBlockToAir(x, y, z)
}
}
}
}
def makeWalls(world: World, pos: V3O, random: Random): Unit = {
for (z <- pos.z_i() - 4 to pos.z_i() + 10) {
for (y <- pos.y_i() + 1 to pos.y_i() + 6) {
this.setBlock(world, random, new V3O(pos.x_i() - 4, y, z))
this.setBlock(world, random, new V3O(pos.x_i() + 4, y, z))
}
}
for (x <- pos.x_i() - 4 to pos.x_i() + 4) {
for (y <- pos.y_i() + 1 to pos.y_i() + 6) {
this.setBlock(world, random, new V3O(x, y, pos.z_i() - 4))
this.setBlock(world, random, new V3O(x, y, pos.z_i() + 10))
}
for (y <- pos.y_i() + 2 to pos.y_i() + 5)
this.setBlock(world, random, new V3O(x, y, pos.z_i() + 2))
}
for (x <- pos.x_i() - 4 to pos.x_i() + 4) {
for (z <- pos.z_i() - 4 to pos.z_i() + 10) {
this.setBlock(world, random, new V3O(x, pos.y_i() + 1, z))
this.setBlock(world, random, new V3O(x, pos.y_i() + 6, z))
}
}
}
def makeEntrance(world: World, centerPos: V3O, random: Random): Unit = {
//this.setBlock(centerX + 0, centerY + 0, centerZ + 0, random)
val pos: V3O = centerPos + V3O.UP
// top middle
this.setBlock(world, random, pos)
// stairs to path
this.setBlock(world, this.getStairs(ForgeDirection.EAST), pos.copy().west())
this.setBlock(world, this.getStairs(ForgeDirection.WEST), pos.copy().east())
this.setBlock(world, this.getStairs(ForgeDirection.SOUTH), pos.copy().north())
// path start into vault
this.setBlock(world, random, pos.copy().south(1))
// make hole into vault
this.setBlock(world, pos.copy().up(1).south(2), Blocks.iron_bars)
this.setBlock(world, pos.copy().up(2).south(2), Blocks.iron_bars)
// make 3 stairs into vault (post bars)
this.setBlock(world, this.getStairs(ForgeDirection.EAST), pos.copy().west(1).south(3))
this.setBlock(world, this.getStairs(ForgeDirection.WEST), pos.copy().east(1).south(3))
this.setBlock(world, this.getStairs(ForgeDirection.NORTH), pos.copy().east(1).south(3))
// entrance pillars
this.makePillar(world, random, pos.copy().east().south())
this.makePillar(world, random, pos.copy().east().north())
this.makePillar(world, random, pos.copy().west().south())
this.makePillar(world, random, pos.copy().west().north())
// walling in the back (excess)
this.makePillar(world, random, pos.copy().west(3).north(2))
this.makePillar(world, random, pos.copy().west(3).north(3))
this.makePillar(world, random, pos.copy().west(2).north(2))
this.makePillar(world, random, pos.copy().west(2).north(3))
// ^
this.makePillar(world, random, pos.copy().east(3).north(2))
this.makePillar(world, random, pos.copy().east(3).north(3))
this.makePillar(world, random, pos.copy().east(2).north(2))
this.makePillar(world, random, pos.copy().east(2).north(3))
}
def makePillar(world: World, random: Random, pos: V3O): Unit = {
this.setBlock(world, pos.copy(), random)
this.setBlock(world, pos.copy().up(), random)
this.setBlock(world, pos.copy().up(2), random)
this.setBlock(world, pos.copy().up(3), random)
}
def makeFeatures(world: World, centerPos: V3O, random: Random): Unit = {
val pos: V3O = centerPos.copy().south(6).up()
val statuePos: V3O = pos.copy().up()
val radius: Int = 3 // radius
// 7 Statues
this.setStatue(world, 0, statuePos.copy().south(radius))
this.setStatue(world, 315, statuePos.copy().west(radius).south(radius))
this.setStatue(world, 45, statuePos.copy().east(radius).south(radius))
this.setStatue(world, 270, statuePos.copy().west(radius))
this.setStatue(world, 90, statuePos.copy().east(radius))
this.setStatue(world, 225, statuePos.copy().west(radius).north(radius))
this.setStatue(world, 135, statuePos.copy().east(radius).north(radius))
val spawnerVec: V3O = pos.copy()
this.getLootOffsetPos(spawnerVec, random, radius)
// 1 Spawner
this.setBlock(world, spawnerVec, Blocks.mob_spawner)
spawnerVec.getTile(world) match {
case spawner: TileEntityMobSpawner =>
spawner.func_145881_a().setEntityName("weepingangel")
case _ =>
LogHelper.info(WeepingAngels.MODNAME,
"Failed to fetch mob spawner entity at (" + spawnerVec.x_i() + ", " +
spawnerVec.y_i() + ", " + spawnerVec.z_i() + ")"
)
}
// 2 Chests
this.setChest(world, pos, random, radius)
this.setChest(world, pos, random, radius)
}
def setChest(world: World, pos: V3O, random: Random, radius: Int): Unit = {
val chestPos: V3O = pos.copy()
this.getLootOffsetPos(chestPos, random, radius)
if (chestPos == pos) return
val block: Block = chestPos.getBlock(world)
//println(block.getClass.getCanonicalName)
if (block != Blocks.mob_spawner && block != Blocks.chest) {
if (block == WABlocks.statue) {
WeepingAngels.log("ERROR: chest trying to place at BlockStatue pos of " + pos.toString())
return
}
this.setBlock(world, chestPos, Blocks.chest)
chestPos.getTile(world) match {
case teChest: TileEntityChest =>
WeightedRandomChestContent.generateChestContents(random,
ChestGenHooks.getItems(ChestGenHooks.DUNGEON_CHEST, random), teChest,
ChestGenHooks.getCount(ChestGenHooks.DUNGEON_CHEST, random))
case _ =>
}
}
}
def getLootOffsetPos(vec: V3O, random: Random, radius: Int): Unit = {
random.nextInt(64) / 8 match {
case 0 =>
vec.south(radius)
case 1 =>
vec.west(radius)
vec.south(radius)
case 2 =>
vec.east(radius)
vec.south(radius)
case 3 =>
vec.west(radius)
case 4 =>
vec.east(radius)
case 5 =>
vec.west(radius)
vec.north(radius)
case 6 =>
vec.east(radius)
vec.north(radius)
case _ =>
}
}
def makeTube(world: World, centerPos: V3O, random: Random, height: Int): Unit = {
val pos: V3O = centerPos.copy().up(7)
// 0 = down
// 1 = up
// 2 = north -Z
// 3 = south +Z
// 4 = west -X
// 5 = east +X
var ladderFacing: ForgeDirection = ForgeDirection.NORTH
random.nextInt(3) match {
case 0 =>
pos.north(3)
ladderFacing = ForgeDirection.SOUTH
case 1 =>
pos.east(3)
ladderFacing = ForgeDirection.WEST
case 2 =>
pos.west(3)
ladderFacing = ForgeDirection.EAST
case _ =>
return
}
this.setBlock(world, pos.copy().down(), Blocks.air)
for (y1 <- -4 to height) {
val pos2: V3O = pos + new V3O(0, y1, 0)
if (pos2.y_i() >= pos.y_i()) {
this.setBlock(world, random, pos2.copy().west())
this.setBlock(world, random, pos2.copy().west().north())
this.setBlock(world, random, pos2.copy().west().south())
this.setBlock(world, random, pos2.copy().north())
this.setBlock(world, random, pos2.copy().south())
this.setBlock(world, random, pos2.copy().east())
this.setBlock(world, random, pos2.copy().east().north())
this.setBlock(world, random, pos2.copy().east().south())
}
if (random.nextInt(this.ladderRarity) != 0)
this.setBlock(world, pos2,
Blocks.ladder, ladderFacing.ordinal()
)
else this.setBlock(world, Blocks.air, pos2)
}
this.makeTubeEntrance(world, pos.copy().up(height), random)
}
def makeTubeEntrance(world: World, pos: V3O, random: Random): Unit = {
this.setBlock(world, this.getStairs(ForgeDirection.NORTH), pos.copy().up().south())
this.setBlock(world, this.getStairs(ForgeDirection.NORTH), pos.copy().west().up().south())
this.setBlock(world, this.getStairs(ForgeDirection.EAST), pos.copy().west().up())
this.setBlock(world, this.getStairs(ForgeDirection.EAST), pos.copy().west().up().north())
this.setBlock(world, this.getStairs(ForgeDirection.SOUTH), pos.copy().up().north())
this.setBlock(world, this.getStairs(ForgeDirection.SOUTH), pos.copy().east().up().north())
this.setBlock(world, this.getStairs(ForgeDirection.WEST), pos.copy().east().up())
this.setBlock(world, this.getStairs(ForgeDirection.WEST), pos.copy().east().up().south())
    /* todo: set trapdoor, experiment with metadata
this.setBlock(world, pos.up(), Blocks.trapdoor.
withProperty(BlockTrapDoor.FACING, EnumFacing.NORTH).
withProperty(BlockTrapDoor.OPEN, false).
withProperty(BlockTrapDoor.HALF, BlockTrapDoor.DoorHalf.TOP)
)
*/
this.setStatue(world, random.nextFloat() * 360, pos.copy().up(2))
}
def getStairs(dir: ForgeDirection): BlockState = {
// http://minecraft.gamepedia.com/Data_values#Stairs
val bit1and2: Int =
if (dir == ForgeDirection.EAST) 0
else if (dir == ForgeDirection.WEST) 1
else if (dir == ForgeDirection.SOUTH) 2
else if (dir == ForgeDirection.NORTH) 3
else -1
    val bit4: Int = 0 // right side up stairs (4 if upside-down)
val meta = bit1and2 | bit4
new BlockState(Blocks.stone_brick_stairs, meta)
}
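  // Example (sketch, per the data-value table linked above): getStairs(ForgeDirection.SOUTH) gives
  // meta 2 | 0 = 2; directions not listed (UP, DOWN, UNKNOWN) fall through to -1 and yield a negative meta.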
def setStatue(world: World, rot: Float, pos: V3O): Unit = {
this.setBlock(world, pos, WABlocks.statue)
pos.getTile(world) match {
case te: TEStatue =>
te.setRotation(rot)
case _ =>
LogHelper.info(WeepingAngels.MODNAME,
"Failed to fetch statue entity at (" + pos.x_i() + ", " + pos.y_i() + ", " +
pos.z_i() + ")"
)
}
}
def setBlock(world: World, random: Random, pos: V3O): Unit = {
this.setBlock(world, pos, random)
}
def setBlock(world: World, pos: V3O, random: Random): Unit = {
val state: BlockState = this.getBlock(random)
this.setBlock(world, pos, state.getBlock, state.getMeta)
}
def setBlock(world: World, block: Block, pos: V3O): Unit = {
this.setBlock(world, pos, block)
}
def setBlock(world: World, state: BlockState, pos: V3O): Unit =
this.setBlock(world, pos, state.getBlock, state.getMeta)
def setBlock(world: World, pos: V3O, block: Block): Unit = {
this.setBlock(world, pos, block, 0)
}
def setBlock(world: World, pos:V3O, block: Block, meta: Int): Unit = {
pos.setBlock(world, block, meta, 2)
}
def getBlock(random: Random): BlockState = {
val chance: Int = random.nextInt(100) + 1
/*
50% brick
25% mossy brick
12% cobble
12% cracked
1% air
*/
if (chance <= 50) {
// 1 - 50 (50%)
new BlockState(Blocks.stonebrick, 0)
}
else if (chance <= 75) {
// 51 - 75 (25%)
new BlockState(Blocks.stonebrick, 1)
}
else if (chance <= 87) {
// 76 - 87 (12%)
new BlockState(Blocks.cobblestone, 0)
}
    else if (chance <= 99) {
      // 88 - 99 (12%) cracked
      new BlockState(Blocks.stonebrick, 2)
}
else {
new BlockState(Blocks.air, 0)
}
}
}
| TheTemportalist/WeepingAngels | src/main/scala/com/temportalist/weepingangels/common/generation/VaultGenerator.scala | Scala | apache-2.0 | 12,993 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CP503(value: Option[Int]) extends CtBoxIdentifier(name = "Income from property expenses") with CtOptionalInteger with Input with ValidatableBox[ComputationsBoxRetriever] {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = validateZeroOrPositiveInteger(this) ++ validateNotExceedingCP501(this, boxRetriever)
private def validateNotExceedingCP501(box: CP503, retriever: ComputationsBoxRetriever): Set[CtValidation] = {
(box.value, retriever.cp501().value) match {
case (Some(x), Some(y)) if x > y => Set(CtValidation(Some(box.id), s"error.${box.id}.propertyExpensesExceedsIncome"))
case (Some(x), None) if x > 0 => Set(CtValidation(Some(box.id), s"error.${box.id}.propertyExpensesExceedsIncome"))
case _ => Set()
}
}
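  // Illustrative behaviour: a CP503 value greater than CP501, or any positive CP503 when CP501 is absent,
  // yields a CtValidation keyed s"error.${box.id}.propertyExpensesExceedsIncome"; otherwise this check passes.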
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP503.scala | Scala | apache-2.0 | 1,536 |
package spark.broadcast
import java.io._
import java.net._
import java.util.{BitSet, UUID}
import java.util.concurrent.{Executors, ThreadFactory, ThreadPoolExecutor}
import spark._
trait Broadcast[T] extends Serializable {
val uuid = UUID.randomUUID
def value: T
// We cannot have an abstract readObject here due to some weird issues with
// readObject having to be 'private' in sub-classes. Possibly a Scala bug!
override def toString = "spark.Broadcast(" + uuid + ")"
}
object Broadcast
extends Logging with Serializable {
// Messages
val REGISTER_BROADCAST_TRACKER = 0
val UNREGISTER_BROADCAST_TRACKER = 1
val FIND_BROADCAST_TRACKER = 2
val GET_UPDATED_SHARE = 3
private var initialized = false
private var isMaster_ = false
private var broadcastFactory: BroadcastFactory = null
// Called by SparkContext or Executor before using Broadcast
def initialize (isMaster__ : Boolean): Unit = synchronized {
if (!initialized) {
val broadcastFactoryClass = System.getProperty(
"spark.broadcast.factory", "spark.broadcast.DfsBroadcastFactory")
//"spark.broadcast.factory", "spark.broadcast.BitTorrentBroadcastFactory")
//"spark.broadcast.factory", "spark.broadcast.ChainedBroadcastFactory")
broadcastFactory =
Class.forName(broadcastFactoryClass).newInstance.asInstanceOf[BroadcastFactory]
// Setup isMaster before using it
isMaster_ = isMaster__
// Set masterHostAddress to the master's IP address for the slaves to read
if (isMaster) {
System.setProperty("spark.broadcast.masterHostAddress", Utils.localIpAddress)
}
// Initialize appropriate BroadcastFactory and BroadcastObject
broadcastFactory.initialize(isMaster)
initialized = true
}
}
def getBroadcastFactory: BroadcastFactory = {
if (broadcastFactory == null) {
throw new SparkException ("Broadcast.getBroadcastFactory called before initialize")
}
broadcastFactory
}
// Load common broadcast-related config parameters
private var MasterHostAddress_ = System.getProperty(
"spark.broadcast.masterHostAddress", "")
private var MasterTrackerPort_ = System.getProperty(
"spark.broadcast.masterTrackerPort", "11111").toInt
private var BlockSize_ = System.getProperty(
"spark.broadcast.blockSize", "4096").toInt * 1024
private var MaxRetryCount_ = System.getProperty(
"spark.broadcast.maxRetryCount", "2").toInt
private var TrackerSocketTimeout_ = System.getProperty(
"spark.broadcast.trackerSocketTimeout", "50000").toInt
private var ServerSocketTimeout_ = System.getProperty(
"spark.broadcast.serverSocketTimeout", "10000").toInt
private var MinKnockInterval_ = System.getProperty(
"spark.broadcast.minKnockInterval", "500").toInt
private var MaxKnockInterval_ = System.getProperty(
"spark.broadcast.maxKnockInterval", "999").toInt
// Load ChainedBroadcast config params
// Load TreeBroadcast config params
private var MaxDegree_ = System.getProperty("spark.broadcast.maxDegree", "2").toInt
// Load BitTorrentBroadcast config params
private var MaxPeersInGuideResponse_ = System.getProperty(
"spark.broadcast.maxPeersInGuideResponse", "4").toInt
private var MaxRxSlots_ = System.getProperty(
"spark.broadcast.maxRxSlots", "4").toInt
private var MaxTxSlots_ = System.getProperty(
"spark.broadcast.maxTxSlots", "4").toInt
private var MaxChatTime_ = System.getProperty(
"spark.broadcast.maxChatTime", "500").toInt
private var MaxChatBlocks_ = System.getProperty(
"spark.broadcast.maxChatBlocks", "1024").toInt
private var EndGameFraction_ = System.getProperty(
"spark.broadcast.endGameFraction", "0.95").toDouble
def isMaster = isMaster_
// Common config params
def MasterHostAddress = MasterHostAddress_
def MasterTrackerPort = MasterTrackerPort_
def BlockSize = BlockSize_
def MaxRetryCount = MaxRetryCount_
def TrackerSocketTimeout = TrackerSocketTimeout_
def ServerSocketTimeout = ServerSocketTimeout_
def MinKnockInterval = MinKnockInterval_
def MaxKnockInterval = MaxKnockInterval_
// ChainedBroadcast configs
// TreeBroadcast configs
def MaxDegree = MaxDegree_
// BitTorrentBroadcast configs
def MaxPeersInGuideResponse = MaxPeersInGuideResponse_
def MaxRxSlots = MaxRxSlots_
def MaxTxSlots = MaxTxSlots_
def MaxChatTime = MaxChatTime_
def MaxChatBlocks = MaxChatBlocks_
def EndGameFraction = EndGameFraction_
// Helper functions to convert an object to Array[BroadcastBlock]
def blockifyObject[IN](obj: IN): VariableInfo = {
val baos = new ByteArrayOutputStream
val oos = new ObjectOutputStream(baos)
oos.writeObject(obj)
oos.close()
baos.close()
val byteArray = baos.toByteArray
val bais = new ByteArrayInputStream(byteArray)
var blockNum = (byteArray.length / Broadcast.BlockSize)
if (byteArray.length % Broadcast.BlockSize != 0)
blockNum += 1
var retVal = new Array[BroadcastBlock](blockNum)
var blockID = 0
for (i <- 0 until (byteArray.length, Broadcast.BlockSize)) {
val thisBlockSize = math.min(Broadcast.BlockSize, byteArray.length - i)
var tempByteArray = new Array[Byte](thisBlockSize)
val hasRead = bais.read(tempByteArray, 0, thisBlockSize)
retVal(blockID) = new BroadcastBlock(blockID, tempByteArray)
blockID += 1
}
bais.close()
var variableInfo = VariableInfo(retVal, blockNum, byteArray.length)
variableInfo.hasBlocks = blockNum
return variableInfo
}
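  // Usage sketch (illustrative only, not part of the original docs): round-trip an object through the helpers.
  //   val info = blockifyObject(Array(1, 2, 3))
  //   val copy = unBlockifyObject[Array[Int]](info.arrayOfBlocks, info.totalBytes, info.totalBlocks)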
// Helper function to convert Array[BroadcastBlock] to object
def unBlockifyObject[OUT](arrayOfBlocks: Array[BroadcastBlock],
totalBytes: Int,
totalBlocks: Int): OUT = {
var retByteArray = new Array[Byte](totalBytes)
for (i <- 0 until totalBlocks) {
System.arraycopy(arrayOfBlocks(i).byteArray, 0, retByteArray,
i * Broadcast.BlockSize, arrayOfBlocks(i).byteArray.length)
}
byteArrayToObject(retByteArray)
}
private def byteArrayToObject[OUT](bytes: Array[Byte]): OUT = {
val in = new ObjectInputStream (new ByteArrayInputStream (bytes)){
override def resolveClass(desc: ObjectStreamClass) =
Class.forName(desc.getName, false, Thread.currentThread.getContextClassLoader)
}
val retVal = in.readObject.asInstanceOf[OUT]
in.close()
return retVal
}
}
case class BroadcastBlock (val blockID: Int, val byteArray: Array[Byte]) extends Serializable
case class VariableInfo (@transient val arrayOfBlocks : Array[BroadcastBlock],
val totalBlocks: Int,
val totalBytes: Int) extends Serializable {
@transient var hasBlocks = 0
}
class SpeedTracker extends Serializable {
// Mapping 'source' to '(totalTime, numBlocks)'
private var sourceToSpeedMap = Map[SourceInfo, (Long, Int)] ()
def addDataPoint (srcInfo: SourceInfo, timeInMillis: Long): Unit = {
sourceToSpeedMap.synchronized {
if (!sourceToSpeedMap.contains(srcInfo)) {
sourceToSpeedMap += (srcInfo -> (timeInMillis, 1))
} else {
val tTnB = sourceToSpeedMap (srcInfo)
sourceToSpeedMap += (srcInfo -> (tTnB._1 + timeInMillis, tTnB._2 + 1))
}
}
}
def getTimePerBlock (srcInfo: SourceInfo): Double = {
sourceToSpeedMap.synchronized {
val tTnB = sourceToSpeedMap (srcInfo)
      return tTnB._1.toDouble / tTnB._2
}
}
override def toString = sourceToSpeedMap.toString
}
| jperla/spark-advancers | core/src/main/scala/spark/broadcast/Broadcast.scala | Scala | bsd-3-clause | 7,570 |
/*
* Copyright (c) 2013 original authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eigengo.monitor.output.codahalemetrics
import com.codahale.metrics.MetricRegistry
import org.specs2.mutable.Specification
import akka.actor.ActorSystem
class RegistryProviderSpec extends Specification {
implicit val system = ActorSystem()
"The registry functionality" should {
"allow for the creation of a valid RegistryProvider" in {
val provider = RegistryFactory.getRegistryProvider("org.eigengo.monitor.output.codahalemetrics.TestRegistryProvider")
provider must beAnInstanceOf[TestRegistryProvider]
}
"create a default RegistryProvider when an invalid class is given" in {
val provider = RegistryFactory.getRegistryProvider("myclass")
provider must beAnInstanceOf[DefaultRegistryProvider]
}
}
step {
system.shutdown
}
}
class TestRegistryProvider extends RegistryProvider {
val registry = new MetricRegistry()
} | eigengo/monitor | output-codahalemetrics/src/test/scala/org/eigengo/monitor/output/codahalemetrics/RegistryProviderSpec.scala | Scala | apache-2.0 | 1,496 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.codetools
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.codetools.impl
import s_mach.codetools.macros.{ProductTypeHelper, Result}
import scala.reflect.macros.blackbox
trait BlackboxHelperImpl extends ProductTypeHelper {
val c:blackbox.Context
import c.universe._
object Impl {
def getOrAbort[A,X](r: Result[A]) : A = {
r.fold(
isSuccess = { s:Result.Success[A] =>
logIssues(r.zomIssue)
s.value
},
isFailure = { f:Result.Failure =>
val (zomIssue,lastError) =
r.zomIssue.splitAt(r.zomIssue.lastIndexWhere(_.isError))
logIssues(zomIssue)
c.abort(
c.enclosingPosition,
lastError.head.message
)
}
)
}
def logIssues(zomIssue: List[Result.Issue]) = {
zomIssue.foreach {
case r@Result.Error(_,_) => c.error(c.enclosingPosition,r.print)
case r@Result.Warning(_,_) => c.warning(c.enclosingPosition,r.print)
case r@Result.Info(_) => c.info(c.enclosingPosition,r.print,true)
case r@Result.Debug(_) =>
if(showDebug) {
c.info(c.enclosingPosition,r.print,false)
}
}
}
def inferImplicit(aType:c.Type) : Result[c.Tree] = {
val result = c.inferImplicitValue(aType)
if (result.isEmpty) {
Result.error(s"Implicit $aType does not exist")
} else {
Result(result)
}
}
def getCompanionMethod(
aType: c.Type,
methodName: String
) : Result[MethodSymbol] = {
aType.typeSymbol.companion.typeSignature.decl(TermName(methodName)) match {
case NoSymbol =>
Result.error(s"$aType.$methodName method does not exist")
case s => Result(s.asMethod)
}
}
val tupleNamePattern = "scala\\\\.Tuple\\\\d{1,2}".r.pattern
def isTupleType(aType:c.Type) : Boolean = {
tupleNamePattern.matcher(aType.typeSymbol.fullName.toString).matches
}
type TypeSig = List[String]
def calcProductType(aType: c.Type): Result[ProductType] = {
val aTypeParams = aType.typeConstructor.typeParams.map(_.toString)
def filterMethod(method:MethodSymbol) : Result[Option[MethodSymbol]] = {
if(
// Silently ignore methods who have a different count of type parameters
// than the type
method.typeParams.size == aType.typeConstructor.typeParams.size &&
// Silently ignore curried methods
method.paramLists.size == 1
) {
// Ignore but warn about methods whose type parameters are exact match
// size but whose symbols don't match the type's type parameter symbols
// exactly. Using just apply/unapply its not possible to figure out
// how the type parameters align to the type's type parameter without
// just assuming that they align exactly in the same order.
/*
Apply method can have different symbols in different order than type's
type parameters:
class A[X,Y](value1: X, value2: Y)
object A {
// compiles but possible weirdness in macro here! so just ignore it and warn
def apply[BB,AA](value1: AA, value2: BB) =
new A(value1,value2)
}
*/
if(method.typeParams.map(_.toString) == aTypeParams) {
Result(Some(method))
} else {
Result(
None,
Result.Warning(s"Ignoring possible matching $method method whose type parameter symbols ${method.typeParams} do not match ${aType.typeSymbol} type parameter symbols ${aType.typeConstructor.typeParams}")
)
}
} else {
Result(None)
}
}
def mkTypeSig(oomType: List[c.Type]) : List[String] = {
val (zomTypeParams,zomConcreteTypes) =
oomType
.zipWithIndex
.partition(_._1.typeSymbol.isParameter)
val unsortedTypeNames =
zomTypeParams.zip(aTypeParams).map { case ((_, index), aTypeParam) =>
(aTypeParam, index)
} :::
zomConcreteTypes.map { case (_type, index) => (_type.typeSymbol.fullName,index) }
unsortedTypeNames
.sortBy(_._2)
.map(_._1)
}
def calcOomUnapplyTypeSig(
unapplyMethod:MethodSymbol
) : List[TypeSig] = {
// Outer type for unapply is always Option, so strip it
val TypeRef(_,_,oneOuterArg) = unapplyMethod.returnType
oneOuterArg.head match {
/*
if the unapply methods returns Option[TupleX[A,B..Z]] then there are
two possible product types (matching apply method determines which
is correct):
1) _:TupleXX[A,B,..Z]
2) _:A,_:B,.._:Z
*/
case tupleType@TypeRef(_, symbol, innerArgs)
if innerArgs.nonEmpty & isTupleType(tupleType) =>
List(mkTypeSig(List(tupleType)), mkTypeSig(innerArgs))
// For anything else there is only one possible struct type
case typeRef@TypeRef(_, _, _) =>
List(mkTypeSig(List(typeRef)))
}
}
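      // Example (sketch): for `case class Pair(a: Int, b: String)` the companion unapply returns
      // Option[(Int, String)], so the candidates are roughly List("scala.Tuple2") and
      // List("scala.Int", "java.lang.String"); the matching apply signature decides which one is used.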
def calcApplyTypeSig(
applyMethod:MethodSymbol
) : TypeSig = {
mkTypeSig(
applyMethod.paramLists.head.map { param =>
param.typeSignature
}
)
}
Result.applicative(
getCompanionMethod(aType,"unapply"),
getCompanionMethod(aType,"apply")
) { (rawUnapplyMethod, applyMethod) =>
Result.applicative(
filterMethod(rawUnapplyMethod),
Result.sequence {
applyMethod
.asTerm
.alternatives
.map { m =>
filterMethod(m.asMethod)
}
}.map(_.flatten)
) { (optUnapplyMethod,_oomApplyMethod) =>
(_oomApplyMethod, optUnapplyMethod) match {
case (Nil, _) =>
Result.error(s"No eligible apply method found")
case (_, None) =>
Result.error(s"No eligible unapply method found")
case (oomApplyMethod,Some(unapplyMethod)) =>
// Search for first unapply type sig that matches an apply type sig
val oomUnapplyTypeSig = calcOomUnapplyTypeSig(unapplyMethod)
val lazySearch =
oomUnapplyTypeSig.toStream.map { unapplyTypeSig =>
oomApplyMethod.find { applyMethod =>
calcApplyTypeSig(applyMethod) == unapplyTypeSig
} match {
case Some(matchingApplyMethod) =>
Some {
(
matchingApplyMethod,
unapplyMethod
)
}
case None => None
}
}
lazySearch.collectFirst { case Some((matchingApplyMethod, matchingUnapplyMethod)) =>
// TODO: figure out proper way to get method type params to match type type params
val methodTypeParamToTypeParam =
matchingApplyMethod.typeParams
.map(_.fullName)
.zip(aType.typeArgs)
.toMap
val productType = aType
val oomField =
matchingApplyMethod.paramLists.head
.zipWithIndex
.map { case (symbol,index) =>
val symType = symbol.typeSignature
val _type =
methodTypeParamToTypeParam.getOrElse(
symType.typeSymbol.fullName,
symType
)
ProductType.Field(index,symbol.name.toString,_type)
}
val allApplyArgsAreFields = oomField.forall { case field =>
aType.member(TermName(field.name)) match {
case NoSymbol => false
case memberSymbol => memberSymbol.asMethod match {
case NoSymbol => false
case methodSymbol =>
methodSymbol != NoSymbol &&
methodSymbol.getter != NoSymbol
}
}
}
ProductType(
_type = productType,
oomField = oomField,
applyMethod = applyMethod,
unapplyMethod = unapplyMethod,
                allApplyArgsAreFields = allApplyArgsAreFields
)
} match {
case Some(productType) => Result(productType)
case None =>
Result.error {
s"No matching apply/unapply method pair found for ${aType.typeSymbol.fullName}\\n" +
s"Found ${oomApplyMethod.size} apply methods:\\n" +
oomApplyMethod.map(applyMethod => calcApplyTypeSig(applyMethod).mkString(",")).mkString("\\n ") +
s"Found unapply type signatures:\\n" +
oomUnapplyTypeSig.map(_.mkString(",")).mkString("\\n ")
}
}
}
}
}
}
}
}
| S-Mach/s_mach.codetools | codetools-core/src/main/scala/s_mach/codetools/impl/BlackboxHelperImpl.scala | Scala | mit | 10,292 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.MetadataAPI.Utility
import java.io.File
import com.ligadata.MetadataAPI.MetadataAPIImpl
import scala.io.Source
import org.apache.logging.log4j._
/**
* Created by dhaval on 8/13/15.
*/
object ConfigService {
private val userid: Option[String] = Some("metadataapi")
val loggerName = this.getClass.getName
lazy val logger = LogManager.getLogger(loggerName)
def uploadClusterConfig(input: String): String ={
var response = ""
var configFileDir: String = ""
//val gitMsgFile = "https://raw.githubusercontent.com/ligadata-dhaval/Kamanja/master/HelloWorld_Msg_Def.json"
if (input == "") {
configFileDir = MetadataAPIImpl.GetMetadataAPIConfig.getProperty("CONFIG_FILES_DIR")
if (configFileDir == null) {
response = "CONFIG_FILES_DIR property missing in the metadata API configuration"
} else {
        //verify the directory where config files can be present
IsValidDir(configFileDir) match {
case true => {
//get all files with json extension
val types: Array[File] = new java.io.File(configFileDir).listFiles.filter(_.getName.endsWith(".json"))
types.length match {
case 0 => {
println("Configs not found at " + configFileDir)
response="Configs not found at " + configFileDir
}
case option => {
val configDefs = getUserInputFromMainMenu(types)
for (configDef <- configDefs) {
response += MetadataAPIImpl.UploadConfig(configDef.toString, userid, "configuration")
}
}
}
}
case false => {
//println("Message directory is invalid.")
response = "Config directory is invalid."
}
}
}
} else {
//input provided
var message = new File(input.toString)
val configDef = Source.fromFile(message).mkString
response = MetadataAPIImpl.UploadConfig(configDef.toString, userid, "configuration")
}
response
}
def uploadCompileConfig(input: String): String ={
var response = ""
var configFileDir: String = ""
//val gitMsgFile = "https://raw.githubusercontent.com/ligadata-dhaval/Kamanja/master/HelloWorld_Msg_Def.json"
if (input == "") {
configFileDir = MetadataAPIImpl.GetMetadataAPIConfig.getProperty("CONFIG_FILES_DIR")
if (configFileDir == null) {
response = "CONFIG_FILES_DIR property missing in the metadata API configuration"
} else {
        //verify the directory where config files can be present
IsValidDir(configFileDir) match {
case true => {
//get all files with json extension
val types: Array[File] = new java.io.File(configFileDir).listFiles.filter(_.getName.endsWith(".json"))
types.length match {
case 0 => {
println("Configs not found at " + configFileDir)
response="Configs not found at " + configFileDir
}
case option => {
val configDefs = getUserInputFromMainMenu(types)
for (configDef <- configDefs) {
response += MetadataAPIImpl.UploadModelsConfig(configDef.toString, userid, "configuration")
}
}
}
}
case false => {
//println("Message directory is invalid.")
response = "Config directory is invalid."
}
}
}
} else {
//input provided
var message = new File(input.toString)
val configDef = Source.fromFile(message).mkString
response = MetadataAPIImpl.UploadModelsConfig(configDef.toString, userid, "configuration")
}
response
}
def dumpAllCfgObjects: String ={
var response=""
try{
response= MetadataAPIImpl.GetAllCfgObjects("JSON", userid)
}
catch {
case e: Exception => {
        response = e.getStackTrace.mkString("\\n")
}
}
response
}
def removeEngineConfig: String ={
var response="TO BE IMPLEMENTED"
response
}
def IsValidDir(dirName: String): Boolean = {
val iFile = new File(dirName)
if (!iFile.exists) {
println("The File Path (" + dirName + ") is not found: ")
false
} else if (!iFile.isDirectory) {
println("The File Path (" + dirName + ") is not a directory: ")
false
} else
true
}
def getUserInputFromMainMenu(messages: Array[File]): Array[String] = {
var listOfMsgDef: Array[String] = Array[String]()
var srNo = 0
println("\\nPick a Config Definition file(s) from below choices\\n")
for (message <- messages) {
srNo += 1
println("[" + srNo + "]" + message)
}
print("\\nEnter your choice(If more than 1 choice, please use commas to seperate them): \\n")
val userOptions: List[Int] = Console.readLine().filter(_ != '\\n').split(',').filter(ch => (ch != null && ch != "")).map(_.trim.toInt).toList
//check if user input valid. If not exit
for (userOption <- userOptions) {
userOption match {
case userOption if (1 to srNo).contains(userOption) => {
//find the file location corresponding to the message
var message = messages(userOption - 1)
//process message
val messageDef = Source.fromFile(message).mkString
listOfMsgDef = listOfMsgDef :+ messageDef
}
case _ => {
println("Unknown option: ")
}
}
}
listOfMsgDef
}
}
| traytonwhite/Kamanja | trunk/MetadataAPI/src/main/scala/com/ligadata/MetadataAPI/Utility/ConfigService.scala | Scala | apache-2.0 | 6,109 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.ids.cas.ticket
import org.beangle.cache.Cache
import org.beangle.cache.redis.RedisCacheManager
import org.beangle.commons.io.DefaultBinarySerializer
import org.beangle.ids.cas.service.Services
import redis.clients.jedis.JedisPool
class DefaultTicketCacheService extends TicketCacheService {
private[this] var tickets: Cache[String, DefaultServiceTicket] = _
private[this] var services: Cache[String, Services] = _
def this(pool: JedisPool) = {
this()
DefaultBinarySerializer.registerClass(classOf[Services])
DefaultBinarySerializer.registerClass(classOf[DefaultServiceTicket])
val cacheManager = new RedisCacheManager(pool, DefaultBinarySerializer, true)
cacheManager.ttl = 60
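    // ttl appears to be in seconds (the 6 * 60 * 60 below is described as "six hour"),
    // so service tickets cached here expire after roughly one minute.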
tickets = cacheManager.getCache("cas_tickets", classOf[String], classOf[DefaultServiceTicket])
cacheManager.ttl = 6 * 60 * 60 //six hour
services = cacheManager.getCache("cas_services", classOf[String], classOf[Services])
}
override def getTicketCache: Cache[String, DefaultServiceTicket] = {
tickets
}
override def getServiceCache: Cache[String, Services] = {
services
}
}
| beangle/ids | cas/src/main/scala/org/beangle/ids/cas/ticket/DefaultTicketCacheService.scala | Scala | lgpl-3.0 | 1,855 |
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
object Day8 {
def openFile() : Array[String] = {
var lines = Array[String]()
val buffered = Source.fromFile("input.txt")
for (line <- buffered.getLines) {
lines = lines :+ line.trim
}
buffered.close
lines
}
def main(args: Array[String]) : Unit = {
val lines = openFile()
var screen = ArrayBuffer.fill(6, 50)(false)
for (line <- lines) {
val split = line.split(" ")
split match {
case Array("rect", _*) => {
val Array(width, height) = split(1).split("x").map(_.toInt)
(0 to height - 1).foreach(y => {
(0 to width - 1).foreach(x => screen(y)(x) = true)
})
}
case Array("rotate", "row", _*) => {
val row = split(2).split("=")(1).toInt
val amount = split(4).toInt
screen(row) = screen(row).takeRight(amount) ++ screen(row).dropRight(amount)
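          // e.g. takeRight(2) ++ dropRight(2) turns ArrayBuffer(a, b, c, d, e) into ArrayBuffer(d, e, a, b, c),
          // i.e. a rightward rotation by `amount`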
}
case Array("rotate", "column", _*) => {
val column = split(2).split("=")(1).toInt
val amount = split(4).toInt
val mappedColumn = screen.map(_(column))
val shiftedColumn = mappedColumn.takeRight(amount) ++ mappedColumn.dropRight(amount)
(0 to screen.length - 1).foreach(i => screen(i)(column) = shiftedColumn(i))
}
}
}
printScreen(screen)
println
println(screen.flatten.filter(x => x).length)
}
def printScreen(screen: ArrayBuffer[ArrayBuffer[Boolean]]) : Unit = {
screen.foreach(row => {
row.foreach(column => {
if (column) print("#")
else print(".")
})
println
})
}
} | dooleykh/Advent2016 | 8/8.scala | Scala | mit | 1,985 |
/** Adapted from the original implementation of WeakHashSet in scala-reflect
*/
package dotty.tools.dotc.util
import java.lang.ref.{ReferenceQueue, WeakReference}
import scala.annotation.{ constructorOnly, tailrec }
import dotty.tools._
/**
* A HashSet where the elements are stored weakly. Elements in this set are eligible for GC if no other
* hard references are associated with them. Its primary use case is as a canonical reference
* identity holder (aka "hash-consing") via findEntryOrUpdate
*
* This Set implementation cannot hold null. Any attempt to put a null in it will result in a NullPointerException
*
* This set implementation is not in general thread safe without external concurrency control. However it behaves
* properly when GC concurrently collects elements in this set.
*/
abstract class WeakHashSet[A <: AnyRef](initialCapacity: Int = 8, loadFactor: Double = 0.5) extends MutableSet[A] {
import WeakHashSet._
type This = WeakHashSet[A]
/**
* queue of Entries that hold elements scheduled for GC
* the removeStaleEntries() method works through the queue to remove
* stale entries from the table
*/
protected val queue = new ReferenceQueue[A]
/**
* the number of elements in this set
*/
protected var count = 0
/**
* from a specified initial capacity compute the capacity we'll use as being the next
* power of two equal to or greater than the specified initial capacity
*/
private def computeCapacity = {
if (initialCapacity < 0) throw new IllegalArgumentException("initial capacity cannot be less than 0")
var candidate = 1
while (candidate < initialCapacity)
candidate *= 2
candidate
}
/**
* the underlying table of entries which is an array of Entry linked lists
*/
protected var table = new Array[Entry[A] | Null](computeCapacity)
/**
* the limit at which we'll increase the size of the hash table
*/
protected var threshold = computeThreshold
private def computeThreshold: Int = (table.size * loadFactor).ceil.toInt
protected def hash(key: A): Int
protected def isEqual(x: A, y: A): Boolean = x.equals(y)
/** Turn hashcode `x` into a table index */
protected def index(x: Int): Int = x & (table.length - 1)
/**
* remove a single entry from a linked list in a given bucket
*/
private def remove(bucket: Int, prevEntry: Entry[A] | Null, entry: Entry[A]): Unit = {
Stats.record(statsItem("remove"))
prevEntry match {
case null => table(bucket) = entry.tail
case _ => prevEntry.tail = entry.tail
}
count -= 1
}
/**
* remove entries associated with elements that have been gc'ed
*/
protected def removeStaleEntries(): Unit = {
def poll(): Entry[A] | Null = queue.poll().asInstanceOf
@tailrec
def queueLoop(): Unit = {
val stale = poll()
if (stale != null) {
val bucket = index(stale.hash)
@tailrec
def linkedListLoop(prevEntry: Entry[A] | Null, entry: Entry[A] | Null): Unit =
if entry != null then
if stale eq entry then remove(bucket, prevEntry, entry)
else linkedListLoop(entry, entry.tail)
linkedListLoop(null, table(bucket))
queueLoop()
}
}
queueLoop()
}
/**
* Double the size of the internal table
*/
protected def resize(): Unit = {
Stats.record(statsItem("resize"))
val oldTable = table
table = new Array[Entry[A] | Null](oldTable.size * 2)
threshold = computeThreshold
@tailrec
def tableLoop(oldBucket: Int): Unit = if (oldBucket < oldTable.size) {
@tailrec
def linkedListLoop(entry: Entry[A] | Null): Unit = entry match {
case null => ()
case _ =>
val bucket = index(entry.hash)
val oldNext = entry.tail
entry.tail = table(bucket)
table(bucket) = entry
linkedListLoop(oldNext)
}
linkedListLoop(oldTable(oldBucket))
tableLoop(oldBucket + 1)
}
tableLoop(0)
}
// TODO: remove the `case null` when we can enable explicit nulls in regular compiling,
// since the type `A <: AnyRef` of `elem` can ensure the value is not null.
def lookup(elem: A): A | Null = (elem: A | Null) match {
case null => throw new NullPointerException("WeakHashSet cannot hold nulls")
case _ =>
Stats.record(statsItem("lookup"))
removeStaleEntries()
val bucket = index(hash(elem))
@tailrec
def linkedListLoop(entry: Entry[A] | Null): A | Null = entry match {
case null => null
case _ =>
val entryElem = entry.get
if entryElem != null && isEqual(elem, entryElem) then entryElem
else linkedListLoop(entry.tail)
}
linkedListLoop(table(bucket))
}
protected def addEntryAt(bucket: Int, elem: A, elemHash: Int, oldHead: Entry[A] | Null): A = {
Stats.record(statsItem("addEntryAt"))
table(bucket) = new Entry(elem, elemHash, oldHead, queue)
count += 1
if (count > threshold) resize()
elem
}
// TODO: remove the `case null` when we can enable explicit nulls in regular compiling,
// since the type `A <: AnyRef` of `elem` can ensure the value is not null.
def put(elem: A): A = (elem: A | Null) match {
case null => throw new NullPointerException("WeakHashSet cannot hold nulls")
case _ =>
Stats.record(statsItem("put"))
removeStaleEntries()
val h = hash(elem)
val bucket = index(h)
val oldHead = table(bucket)
@tailrec
def linkedListLoop(entry: Entry[A] | Null): A = entry match {
case null => addEntryAt(bucket, elem, h, oldHead)
case _ =>
val entryElem = entry.get
if entryElem != null && isEqual(elem, entryElem) then entryElem.uncheckedNN
else linkedListLoop(entry.tail)
}
linkedListLoop(oldHead)
}
def +=(elem: A): Unit = put(elem)
def -=(elem: A): Unit = (elem: A | Null) match {
case null =>
case _ =>
Stats.record(statsItem("-="))
removeStaleEntries()
val bucket = index(hash(elem))
@tailrec
def linkedListLoop(prevEntry: Entry[A] | Null, entry: Entry[A] | Null): Unit =
if entry != null then
val entryElem = entry.get
if entryElem != null && isEqual(elem, entryElem) then remove(bucket, prevEntry, entry)
else linkedListLoop(entry, entry.tail)
linkedListLoop(null, table(bucket))
}
def clear(): Unit = {
table = new Array[Entry[A] | Null](table.size)
threshold = computeThreshold
count = 0
// drain the queue - doesn't do anything because we're throwing away all the values anyway
@tailrec def queueLoop(): Unit = if (queue.poll() != null) queueLoop()
queueLoop()
}
def size: Int = {
removeStaleEntries()
count
}
// Iterator over all the elements in this set in no particular order
override def iterator: Iterator[A] = {
removeStaleEntries()
new collection.AbstractIterator[A] {
/**
* the bucket currently being examined. Initially it's set past the last bucket and will be decremented
*/
private var currentBucket: Int = table.size
/**
* the entry that was last examined
*/
private var entry: Entry[A] | Null = null
/**
* the element that will be the result of the next call to next()
*/
private var lookaheadelement: A | Null = null
@tailrec
def hasNext: Boolean = {
while (entry == null && currentBucket > 0) {
currentBucket -= 1
entry = table(currentBucket)
}
val e = entry
if (e == null) false
else {
lookaheadelement = e.get
if lookaheadelement == null then
// element null means the weakref has been cleared since we last did a removeStaleEntries(), move to the next entry
entry = e.tail
hasNext
else true
}
}
def next(): A =
if lookaheadelement == null then
throw new IndexOutOfBoundsException("next on an empty iterator")
else
val result = lookaheadelement.nn
lookaheadelement = null
entry = entry.nn.tail
result
}
}
protected def statsItem(op: String): String = {
val prefix = "WeakHashSet."
val suffix = getClass.getSimpleName
s"$prefix$op $suffix"
}
/**
* Diagnostic information about the internals of this set. Not normally
* needed by ordinary code, but may be useful for diagnosing performance problems
*/
private[util] class Diagnostics {
/**
* Verify that the internal structure of this hash set is fully consistent.
* Throws an assertion error on any problem. In order for it to be reliable
* the entries must be stable. If any are garbage collected during validation
* then an assertion may inappropriately fire.
*/
def fullyValidate(): Unit = {
var computedCount = 0
var bucket = 0
while (bucket < table.size) {
var entry = table(bucket)
while (entry != null) {
assert(entry.get != null, s"$entry had a null value indicated that gc activity was happening during diagnostic validation or that a null value was inserted")
computedCount += 1
val cachedHash = entry.hash
val realHash = hash(entry.get.uncheckedNN)
assert(cachedHash == realHash, s"for $entry cached hash was $cachedHash but should have been $realHash")
val computedBucket = index(realHash)
assert(computedBucket == bucket, s"for $entry the computed bucket was $computedBucket but should have been $bucket")
entry = entry.tail
}
bucket += 1
}
assert(computedCount == count, s"The computed count was $computedCount but should have been $count")
}
/**
* Produces a diagnostic dump of the table that underlies this hash set.
*/
def dump: String = java.util.Arrays.toString(table.asInstanceOf[Array[AnyRef | Null]])
/**
* Number of buckets that hold collisions. Useful for diagnosing performance issues.
*/
def collisionBucketsCount: Int =
(table count (entry => entry != null && entry.tail != null))
/**
* Number of buckets that are occupied in this hash table.
*/
def fullBucketsCount: Int =
(table count (entry => entry != null))
/**
* Number of buckets in the table
*/
def bucketsCount: Int = table.size
}
private[util] def diagnostics: Diagnostics = new Diagnostics
}
/**
* Companion object for WeakHashSet
*/
object WeakHashSet {
/**
* A single entry in a WeakHashSet. It's a WeakReference plus a cached hash code and
* a link to the next Entry in the same bucket
*/
class Entry[A](@constructorOnly element: A, val hash:Int, var tail: Entry[A] | Null, @constructorOnly queue: ReferenceQueue[A]) extends WeakReference[A](element, queue)
}
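// Usage sketch (illustrative; assumes `hash` is the only member a concrete subclass must supply):
//   class StringHashSet extends WeakHashSet[String]() {
//     protected def hash(key: String): Int = key.hashCode
//   }
//   val interned = new StringHashSet
//   val canonical = interned.put(new String("abc")) // later puts/lookups of an equal string return this instance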
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/util/WeakHashSet.scala | Scala | apache-2.0 | 11,085 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.std
trait StdLib extends Library {
val math = MathLib
val structural = StructuralLib
val agg = AggLib
val identity = IdentityLib
val relations = RelationsLib
val set = SetLib
val array = ArrayLib
val string = StringLib
val date = DateLib
}
object StdLib extends StdLib
| djspiewak/quasar | frontend/src/main/scala/quasar/std/std.scala | Scala | apache-2.0 | 907 |
package com.jd.scala.selling.actors
import akka.actor.Actor.Receive
import akka.actor.{ActorLogging, Actor}
import com.jd.scala.selling.data.PersonDAO
import com.jd.scala.selling.model.Person
import com.jd.scala.selling.model.Person._
import scala.util.{Failure, Success}
/**
* Created by justin on 14/08/2014.
*/
class Creater extends Actor with ActorLogging {
override def receive: Receive = {
case p: Person => {
log.info(s"Received Person : ${p}")
PersonDAO.createPerson(p) match {
case Success(person) => {
log.info(""+person)
PersonDAO.readPersonWithId(person._1) match {
          case Success(p: Person) =>
            log.info("Read Person : " + p)
            sender() ! p
case Failure(ex: Throwable) => log.info(s"Error Reading Person : ${person.id} : ${ex.getMessage}")
}
}
case Failure(ex) => log.info(s"Error Creating Person : ${p} : ${ex.getMessage}")
}
}
}
}
| justindav1s/learning-scala | src/main/scala/com/jd/scala/selling/actors/Creater.scala | Scala | apache-2.0 | 1,002 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Assert.assertEquals
import org.junit.Assert.assertTrue
import org.scalastyle.Checker
import org.scalastyle.StyleError
import java.util.Set
import org.junit.Before
import org.junit.Test
// scalastyle:off magic.number multiple.string.literals
class IllegalImportsCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "illegal.imports"
val classUnderTest = classOf[IllegalImportsChecker]
@Test def testNone(): Unit = {
val source = """
package foobar
import java.util._
object Foobar {
val foo = 1
}
""";
assertErrors(List(), source)
}
@Test def testDefault(): Unit = {
val source = """package foobar
import java.util._
import sun.com.foobar;
import sun._
object Foobar {
}
""".stripMargin;
assertErrors(List(columnError(4, 0), columnError(5, 0)), source)
}
@Test def testRenamingWildcard(): Unit = {
val source = """package foobar
import java.util.{List => JList}
import java.lang.{Object => JObject}
import java.util.{List,Map}
import java.util.{_}
import java.util._
object Foobar {
}
""".stripMargin;
assertErrors(List(columnError(3, 0), columnError(5, 0), columnError(6, 0), columnError(7, 0)), source, Map("illegalImports" -> "java.util._"))
}
@Test def testRenamingSpecific(): Unit = {
val source = """package foobar
import java.util.{List => JList}
import java.lang.{Object => JObject}
import java.util.{Iterator => JIterator, List => JList, Collection => JCollection}
import java.util.{List, Map}
import java.util.{_}
import java.util._
object Foobar {
}
""".stripMargin;
assertErrors(List(columnError(3, 0), columnError(5, 0), columnError(6, 0)), source,
Map("illegalImports" -> "java.util.List, java.util.Map"))
}
@Test def testWithExemptImports(): Unit = {
val source = """package foobar
import java.util.{List => JList}
import java.lang.{Object => JObject}
import java.util.{Iterator => JIterator, List => JList, Collection => JCollection}
import java.util.{List, Map}
import java.util.{_}
import java.util._
object Foobar {
}
""".stripMargin;
assertErrors(List(columnError(5, 0), columnError(6, 0), columnError(7, 0), columnError(8, 0)), source,
Map("illegalImports" -> "java.util._", "exemptImports" -> "java.util.List"))
}
}
class UnderscoreImportCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "underscore.import"
val classUnderTest = classOf[UnderscoreImportChecker]
@Test def testNone(): Unit = {
val source = """
package foobar
import java.util.List
import java.util._
import java.util.{_}
import java.util.{Foo => Bar, _}
object Foobar {
import scala._
}
""";
assertErrors(List(columnError(5, 0), columnError(6, 0), columnError(7, 0), columnError(10, 2)), source)
}
}
class ImportGroupingCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "import.grouping"
val classUnderTest = classOf[ImportGroupingChecker]
@Test def testKO(): Unit = {
val source = """
package foobar
import java.util.List;
import java.util._ // here is a comment
import java.util._
object Foobar {
import java.util.Map
}
import java.util.Collection
object Barbar {
import java.util.HashMap
}
""";
assertErrors(List(columnError(9, 2), columnError(12, 0), columnError(15, 2)), source)
}
@Test def testNone(): Unit = {
val source = """
package foobar
object Foobar {
}
object Barbar {
}
""";
assertErrors(List(), source)
}
}
| firebase/scalastyle | src/test/scala/org/scalastyle/scalariform/ImportsCheckerTest.scala | Scala | apache-2.0 | 4,316 |
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import org.scalatest.FunSuite
class HistoricalValuesTest extends FunSuite {
test("initial values should be empty") {
val hw = new HistoricalValues({
() => Some(0)
}, {
v: Option[Int] => v.toString
})
expect(Nil) {
hw.values
}
}
test("update") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
hw.update
expect(List(None)) {
hw.values
}
value = Some(15)
hw.update
expect(List(None, Some(15))) {
hw.values
}
hw.update
expect(List(None, Some(15), Some(15))) {
hw.values
}
value = None
hw.update
expect(List(None, Some(15), Some(15), None)) {
hw.values
}
value = Some(-5)
hw.update
expect(List(None, Some(15), Some(15), None, Some(-5))) {
hw.values
}
}
test("size") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(0) {
hw.size
}
value = Some(15)
hw.update
expect(1) {
hw.size
}
hw.update
expect(2) {
hw.size
}
value = None
hw.update
expect(3) {
hw.size
}
}
test("isEmpty") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(true) {
hw.isEmpty
}
value = Some(15)
hw.update
expect(false) {
hw.isEmpty
}
hw.update
expect(false) {
hw.isEmpty
}
value = None
hw.update
expect(false) {
hw.isEmpty
}
}
test("valueAt") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
value = Some(1)
hw.update
value = Some(2)
hw.update
value = Some(3)
hw.update
expect(Some(3)) {
hw.valueAt(0)
}
expect(Some(2)) {
hw.valueAt(1)
}
expect(Some(1)) {
hw.valueAt(2)
}
}
test("valueAt - index out of bounds") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
value = Some(1)
hw.update
value = Some(2)
hw.update
value = Some(3)
hw.update
expect(None) {
hw.valueAt(-100)
}
expect(None) {
hw.valueAt(-2)
}
expect(None) {
hw.valueAt(-1)
}
expect(None) {
hw.valueAt(3)
}
expect(None) {
hw.valueAt(4)
}
expect(None) {
hw.valueAt(100)
}
}
test("valueAt - empty history") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(None) {
hw.valueAt(-100)
}
expect(None) {
hw.valueAt(-2)
}
expect(None) {
hw.valueAt(-1)
}
expect(None) {
hw.valueAt(0)
}
expect(None) {
hw.valueAt(1)
}
expect(None) {
hw.valueAt(2)
}
expect(None) {
hw.valueAt(3)
}
expect(None) {
hw.valueAt(4)
}
expect(None) {
hw.valueAt(100)
}
}
test("last") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(None) {
hw.last
}
value = Some(15)
hw.update
expect(Some(15)) {
hw.last
}
value = Some(5)
hw.update
expect(Some(5)) {
hw.last
}
value = None
hw.update
expect(None) {
hw.last
}
}
test("lastSet") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(None) {
hw.lastSet
}
value = Some(15)
hw.update
expect(Some(15)) {
hw.lastSet
}
value = None
hw.update
expect(Some(15)) {
hw.lastSet
}
value = Some(5)
hw.update
expect(Some(5)) {
hw.lastSet
}
value = None
hw.update
expect(Some(5)) {
hw.lastSet
}
}
test("take") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(Nil) {
hw.take(5)
}
expect(Nil) {
hw.take(2)
}
expect(Nil) {
hw.take(1)
}
expect(Nil) {
hw.take(0)
}
expect(Nil) {
hw.take(-1)
}
value = Some(15)
hw.update
expect(List(Some(15))) {
hw.take(5)
}
expect(List(Some(15))) {
hw.take(2)
}
expect(List(Some(15))) {
hw.take(1)
}
expect(Nil) {
hw.take(0)
}
expect(Nil) {
hw.take(-1)
}
value = Some(5)
hw.update
expect(List(Some(15), Some(5))) {
hw.take(5)
}
expect(List(Some(15), Some(5))) {
hw.take(2)
}
expect(List(Some(5))) {
hw.take(1)
}
expect(Nil) {
hw.take(0)
}
expect(Nil) {
hw.take(-1)
}
value = None
hw.update
expect(List(Some(15), Some(5), None)) {
hw.take(5)
}
expect(List(Some(5), None)) {
hw.take(2)
}
expect(List(None)) {
hw.take(1)
}
expect(Nil) {
hw.take(0)
}
expect(Nil) {
hw.take(-1)
}
value = Some(3)
hw.update
expect(List(Some(15), Some(5), None, Some(3))) {
hw.take(5)
}
expect(List(None, Some(3))) {
hw.take(2)
}
expect(List(Some(3))) {
hw.take(1)
}
expect(Nil) {
hw.take(0)
}
expect(Nil) {
hw.take(-1)
}
}
test("takeSet") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
expect(Nil) {
hw.takeSet(5)
}
expect(Nil) {
hw.takeSet(2)
}
expect(Nil) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = None
hw.update
expect(Nil) {
hw.takeSet(5)
}
expect(Nil) {
hw.takeSet(2)
}
expect(Nil) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = Some(15)
hw.update
expect(List(Some(15))) {
hw.takeSet(5)
}
expect(List(Some(15))) {
hw.takeSet(2)
}
expect(List(Some(15))) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = None
hw.update
expect(List(Some(15))) {
hw.takeSet(5)
}
expect(List(Some(15))) {
hw.takeSet(2)
}
expect(List(Some(15))) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = Some(5)
hw.update
expect(List(Some(15), Some(5))) {
hw.takeSet(5)
}
expect(List(Some(15), Some(5))) {
hw.takeSet(2)
}
expect(List(Some(5))) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = None
hw.update
expect(List(Some(15), Some(5))) {
hw.takeSet(5)
}
expect(List(Some(15), Some(5))) {
hw.takeSet(2)
}
expect(List(Some(5))) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
value = Some(3)
hw.update
expect(List(Some(15), Some(5), Some(3))) {
hw.takeSet(5)
}
expect(List(Some(5), Some(3))) {
hw.takeSet(2)
}
expect(List(Some(3))) {
hw.takeSet(1)
}
expect(Nil) {
hw.takeSet(0)
}
expect(Nil) {
hw.takeSet(-1)
}
}
test("truncate") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.toString
})
hw.truncate(0)
expect(Nil) {
hw.values
}
value = Some(5)
hw.update
value = None
hw.update
value = Some(7)
hw.update
value = Some(8)
hw.update
expect(List(Some(5), None, Some(7), Some(8))) {
hw.values
}
hw.truncate(5)
expect(List(Some(5), None, Some(7), Some(8))) {
hw.values
}
hw.truncate(4)
expect(List(Some(5), None, Some(7), Some(8))) {
hw.values
}
hw.truncate(3)
expect(List(None, Some(7), Some(8))) {
hw.values
}
hw.truncate(1)
expect(List(Some(8))) {
hw.values
}
hw.truncate(0)
expect(Nil) {
hw.values
}
}
test("valuesToStrings") {
var value: Option[Int] = None
val hw = new HistoricalValues({
() => value
}, {
v: Option[Int] => v.map("#" + _.toString).getOrElse("-")
})
expect(Nil) {
hw.valuesToStrings
}
value = Some(5)
hw.update
value = None
hw.update
value = Some(7)
hw.update
value = Some(8)
hw.update
expect(List("#5", "-", "#7", "#8")) {
hw.valuesToStrings
}
}
}
| ViliusN/Crossbow | crossbow-core/test/lt/norma/crossbow/indicators/HistoricalValuesTest.scala | Scala | gpl-3.0 | 9,913 |
package scala
package object cli {
import org.scalactic._
  def readProperty(key: String): String Or ErrorMessage = {
val prompt = s"$key: "
JLineReader.readLine(prompt) match {
case None => Bad(s"Invalid input. A value for '$key' is required.")
case Some(value) if value.trim.isEmpty => Bad(s"Invalid input. A value for '$key' is required.")
case Some(value) => Good(value)
}
}
def readProperty(key: String, default: String): String = {
val prompt = s"$key [$default]: "
JLineReader.readLine(prompt) match {
case None => default
case Some(value) if value.trim.isEmpty => default
case Some(value) => value
}
}
def readConfirmation(prompt: String): Boolean Or ErrorMessage = {
JLineReader.readLine(s"$prompt (y/n)? [n] ") match {
case None => Good(false)
case Some("y") | Some("Y") => Good(true)
case Some("n") | Some("N") => Good(false)
case Some(_) => Bad("Invalid choice. Select 'y' or 'n'.")
}
}
def readByIndex[A](as: Seq[A], prompt: String, conv: A => String): A Or ErrorMessage = {
def parseInput(s: String): Option[Int] = util.control.Exception.catching(classOf[NumberFormatException]).opt(s.toInt)
val asByIndex = as.zipWithIndex
JLineReader.readLine(
s"""
|$prompt
|${asByIndex.map { case (a, i) => s"[ $i ] ${conv(a)}" }.mkString("\\n") }
|Enter index number:
""".stripMargin.trim + " "
).flatMap(parseInput) match {
case Some(n) if n < as.size => Good(as(n))
case Some(_) | None => Bad("Invalid choice. Select by index number.")
}
}
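  // Usage sketch (illustrative only):
  //   readByIndex(Seq("dev", "prod"), "Choose an environment:", identity) match {
  //     case Good(env) => println(s"deploying to $env")
  //     case Bad(msg)  => println(msg)
  //   }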
}
| ngorongoro/habilis | cli/src/main/scala/cli/package.scala | Scala | unlicense | 1,622 |
/* Copyright (C) 2008-2010 Univ of Massachusetts Amherst, Computer Science Dept
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://code.google.com/p/factorie/
This software is provided under the terms of the Eclipse Public License 1.0
as published by http://www.opensource.org. For further information,
see the file `LICENSE.txt' included with this distribution. */
package cc.factorie
import scala.collection.mutable.{HashSet,HashMap,ArrayBuffer}
// Preliminary steps toward generic interfaces to inference
// Eventually we will have marginals over factors instead of variables
// Generic over sampling-based inference and variational inference
// TODO Not yet sure what general interfaces should go here.
trait Marginal
trait Lattice
trait Inferencer[V<:Variable,C] {
type LatticeType <: Lattice
/** Infer the target 'variables' using 'varying' to drive the exploration of changes to the configuration.
For example, in SamplingInference, 'varying' are Sampler 'contexts' which do not necessarily have to be Variables;
they are arbitrary keys to the sampler that may drive exploration, which will eventually result in changes to the 'target' variables.
If for you this separation of target variables and sampling contexts is unnecessary or confusing, consider VariableInferencer instead.
@author Andrew McCallum
@since 0.8
*/
def infer(variables:Collection[V], varying:Collection[C]): LatticeType
//def infer(factors:TemplateList[VectorTemplate], variables:Collection[V]): LatticeType
}
/** An Inferencer in which the context arguments are Variables.
@author Andrew McCallum
@since 0.8
*/
trait VariableInferencer[V<:Variable] extends Inferencer[V,V] {
/** Infer the 'variables', changing their values, but not other variables' (except perhaps through variable-value coordination). */
def infer(variables:Collection[V]): LatticeType = infer(variables, variables)
  /** Infer the 'targets' variables, considering not only changes to their values, but also changes to the 'marginalizing' variables. */
def inferMarginalizing(targets:Collection[V], marginalizing:Collection[V]) = infer(targets, { val a = new ArrayBuffer[V]; a ++= targets; a ++= marginalizing; a})
}
trait Maximizer[V<:Variable] extends Inferencer[V,V] // Include something like this?
// 'infer' here would actually change state to the maximum found
// 'infer' in Inferencer would leave it in some random state, with the results really in the Marginal objects?
// TODO Something like this also?? Where will optimizers like CongugateGradient and BFGS go?
trait Optimizer {
def optimize: Unit
def optimize(numIterations:Int): Unit
}
/** Perform inference according to belief propagation.
@author Andrew McCallum
@author Tim Vieira
@since 0.8
*/
class BPInferencer[V<:BeliefPropagation.BPVariable](model:Model) extends VariableInferencer[V] {
override type LatticeType = BPLattice
def infer(variables:Collection[V], varying:Collection[V]): LatticeType = infer(variables, varying, 4) // TODO Make a more sensible default
def infer(variables:Collection[V], varying:Collection[V], numIterations:Int): LatticeType = {
val result = new BPLattice(model,varying)
result.update(numIterations) // TODO Of course make this smarter later
    result.setVariablesToMax(variables) // For now, just inference by marginal maximization
// NOTE the above line requires that 'variables' is a subset of varying, of course!
result
}
def infer(variables:Collection[V], numIterations:Int): LatticeType = infer(variables, variables, numIterations)
// waiting for Scala 2.8 default parameters...
def inferTreewise(variables:Collection[V], varying:Collection[V]): LatticeType = inferTreewise(variables, varying, 1)
def inferTreewise(variables:Collection[V], varying:Collection[V], maxiterations:Int): LatticeType = {
// NOTE: 'variables' must be a subset of varying, of course!
val result = new BPLattice(model,varying)
var i = 0
do {
BeliefPropagation.maxdiff = 0
result.updateTreewise
//result.update
//println("iteration %s: max-message-diff %s".format(i,BeliefPropagation.maxdiff))
i += 1
} while (BeliefPropagation.maxdiff > 0.00000001 && i < maxiterations)
if (i >= maxiterations && maxiterations > 1) {
println("\n\033[31mWARNING\033[0m: loopy BP did not converge in <= %s iterations".format(i))
}
result.setVariablesToMax(variables)
result
}
def inferTreewise(variables:Collection[V]): LatticeType = inferTreewise(variables, variables, 1)
def inferTreewise(variables:Collection[V], maxiterations:Int): LatticeType = inferTreewise(variables, variables, maxiterations)
}
/** BruteForce searches for the optimal configuration of a Collection of Variables
    by doing exhaustive enumeration of all possible configurations.
@author Tim Vieira
*/
class BruteForce[V<:DiscreteVariable with NoVariableCoordination](model:Model) {
// TODO: make this conform to some of the existing Inferencer interfaces.
// extends VariableInferencer[V]?
def infer(variables:Seq[V]): Unit = {
assert(variables.forall(_.isInstanceOf[V]))
// Argmax over all combinations of variable values
val iterators = variables.map(v2 => v2.asInstanceOf[V].settings).toList
iterators.foreach(setting => {setting.reset; setting.next}) // reset each iterator and advance to first setting.
var score = 0.0
var best_score = Math.NEG_INF_DOUBLE
var best = null.asInstanceOf[Config]
do {
score = model.score(variables)
if (score > best_score || best == null) {
best = new Config(variables) // snapshot the variable configuration
best_score = score
}
} while (nextValues(iterators))
best.load // leave the variables in the best configuration found.
}
class Config(val variables:Seq[V]) {
// remember the settings of each variable
val settings = new Array[Int](variables.size)
for ((v,i) <- variables.toList.zipWithIndex) { settings(i) = v.intValue }
// set the values of each variable to that in this configuration
def load = for ((v,k) <- variables.toList.zip(settings.toList)) v.set(k)(null)
override def toString = ("Config(" + settings.toList.toString + ")")
}
  /** Iterate through all combinations of values in Variables given their `SettingIterators`. */
private def nextValues(vs: List[IterableSettings#SettingIterator]): Boolean = {
if (vs == Nil) false
else if (vs.first.hasNext) {vs.first.next; true}
else if (vs.tail != Nil) {vs.first.reset; vs.first.next; nextValues(vs.tail)}
else false
}
}
| andrewmilkowski/factorie | src/main/scala/cc/factorie/Inferencer.scala | Scala | epl-1.0 | 6,718 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_13.scalatest3_0_8
import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTest2GoToSourceTest
class Scalatest2_13_3_0_8_GoToSourceTest extends Scalatest2_13_3_0_8_Base with ScalaTest2GoToSourceTest
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_13/scalatest3_0_8/Scalatest2_13_3_0_8_GoToSourceTest.scala | Scala | apache-2.0 | 277 |
/*
* Copyright 2012 OneCalendar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dao
import framework.MongoConnectionProperties
import MongoConnectionProperties._
import framework.MongoOperations
import org.joda.time.DateTime
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite}
import models.{EventMongoMapper, Event}
import dao.EventRepository._
import com.github.simplyscala.MongoEmbedDatabase
import com.mongodb.casbah.Imports._
import com.mongodb.ServerAddress
import com.mongodb.casbah.TypeImports.MongoOptions
import com.github.simplyscala.MongodProps
class EventDaoTest extends FunSuite with ShouldMatchers with MongoEmbedDatabase with BeforeAndAfterAll with BeforeAndAfter {
var mongoProps: MongodProps = null
override def beforeAll() { mongoProps = mongoStart(27018) }
override def afterAll() { mongoStop(mongoProps) }
before { EventDaoCleaner.drop() }
object EventDaoCleaner extends MongoOperations with EventMongoMapper {
def drop()(implicit dbName: MongoDbName, connection: MongoDB) = delete(MongoDBObject())
}
implicit val dbName: MongoDbName = "test"
implicit val connection: MongoDB = {
val connection: MongoConnection = {
val options: MongoOptions = new MongoOptions()
options.setConnectionsPerHost(2)
MongoConnection(new ServerAddress("127.0.0.1", 27018), options)
}
connection(dbName)
}
test("saving a new event") {
val event: Event = Event(
uid = "1",
title = "BOF",
begin = new DateTime(2012, 4, 19, 0, 0, 0, 0),
end = new DateTime(2012, 4, 19, 0, 0, 0, 0),
description = "",
location = "",
tags = List("JAVA", "DEVOXX")
)
EventDao.saveEvent(event)
EventDao.findAll should be(List(event))
}
test("should find event by tag 'devoxx'") {
implicit val now: () => Long = () => new DateTime(2010, 1, 1, 1, 1).getMillis
initData()
EventDao.findByTag(List("devoxx")) should be(List(eventDevoxx))
}
test("should find events by tags 'devoxx' or 'java' ") {
implicit val now: () => Long = () => new DateTime(2010, 1, 1, 1, 1).getMillis
initData()
EventDao.findByTag(List("devoxx", "java")).map(_.tags).flatten.sorted should be(List("JAVA", "DEVOXX").sorted)
}
test("should find event even if it have not originalStream and url") {
implicit val now: () => Long = () => new DateTime(2010, 1, 1, 1, 1).getMillis
val eventWithNoOrigStreamAndUrl =
Event("uid", "title", DateTime.now().plusDays(1), DateTime.now().plusDays(2), "location", "description", tags = List("TEST"))
EventDao.saveEvent(eventWithNoOrigStreamAndUrl)
EventDao.findByTag(List("test")) should be(List(eventWithNoOrigStreamAndUrl))
EventDao.findAll() should be(List(eventWithNoOrigStreamAndUrl))
}
test("should not fail when event found without uid but which is in database") {
val now: DateTime = new DateTime(2012, 1, 1, 1, 1)
implicit val fnow: () => Long = () => new DateTime(2010, 1, 1, 1, 1).getMillis
EventDao.saveEvent(Event(uid = null, tags = List("NO_UID"), begin = now, end = now))
EventDao.findByTag(List("NO_UID")) should be(List(Event(uid = "", tags = List("NO_UID"), begin = now, end = now)))
}
test("should find 4 first events by tags 'devoxx', 'java' or other ") {
implicit val now: () => Long = () => new DateTime(2010, 1, 1, 1, 1).getMillis
initFiveData()
EventDao.findPreviewByTag(List("devoxx", "java", "other")).eventList should have size 4
EventDao.findPreviewByTag(List("devoxx", "java", "other")).eventList should be(List(eventJava, eventDevoxx, eventOther, event4))
}
test("should not return past events") {
implicit val now: () => Long = () => new DateTime(2012, 4, 20, 0, 0, 0, 0).getMillis
initFourData()
EventDao.findPreviewByTag(List("devoxx", "java", "other")).eventList should have size 3
EventDao.findPreviewByTag(List("devoxx", "java", "other")).eventList.map(_.begin.getMillis).foreach(_ should be >= (now()))
}
test("should find everything") {
(1 to 50).foreach(
id => EventDao.saveEvent(
Event(
uid = id.toString,
title = id.toString,
begin = new DateTime,
end = new DateTime,
tags = Nil
)
)
)
EventDao.findAll should have size 50
}
test("should find all events from now") {
implicit val now = () => DateTime.now.getMillis
EventDao.saveEvent(oldEvent)
EventDao.saveEvent(newEvent)
EventDao.findAllFromNow() should be(List(newEvent))
}
test("should not list old tags") {
implicit val now: () => Long = () => new DateTime(2012, 5, 1, 1, 1).getMillis
EventDao.saveEvent(oldEvent)
EventDao.saveEvent(newEvent)
val tags: List[String] = EventDao.listTags()
tags should be(List("NEW"))
}
test("delete by originalStream will drop all") {
implicit val now: () => Long = () => DateTime.now.getMillis
EventDao.saveEvent(Event(
originalStream = Option("hello"),
begin = new DateTime().plusDays(10),
end = new DateTime().plusDays(10),
title = "title",
description = "description",
tags = List("tag1", "tag2")
))
EventDao.saveEvent(Event(
originalStream = Option("hello"),
begin = new DateTime().plusDays(10),
end = new DateTime().plusDays(10),
title = "title2",
description = "description2",
tags = List("tag1", "tag2")
))
EventDao.saveEvent(Event(
originalStream = Option("hello"),
begin = new DateTime().minusDays(10),
end = new DateTime().minusDays(10),
title = "title",
description = "description",
tags = List("tag1", "tag2")
))
initData()
EventDao.findAll() should have size 5
EventDao.deleteByOriginalStream("hello")
EventDao.findAll() should have size 2
}
test("current events or next ones") {
import scala.concurrent.duration._
implicit val now: () => Long = () => new DateTime(2012, 4, 21, 15, 0).getMillis
initFourData()
val closestEvents: List[Event] = EventDao.closestEvents(offset = 5, afterset = 2)
closestEvents should have size 2
closestEvents.map(_.begin.getMillis).foreach(_.should(be <= (now() + (2 hours).toMillis) or be >= (now() - (5 minutes).toMillis)))
}
test("current events or next ones with tag 'devoxx'") {
import scala.concurrent.duration._
implicit val now: () => Long = () => new DateTime(2012, 4, 20, 10, 0).getMillis
initFourData()
val closestEvents: List[Event] = EventDao.closestEvents(offset = 5, afterset = 2, tags = List("devoxx"))
closestEvents should have size 1
closestEvents.map(_.begin.getMillis).foreach(_.should(be <= (now() + (2 hours).toMillis) or be >= (now() - (5 minutes).toMillis)))
}
test("count futur events") {
implicit val now: () => Long = () => new DateTime(2012, 5, 1, 1, 1).getMillis
EventDao.saveEvent(oldEvent)
EventDao.saveEvent(newEvent)
EventDao.countFutureEvents should be(1)
}
test("should find events by tags or event id") {
implicit val now: () => Long = () => new DateTime(2011, 5, 1, 1, 1).getMillis
val tags = List("OTHER", "JAVA")
val ids = List("NEW")
initFiveData()
EventDao.findByIdsAndTags(ids, tags).map(e => (e.uid, e.tags)) should be(List(newEvent, eventJava, eventOther, event4).map(e => (e.uid, e.tags)))
}
private def initData() {
EventDao.saveEvent(eventDevoxx)
EventDao.saveEvent(eventJava)
}
private def initFourData() {
EventDao.saveEvent(eventDevoxx)
EventDao.saveEvent(eventJava)
EventDao.saveEvent(eventOther)
EventDao.saveEvent(event4)
}
private def initFiveData() {
EventDao.saveEvent(eventDevoxx)
EventDao.saveEvent(eventJava)
EventDao.saveEvent(eventOther)
EventDao.saveEvent(event4)
EventDao.saveEvent(newEvent)
}
}
object EventRepository {
val eventDevoxx: Event = Event(
uid = "1",
title = "BOF",
begin = new DateTime(2012, 4, 20, 10, 0, 0, 0),
end = new DateTime(2012, 4, 20, 11, 0, 0, 0),
tags = List("DEVOXX")
)
val eventJava: Event = Event(
uid = "2",
title = "BOF",
begin = new DateTime(2012, 4, 19, 10, 0, 0, 0),
end = new DateTime(2012, 4, 19, 11, 0, 0, 0),
tags = List("JAVA")
)
val eventOther: Event = Event(
uid = "3",
title = "BOF",
begin = new DateTime(2012, 4, 21, 15, 0, 0, 0),
end = new DateTime(2012, 4, 21, 16, 0, 0, 0),
tags = List("OTHER")
)
val event4: Event = Event(
uid = "4",
title = "BOF",
begin = new DateTime(2012, 4, 21, 15, 0, 0, 0),
end = new DateTime(2012, 4, 21, 16, 0, 0, 0),
tags = List("4", "OTHER")
)
val oldEvent: Event = Event(
uid = "4",
title = "BOF",
begin = new DateTime(2012, 4, 21, 15, 0, 0, 0),
end = new DateTime(2012, 4, 21, 16, 0, 0, 0),
tags = List("4", "OTHER")
)
val newEvent: Event = Event(
uid = "NEW",
title = "NEW",
begin = new DateTime().plusDays(10),
end = new DateTime().plusDays(10),
tags = List("NEW")
)
}
| OneCalendar/OneCalendar | test/dao/EventDaoTest.scala | Scala | apache-2.0 | 10,390 |
/*
* Copyright (c) 2015, Nightfall Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package moe.nightfall.instrumentality.mc1710.gui
import moe.nightfall.instrumentality.editor.{EditElement, UIUtils}
import net.minecraft.client.Minecraft
import net.minecraft.client.gui.GuiScreen
import org.lwjgl.opengl.{Display, GL11};
object EditorHostGui {
var hostedElement: EditElement = null
}
/**
* Created on 18/08/15.
*/
class EditorHostGui extends GuiScreen {
var lastTime = System.currentTimeMillis()
override def initGui() {
    // Only comment out the if (hostedElement == null) check for testing!
// If commented, a resource leak happens! --gamemanj
if (EditorHostGui.hostedElement == null)
changePanel(UIUtils.createGui())
}
override def setWorldAndResolution(mc: Minecraft, width: Int, height: Int) {
super.setWorldAndResolution(mc, width, height)
if (EditorHostGui.hostedElement != null)
EditorHostGui.hostedElement.setSize(Display.getWidth(), Display.getHeight())
}
override def drawScreen(xCoord: Int, yCoord: Int, partialTick: Float) {
// Here's why the FPS was so low - we were limiting ourselves to MC time.
// Which looks AWFUL.
val thisTime = System.currentTimeMillis()
val deltaTime = thisTime - lastTime
lastTime = thisTime
UIUtils.update(EditorHostGui.hostedElement)
EditorHostGui.hostedElement.update(deltaTime / 1000f)
GL11.glDisable(GL11.GL_TEXTURE_2D)
GL11.glShadeModel(GL11.GL_SMOOTH)
GL11.glMatrixMode(GL11.GL_PROJECTION)
GL11.glLoadIdentity()
GL11.glOrtho(0, Display.getWidth, Display.getHeight, 0, 0, 1024)
GL11.glPushMatrix()
GL11.glMatrixMode(GL11.GL_MODELVIEW)
GL11.glPushMatrix()
GL11.glLoadIdentity()
UIUtils.prepareForDrawing(Display.getWidth, Display.getHeight)
GL11.glEnable(GL11.GL_SCISSOR_TEST)
EditorHostGui.hostedElement.draw()
GL11.glDisable(GL11.GL_SCISSOR_TEST)
GL11.glPopMatrix()
GL11.glMatrixMode(GL11.GL_PROJECTION)
GL11.glPopMatrix()
GL11.glMatrixMode(GL11.GL_MODELVIEW)
GL11.glShadeModel(GL11.GL_FLAT)
GL11.glEnable(GL11.GL_TEXTURE_2D)
}
def changePanel(newPanel: EditElement) {
if (EditorHostGui.hostedElement != null)
EditorHostGui.hostedElement.cleanup()
EditorHostGui.hostedElement = newPanel
EditorHostGui.hostedElement.setSize(width, height)
}
}
| Nightfall/Instrumentality | mc1710/src/main/scala/moe/nightfall/instrumentality/mc1710/gui/EditorHostGui.scala | Scala | bsd-2-clause | 3,780 |
package com.github.shadowsocks.acl
import java.net.InetAddress
import java.util.Objects
import com.github.shadowsocks.utils.Utils
/**
* @author Mygod
*/
@throws[IllegalArgumentException]
class Subnet(val address: InetAddress, val prefixSize: Int) extends Comparable[Subnet] {
private def addressLength = address.getAddress.length << 3
if (prefixSize < 0 || prefixSize > addressLength) throw new IllegalArgumentException
override def toString: String =
if (prefixSize == addressLength) address.getHostAddress else address.getHostAddress + '/' + prefixSize
override def compareTo(that: Subnet): Int = {
val addrThis = address.getAddress
val addrThat = that.address.getAddress
var result = addrThis lengthCompare addrThat.length // IPv4 address goes first
if (result != 0) return result
for ((x, y) <- addrThis zip addrThat) {
result = (x & 0xFF) compare (y & 0xFF) // undo sign extension of signed byte
if (result != 0) return result
}
prefixSize compare that.prefixSize
}
override def equals(other: Any): Boolean = other match {
case that: Subnet => address == that.address && prefixSize == that.prefixSize
case _ => false
}
override def hashCode: Int = Objects.hash(address, prefixSize: Integer)
}
object Subnet {
@throws[IllegalArgumentException]
def fromString(value: String): Subnet = {
val parts = value.split("/")
val addr = Utils.parseNumericAddress(parts(0))
parts.length match {
case 1 => new Subnet(addr, addr.getAddress.length << 3)
case 2 => new Subnet(addr, parts(1).toInt)
case _ => throw new IllegalArgumentException()
}
}
}
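
// A minimal usage sketch (illustrative only, not part of the app): parse CIDR strings with
// Subnet.fromString and rely on compareTo so IPv4 subnets sort before IPv6 ones. The helper
// name and the example addresses are assumptions.
private object SubnetUsageExample {
  def sortedSubnets(): Seq[Subnet] =
    Seq("10.0.0.0/8", "2001:db8::/32", "192.168.1.0/24")
      .map(Subnet.fromString)
      .sortWith(_.compareTo(_) < 0)
}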
| hangox/shadowsocks-android | mobile/src/main/scala/com/github/shadowsocks/acl/Subnet.scala | Scala | gpl-3.0 | 1,659 |
package io.hydrosphere.mist.master
import io.hydrosphere.mist.jobs.JobDetails
case class JobExecutionStatus(
id: String,
namespace: String,
startTime: Option[Long] = None,
endTime: Option[Long] = None,
status: JobDetails.Status = JobDetails.Status.Initialized
)
| KineticCookie/mist | src/main/scala/io/hydrosphere/mist/master/JobExecutionStatus.scala | Scala | apache-2.0 | 274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.csv
import java.io.IOException
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import com.databricks.spark.csv.newapi.CarbonTextFile
import com.databricks.spark.csv.util._
import com.databricks.spark.sql.readers._
import org.apache.commons.csv._
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation, TableScan}
import org.apache.spark.sql.types._
import org.slf4j.LoggerFactory
import org.apache.carbondata.processing.etl.DataLoadingException
case class CarbonCsvRelation protected[spark] (
location: String,
useHeader: Boolean,
delimiter: Char,
quote: Char,
escape: Character,
comment: Character,
parseMode: String,
parserLib: String,
ignoreLeadingWhiteSpace: Boolean,
ignoreTrailingWhiteSpace: Boolean,
userSchema: StructType = null,
charset: String = TextFile.DEFAULT_CHARSET.name(),
inferCsvSchema: Boolean)(@transient val sqlContext: SQLContext)
extends BaseRelation with TableScan with InsertableRelation {
/**
* Limit the number of lines we'll search for a header row that isn't comment-prefixed.
*/
private val MAX_COMMENT_LINES_IN_HEADER = 10
private val logger = LoggerFactory.getLogger(CarbonCsvRelation.getClass)
// Parse mode flags
if (!ParseModes.isValidMode(parseMode)) {
logger.warn(s"$parseMode is not a valid parse mode. Using ${ParseModes.DEFAULT}.")
}
  if ((ignoreLeadingWhiteSpace || ignoreTrailingWhiteSpace) && ParserLibs.isCommonsLib(parserLib)) {
logger.warn(s"Ignore white space options may not work with Commons parserLib option")
}
private val failFast = ParseModes.isFailFastMode(parseMode)
private val dropMalformed = ParseModes.isDropMalformedMode(parseMode)
private val permissive = ParseModes.isPermissiveMode(parseMode)
val schema = inferSchema()
def tokenRdd(header: Array[String]): RDD[Array[String]] = {
val baseRDD = CarbonTextFile.withCharset(sqlContext.sparkContext, location, charset)
if(ParserLibs.isUnivocityLib(parserLib)) {
univocityParseCSV(baseRDD, header)
} else {
val csvFormat = CSVFormat.DEFAULT
.withDelimiter(delimiter)
.withQuote(quote)
.withEscape(escape)
.withSkipHeaderRecord(false)
.withHeader(header: _*)
.withCommentMarker(comment)
// If header is set, make sure firstLine is materialized before sending to executors.
val filterLine = if (useHeader) firstLine else null
baseRDD.mapPartitions { iter =>
// When using header, any input line that equals firstLine is assumed to be header
val csvIter = if (useHeader) {
iter.filter(_ != filterLine)
} else {
iter
}
parseCSV(csvIter, csvFormat)
}
}
}
// By making this a lazy val we keep the RDD around, amortizing the cost of locating splits.
def buildScan: RDD[Row] = {
val schemaFields = schema.fields
tokenRdd(schemaFields.map(_.name)).flatMap{ tokens =>
if (dropMalformed && schemaFields.length != tokens.size) {
logger.warn(s"Dropping malformed line: $tokens")
None
} else if (failFast && schemaFields.length != tokens.size) {
throw new RuntimeException(s"Malformed line in FAILFAST mode: $tokens")
} else {
var index: Int = 0
val rowArray = new Array[Any](schemaFields.length)
try {
index = 0
while (index < schemaFields.length) {
val field = schemaFields(index)
rowArray(index) = TypeCast.castTo(tokens(index), field.dataType, field.nullable)
index = index + 1
}
Some(Row.fromSeq(rowArray))
} catch {
case aiob: ArrayIndexOutOfBoundsException if permissive =>
(index until schemaFields.length).foreach(ind => rowArray(ind) = null)
Some(Row.fromSeq(rowArray))
}
}
}
}
private def inferSchema(): StructType = {
if (this.userSchema != null) {
userSchema
} else {
val firstRow = if (ParserLibs.isUnivocityLib(parserLib)) {
        val escapeVal = if (escape == null) '\\' else escape.charValue()
        val commentChar: Char = if (comment == null) '\0' else comment
new LineCsvReader(fieldSep = delimiter, quote = quote, escape = escapeVal,
commentMarker = commentChar).parseLine(firstLine)
} else {
val csvFormat = CSVFormat.DEFAULT
.withDelimiter(delimiter)
.withQuote(quote)
.withEscape(escape)
.withSkipHeaderRecord(false)
CSVParser.parse(firstLine, csvFormat).getRecords.get(0).asScala.toArray
}
if(null == firstRow) {
throw new DataLoadingException("First line of the csv is not valid.")
}
val header = if (useHeader) {
firstRow
} else {
firstRow.zipWithIndex.map { case (value, index) => s"C$index"}
}
if (this.inferCsvSchema) {
InferSchema(tokenRdd(header), header)
} else {
// By default fields are assumed to be StringType
val schemaFields = header.map { fieldName =>
StructField(fieldName.toString, StringType, nullable = true)
}
StructType(schemaFields)
}
}
}
/**
* Returns the first line of the first non-empty file in path
*/
private lazy val firstLine = {
val csv = CarbonTextFile.withCharset(sqlContext.sparkContext, location, charset)
if (comment == null) {
csv.first()
} else {
csv.take(MAX_COMMENT_LINES_IN_HEADER)
.find(x => !StringUtils.isEmpty(x) && !x.startsWith(comment.toString))
.getOrElse(sys.error(s"No uncommented header line in " +
s"first $MAX_COMMENT_LINES_IN_HEADER lines"))
}
}
private def univocityParseCSV(
file: RDD[String],
header: Seq[String]): RDD[Array[String]] = {
// If header is set, make sure firstLine is materialized before sending to executors.
val filterLine = if (useHeader) firstLine else null
val dataLines = if (useHeader) file.filter(_ != filterLine) else file
val rows = dataLines.mapPartitionsWithIndex({
case (split, iter) =>
        val escapeVal = if (escape == null) '\\' else escape.charValue()
        val commentChar: Char = if (comment == null) '\0' else comment
new CarbonBulkCsvReader(iter, split,
headers = header, fieldSep = delimiter,
quote = quote, escape = escapeVal, commentMarker = commentChar,
ignoreLeadingSpace = ignoreLeadingWhiteSpace,
ignoreTrailingSpace = ignoreTrailingWhiteSpace)
}, true)
rows
}
private def parseCSV(
iter: Iterator[String],
csvFormat: CSVFormat): Iterator[Array[String]] = {
iter.flatMap { line =>
try {
val records = CSVParser.parse(line, csvFormat).getRecords
if (records.isEmpty) {
logger.warn(s"Ignoring empty line: $line")
None
} else {
Some(records.get(0).asScala.toArray)
}
} catch {
case NonFatal(e) if !failFast =>
logger.error(s"Exception while parsing line: $line. ", e)
None
}
}
}
// The function below was borrowed from JSONRelation
override def insert(data: DataFrame, overwrite: Boolean): Unit = {
val filesystemPath = new Path(location)
val fs = filesystemPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
if (overwrite) {
try {
fs.delete(filesystemPath, true)
} catch {
case e: IOException =>
throw new IOException(
s"Unable to clear output directory ${filesystemPath.toString} prior"
+ s" to INSERT OVERWRITE a CSV table:\\n${e.toString}")
}
// Write the data. We assume that schema isn't changed, and we won't update it.
data.saveAsCsvFile(location, Map("delimiter" -> delimiter.toString))
} else {
sys.error("CSV tables only support INSERT OVERWRITE for now.")
}
}
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/csv/CarbonCsvRelation.scala | Scala | apache-2.0 | 8,959 |
package lib.neo4j.error
abstract class Neo4jError(msg: String, cause: Throwable = null)
extends RuntimeException(msg, cause)
case class ServiceError(msg: String, cause: Throwable = null)
extends Neo4jError(msg, cause)
case class ServerError(msg: String, cause: Throwable = null)
  extends Neo4jError(msg, cause)
 | fynnfeldpausch/frame | app/lib/neo4j/error/Neo4jError.scala | Scala | mit | 318 |
package cromwell.backend.async
import cromwell.backend.BackendJobDescriptor
import cromwell.backend.async.AsyncBackendJobExecutionActor.JobId
import cromwell.core.path.Path
import cromwell.core.{CallOutputs, ExecutionEvent}
/**
* Trait to encapsulate whether an execution is complete and if so provide a result. Useful in conjunction
* with the `poll` API to feed results of previous job status queries forward.
*/
sealed trait ExecutionHandle {
def isDone: Boolean
def result: ExecutionResult
}
final case class PendingExecutionHandle[BackendJobId <: JobId, BackendRunInfo, BackendRunStatus]
(
jobDescriptor: BackendJobDescriptor,
pendingJob: BackendJobId,
runInfo: Option[BackendRunInfo],
previousStatus: Option[BackendRunStatus]
) extends ExecutionHandle {
override val isDone = false
override val result = NonRetryableExecution(new IllegalStateException("PendingExecutionHandle cannot yield a result"))
}
final case class SuccessfulExecutionHandle(outputs: CallOutputs, returnCode: Int, jobDetritusFiles: Map[String, Path], executionEvents: Seq[ExecutionEvent], resultsClonedFrom: Option[BackendJobDescriptor] = None) extends ExecutionHandle {
override val isDone = true
override val result = SuccessfulExecution(outputs, returnCode, jobDetritusFiles, executionEvents, resultsClonedFrom)
}
final case class FailedNonRetryableExecutionHandle(throwable: Throwable, returnCode: Option[Int] = None) extends ExecutionHandle {
override val isDone = true
override val result = NonRetryableExecution(throwable, returnCode)
}
final case class FailedRetryableExecutionHandle(throwable: Throwable, returnCode: Option[Int] = None) extends ExecutionHandle {
override val isDone = true
override val result = RetryableExecution(throwable, returnCode)
}
case object AbortedExecutionHandle extends ExecutionHandle {
override def isDone: Boolean = true
override def result: ExecutionResult = AbortedExecution
}
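
/**
 * A hedged sketch (not cromwell's actual polling code) of how a caller might drive an
 * ExecutionHandle: keep polling until `isDone`, then read `result`. The `poll` function is an
 * assumed stand-in for a backend-specific status query.
 */
private object ExecutionHandlePollingSketch {
  @scala.annotation.tailrec
  def awaitResult(handle: ExecutionHandle)(poll: ExecutionHandle => ExecutionHandle): ExecutionResult =
    if (handle.isDone) handle.result
    else awaitResult(poll(handle))(poll)
}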
| ohsu-comp-bio/cromwell | backend/src/main/scala/cromwell/backend/async/ExecutionHandle.scala | Scala | bsd-3-clause | 1,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import scala.collection.immutable.{HashMap, TreeMap}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{GenericArrayData, TypeUtils}
import org.apache.spark.sql.types._
object PivotFirst {
def supportsDataType(dataType: DataType): Boolean = updateFunction.isDefinedAt(dataType)
// Currently UnsafeRow does not support the generic update method (throws
// UnsupportedOperationException), so we need to explicitly support each DataType.
private val updateFunction: PartialFunction[DataType, (InternalRow, Int, Any) => Unit] = {
case DoubleType =>
(row, offset, value) => row.setDouble(offset, value.asInstanceOf[Double])
case IntegerType =>
(row, offset, value) => row.setInt(offset, value.asInstanceOf[Int])
case LongType =>
(row, offset, value) => row.setLong(offset, value.asInstanceOf[Long])
case FloatType =>
(row, offset, value) => row.setFloat(offset, value.asInstanceOf[Float])
case BooleanType =>
(row, offset, value) => row.setBoolean(offset, value.asInstanceOf[Boolean])
case ShortType =>
(row, offset, value) => row.setShort(offset, value.asInstanceOf[Short])
case ByteType =>
(row, offset, value) => row.setByte(offset, value.asInstanceOf[Byte])
case d: DecimalType =>
(row, offset, value) => row.setDecimal(offset, value.asInstanceOf[Decimal], d.precision)
}
}
/**
* PivotFirst is an aggregate function used in the second phase of a two phase pivot to do the
* required rearrangement of values into pivoted form.
*
* For example on an input of
* A | B
* --+--
* x | 1
* y | 2
* z | 3
*
* with pivotColumn=A, valueColumn=B, and pivotColumnValues=[z,y] the output is [3,2].
*
* @param pivotColumn column that determines which output position to put valueColumn in.
* @param valueColumn the column that is being rearranged.
* @param pivotColumnValues the list of pivotColumn values in the order of desired output. Values
* not listed here will be ignored.
*/
case class PivotFirst(
pivotColumn: Expression,
valueColumn: Expression,
pivotColumnValues: Seq[Any],
mutableAggBufferOffset: Int = 0,
inputAggBufferOffset: Int = 0) extends ImperativeAggregate {
override val children: Seq[Expression] = pivotColumn :: valueColumn :: Nil
override val nullable: Boolean = false
val valueDataType = valueColumn.dataType
override val dataType: DataType = ArrayType(valueDataType)
val pivotIndex: Map[Any, Int] = if (pivotColumn.dataType.isInstanceOf[AtomicType]) {
HashMap(pivotColumnValues.zipWithIndex: _*)
} else {
TreeMap(pivotColumnValues.zipWithIndex: _*)(
TypeUtils.getInterpretedOrdering(pivotColumn.dataType))
}
val indexSize = pivotIndex.size
private val updateRow: (InternalRow, Int, Any) => Unit = PivotFirst.updateFunction(valueDataType)
override def update(mutableAggBuffer: InternalRow, inputRow: InternalRow): Unit = {
val pivotColValue = pivotColumn.eval(inputRow)
// We ignore rows whose pivot column value is not in the list of pivot column values.
val index = pivotIndex.getOrElse(pivotColValue, -1)
if (index >= 0) {
val value = valueColumn.eval(inputRow)
if (value != null) {
updateRow(mutableAggBuffer, mutableAggBufferOffset + index, value)
}
}
}
override def merge(mutableAggBuffer: InternalRow, inputAggBuffer: InternalRow): Unit = {
for (i <- 0 until indexSize) {
if (!inputAggBuffer.isNullAt(inputAggBufferOffset + i)) {
val value = inputAggBuffer.get(inputAggBufferOffset + i, valueDataType)
updateRow(mutableAggBuffer, mutableAggBufferOffset + i, value)
}
}
}
override def initialize(mutableAggBuffer: InternalRow): Unit = valueDataType match {
case d: DecimalType =>
// Per doc of setDecimal we need to do this instead of setNullAt for DecimalType.
for (i <- 0 until indexSize) {
mutableAggBuffer.setDecimal(mutableAggBufferOffset + i, null, d.precision)
}
case _ =>
for (i <- 0 until indexSize) {
mutableAggBuffer.setNullAt(mutableAggBufferOffset + i)
}
}
override def eval(input: InternalRow): Any = {
val result = new Array[Any](indexSize)
for (i <- 0 until indexSize) {
result(i) = input.get(mutableAggBufferOffset + i, valueDataType)
}
new GenericArrayData(result)
}
override def withNewInputAggBufferOffset(newInputAggBufferOffset: Int): ImperativeAggregate =
copy(inputAggBufferOffset = newInputAggBufferOffset)
override def withNewMutableAggBufferOffset(newMutableAggBufferOffset: Int): ImperativeAggregate =
copy(mutableAggBufferOffset = newMutableAggBufferOffset)
override val aggBufferAttributes: Seq[AttributeReference] =
pivotIndex.toList.sortBy(_._2).map { kv =>
AttributeReference(Option(kv._1).getOrElse("null").toString, valueDataType)()
}
override val aggBufferSchema: StructType = StructType.fromAttributes(aggBufferAttributes)
override val inputAggBufferAttributes: Seq[AttributeReference] =
aggBufferAttributes.map(_.newInstance())
}
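
// A hedged, illustrative sketch (not part of Spark) mirroring the scaladoc example above:
// pivotColumn A bound at ordinal 0, valueColumn B at ordinal 1, and pivotColumnValues [z, y]
// should yield a buffer evaluating to [3, 2]. The object name and the ordinals are assumptions.
private object PivotFirstSketch {
  import org.apache.spark.unsafe.types.UTF8String

  def demo(): Any = {
    val pivot = PivotFirst(
      pivotColumn = BoundReference(0, StringType, nullable = false),
      valueColumn = BoundReference(1, IntegerType, nullable = false),
      pivotColumnValues = Seq(UTF8String.fromString("z"), UTF8String.fromString("y")))
    val buffer = new GenericInternalRow(pivot.indexSize)
    pivot.initialize(buffer)
    Seq("x" -> 1, "y" -> 2, "z" -> 3).foreach { case (a, b) =>
      pivot.update(buffer, new GenericInternalRow(Array[Any](UTF8String.fromString(a), b)))
    }
    pivot.eval(buffer) // a GenericArrayData holding [3, 2]
  }
}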
| ConeyLiu/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PivotFirst.scala | Scala | apache-2.0 | 6,046 |
package com.josephpconley.swagger2postman.models.swagger.v2
import play.api.libs.json.Json
case class SwaggerDoc(
swagger: String,
info: SwaggerInfo,
host: String,
basePath: String,
tags: Seq[SwaggerTag],
paths: Map[String, Map[String, SwaggerPath]])
object SwaggerDoc {
implicit val paramFmt = Json.format[SwaggerParam]
implicit val pathFmt = Json.format[SwaggerPath]
implicit val tagFmt = Json.format[SwaggerTag]
implicit val infoFmt = Json.format[SwaggerInfo]
implicit val docFmt = Json.format[SwaggerDoc]
}
case class SwaggerInfo(description: Option[String], version: String, title: String)
case class SwaggerTag(name: String, description: Option[String])
case class SwaggerPath(tags: Seq[String], summary: String, description: String, operationId: String, parameters: Seq[SwaggerParam])
case class SwaggerParam(in: String, name: String, description: Option[String], required: Boolean)
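
// A hedged usage sketch (illustrative only, not part of the project): because the Json formats
// live in the SwaggerDoc companion object, a raw Swagger 2.0 JSON string can be validated
// directly. The object and method names are assumptions.
private object SwaggerDocParsingExample {
  import play.api.libs.json.{JsSuccess, Json}

  def parse(rawJson: String): Option[SwaggerDoc] =
    Json.parse(rawJson).validate[SwaggerDoc] match {
      case JsSuccess(doc, _) => Some(doc)
      case _ => None
    }
}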
| hasithalakmal/RIP | RIP_Test/swagger2postman-master/src/main/scala/com/josephpconley/swagger2postman/models/swagger/v2/SwaggerDoc.scala | Scala | apache-2.0 | 919 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.infer
import cc.factorie.directed._
import scala.collection.mutable.{HashSet,HashMap,ArrayBuffer}
import cc.factorie.directed.{MaximizeGaussianVariance, MaximizeGaussianMean, MaximizeGeneratedDiscrete, MaximizeGate}
import cc.factorie.variable._
import cc.factorie.model.Model
/** An inference engine that finds score-maximizing values.
The "infer" method returns a summary holding the maximizing assignment, but does not change the current variable values.
By convention, subclass-implemented "apply" methods should change the current variable values to those that maximize;
this convention differs from other Infer instances, which do not typically change variable values. */
trait Maximize[-A<:Iterable[Var],-B<:Model] extends Infer[A,B] {
def maximize(vs:A, model:B, marginalizing:Summary = null): Unit = infer(vs, model, marginalizing).setToMaximize(null)
//def maximize(vs:A, model:B): Unit = maximize(vs, model, null)
// TODO Consider adding the following
//def twoBest(vs:Iterable[Variable], model:Model, summary:Summary[Marginal] = null): (Summary[Marginal], Summary[Marginal])
}
//trait MaximizeMarginalizing[-A<:Iterable[Var],-B<:Model,-C<:Summary] extends InferMarginalizing[A,B,C] {
// def maximize(vs:A, model:B, marginalizing:C) = infer(vs, model, marginalizing).setToMaximize(null)
// // TODO Consider adding the following
// //def twoBest(vs:Iterable[Variable], model:Model, summary:Summary[Marginal] = null): (Summary[Marginal], Summary[Marginal])
//}
/* A suite containing various recipes for setting the values of variables so as to maximize some objective,
usually maximum likelihood. The suite tries each recipe in turn until it finds one that succeeds.
@author Andrew McCallum */
class MaximizeSuite extends Maximize[Iterable[Var],Model] {
def defaultSuite: Seq[Maximize[Iterable[Var],Model]] =
Seq(MaximizeGeneratedDiscrete.asInstanceOf[Maximize[Iterable[Var],Model]],
MaximizeGate.asInstanceOf[Maximize[Iterable[Var],Model]],
MaximizeProportions.asInstanceOf[Maximize[Iterable[Var],Model]],
MaximizeGaussianMean.asInstanceOf[Maximize[Iterable[Var],Model]],
//MaximizeGaussianMeansNoSummary,
MaximizeGaussianVariance.asInstanceOf[Maximize[Iterable[Var],Model]],
MaximizeByBPChain.asInstanceOf[Maximize[Iterable[Var],Model]])
val suite = new scala.collection.mutable.ArrayBuffer[Maximize[Iterable[Var],Model]]
suite ++= defaultSuite
//def infer(variables:Iterable[Variable], model:Model): Option[Summary[Marginal]] = None
def infer(varying:Iterable[Var], model:Model, marginalizing:Summary): Summary = {
// The handlers can be assured that the Seq[Factor] will be sorted alphabetically by class name
// This next line does the maximization
var summary = null.asInstanceOf[Summary]
val iterator = suite.iterator
while ((summary eq null) && iterator.hasNext) {
try {
summary = iterator.next().infer(varying, model)
} catch {
case e: ClassCastException => ()
}
}
summary
}
def apply(varying:Iterable[Var], model:Model): Summary = {
val summary = infer(varying, model)
if (summary eq null) throw new Error("No maximizing method found.")
summary.setToMaximize(null)
summary
}
// A convenient pretty interface, especially for tutorials and beginner users
def apply(varying:Var)(implicit model:DirectedModel): Summary = apply(Seq(varying), model)
//def apply(varying:Any*)(implicit model:DirectedModel): Summary = apply(varying, model)
}
object Maximize extends MaximizeSuite // A default instance of this class
//trait Maximizer[C] {
// def maximize(c:C)
//}
object SamplingMaximizer {
def apply[V <: Var with IterableSettings](model: Model)(implicit random: scala.util.Random) = new SamplingMaximizer[V](new VariableSettingsSampler[V](model))
}
class SamplingMaximizer[C](val sampler:ProposalSampler[C]) {
def maximize(varying:Iterable[C], iterations:Int): Iterable[Var] = {
var currentScore = 0.0
var maxScore = currentScore
val maxdiff = new DiffList
val origSamplerTemperature = sampler.temperature
val variablesTouched = new HashSet[Var]
def updateMaxScore(p:Proposal[C]): Unit = {
currentScore += p.modelScore // TODO Check proper handling of fbRatio
//println("SamplingMaximizer modelScore="+p.modelScore+" currentScore="+currentScore)
variablesTouched ++= p.diff.map(_.variable)
if (currentScore > maxScore) {
maxScore = currentScore
maxdiff.clear()
//println("SamplingMaximizer maxScore="+maxScore)
} else if (p.diff.size > 0) {
maxdiff appendAll p.diff
//println("SamplingMaximizer diff.size="+diff.size)
}
}
val updateHook: Proposal[C]=>Unit = updateMaxScore _
sampler.proposalHooks += updateHook // Add temporary hook
sampler.processAll(varying, iterations)
sampler.proposalHooks -= updateHook // Remove our temporary hook
sampler.temperature = origSamplerTemperature // Put back the sampler's temperature where we found it
maxdiff.undo() // Go back to maximum scoring configuration so we return having changed the config to the best
variablesTouched
}
def maximize(varying:Iterable[C], iterations:Int = 50, initialTemperature: Double = 1.0, finalTemperature: Double = 0.01, rounds:Int = 5): Iterable[Var] = {
//sampler.proposalsHooks += { (props:Seq[Proposal]) => { props.foreach(p => println(p.modelScore)) }}
val iterationsPerRound = if (iterations < rounds) 1 else iterations/rounds
var iterationsRemaining = iterations
if (iterationsRemaining == 1) sampler.temperature = finalTemperature
val variablesTouched = new HashSet[Var]
sampler.temperature = initialTemperature
while (iterationsRemaining > 0) {
val iterationsNow = math.min(iterationsPerRound, iterationsRemaining)
variablesTouched ++= maximize(varying, iterationsNow)
iterationsRemaining -= iterationsNow
sampler.temperature += (finalTemperature-initialTemperature)/rounds // Adding a negative number
//println("Reducing temperature to "+sampler.temperature)
}
variablesTouched
//new SamplingMaximizerLattice[V](diff, maxScore)
}
def apply(varying:Iterable[C], iterations:Int = 50, initialTemperature: Double = 1.0, finalTemperature: Double = 0.01, rounds:Int = 5): AssignmentSummary = {
new AssignmentSummary(new HashMapAssignment(maximize(varying, iterations, initialTemperature, finalTemperature, rounds)))
}
}
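
/** A hedged usage sketch (hypothetical helper, not factorie's own API): build a SamplingMaximizer
    over settings-iterable variables, anneal from a hot to a cold temperature over several rounds,
    and obtain an AssignmentSummary holding the best configuration found. */
private object SamplingMaximizerUsageSketch {
  def anneal[V <: Var with IterableSettings](model: Model, vars: Iterable[V])(implicit random: scala.util.Random): AssignmentSummary =
    SamplingMaximizer[V](model).apply(vars, iterations = 100, initialTemperature = 1.0, finalTemperature = 0.01, rounds = 5)
}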
| hlin117/factorie | src/main/scala/cc/factorie/infer/Maximize.scala | Scala | apache-2.0 | 7,275 |
package dotty.tools
package dotc
package core
import Types._
import Flags._
import Contexts._
import util.{SimpleMap, DotClass}
import reporting._
import printing.{Showable, Printer}
import printing.Texts._
import config.Config
import collection.mutable
class TyperState(r: Reporter) extends DotClass with Showable {
/** The current reporter */
def reporter = r
/** The current constraint set */
def constraint: Constraint =
new OrderingConstraint(SimpleMap.Empty, SimpleMap.Empty, SimpleMap.Empty)
def constraint_=(c: Constraint)(implicit ctx: Context): Unit = {}
/** The uninstantiated variables */
def uninstVars = constraint.uninstVars
/** The ephemeral flag is set as a side effect if an operation accesses
* the underlying type of a type variable. The reason we need this flag is
* that any such operation is not referentially transparent; it might logically change
* its value at the moment the type variable is instantiated. Caching code needs to
* check the ephemeral flag; If the flag is set during an operation, the result
* of that operation should not be cached.
*/
def ephemeral: Boolean = false
def ephemeral_=(x: Boolean): Unit = ()
/** Gives for each instantiated type var that does not yet have its `inst` field
* set, the instance value stored in the constraint. Storing instances in constraints
* is done only in a temporary way for contexts that may be retracted
* without also retracting the type var as a whole.
*/
def instType(tvar: TypeVar)(implicit ctx: Context): Type = constraint.entry(tvar.origin) match {
case _: TypeBounds => NoType
case tp: PolyParam =>
var tvar1 = constraint.typeVarOfParam(tp)
if (tvar1.exists) tvar1 else tp
case tp => tp
}
/** A fresh typer state with the same constraint as this one.
* @param isCommittable The constraint can be committed to an enclosing context.
*/
def fresh(isCommittable: Boolean): TyperState = this
/** A fresh type state with the same constraint as this one and the given reporter */
def withReporter(reporter: Reporter) = new TyperState(reporter)
/** Commit state so that it gets propagated to enclosing context */
def commit()(implicit ctx: Context): Unit = unsupported("commit")
/** Make type variable instances permanent by assigning to `inst` field if
* type variable instantiation cannot be retracted anymore. Then, remove
* no-longer needed constraint entries.
*/
def gc()(implicit ctx: Context): Unit = ()
/** Is it allowed to commit this state? */
def isCommittable: Boolean = false
/** Can this state be transitively committed until the top-level? */
def isGlobalCommittable: Boolean = false
def tryWithFallback[T](op: => T)(fallback: => T)(implicit ctx: Context): T = unsupported("tryWithFallBack")
override def toText(printer: Printer): Text = "ImmutableTyperState"
}
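
/** A hedged sketch (not dotty's actual caching code) of the discipline the `ephemeral` flag
 *  implies for callers: save the flag, clear it, run the operation, cache the result only if
 *  the operation left the flag unset, and restore the saved value afterwards.
 */
private object EphemeralCachingSketch {
  def cacheIfStable[T](op: => T)(cache: T => Unit)(implicit ctx: Context): T = {
    val saved = ctx.typerState.ephemeral
    ctx.typerState.ephemeral = false
    try {
      val result = op
      if (!ctx.typerState.ephemeral) cache(result)
      result
    } finally ctx.typerState.ephemeral |= saved
  }
}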
class MutableTyperState(previous: TyperState, r: Reporter, override val isCommittable: Boolean)
extends TyperState(r) {
private var myReporter = r
override def reporter = myReporter
private var myConstraint: Constraint = previous.constraint
override def constraint = myConstraint
override def constraint_=(c: Constraint)(implicit ctx: Context) = {
if (Config.debugCheckConstraintsClosed && isGlobalCommittable) c.checkClosed()
myConstraint = c
}
private var myEphemeral: Boolean = previous.ephemeral
override def ephemeral = myEphemeral
override def ephemeral_=(x: Boolean): Unit = { myEphemeral = x }
override def fresh(isCommittable: Boolean): TyperState =
new MutableTyperState(this, new StoreReporter, isCommittable)
override def withReporter(reporter: Reporter) =
new MutableTyperState(this, reporter, isCommittable)
override val isGlobalCommittable =
isCommittable &&
(!previous.isInstanceOf[MutableTyperState] || previous.isGlobalCommittable)
/** Commit typer state so that its information is copied into current typer state
* In addition (1) the owning state of undetermined or temporarily instantiated
* type variables changes from this typer state to the current one. (2) Variables
* that were temporarily instantiated in the current typer state are permanently
* instantiated instead.
*/
override def commit()(implicit ctx: Context) = {
val targetState = ctx.typerState
assert(isCommittable)
targetState.constraint = constraint
constraint foreachTypeVar { tvar =>
if (tvar.owningState eq this)
tvar.owningState = targetState
}
targetState.ephemeral = ephemeral
targetState.gc()
reporter.flush()
}
override def gc()(implicit ctx: Context): Unit = {
val toCollect = new mutable.ListBuffer[PolyType]
constraint foreachTypeVar { tvar =>
if (!tvar.inst.exists) {
val inst = instType(tvar)
if (inst.exists && (tvar.owningState eq this)) {
tvar.inst = inst
val poly = tvar.origin.binder
if (constraint.isRemovable(poly)) toCollect += poly
}
}
}
for (poly <- toCollect)
constraint = constraint.remove(poly)
}
/** Try operation `op`; if it produces errors, execute `fallback` with constraint and
* reporter as they were before `op` was executed. This is similar to `typer/tryEither`,
* but with one important difference: Any type variable instantiations produced by `op`
* are persisted even if `op` fails. This is normally not what one wants and therefore
* it is recommended to use
*
* tryEither { implicit ctx => op } { (_, _) => fallBack }
*
* instead of
*
* ctx.tryWithFallback(op)(fallBack)
*
* `tryWithFallback` is only used when an implicit parameter search fails
* and the whole expression is subsequently retype-checked with a Wildcard
* expected type (so as to allow an implicit conversion on the result and
* avoid over-constraining the implicit parameter search). In this case,
* the only type variables that might be falsely instantiated by `op` but
* not by `fallBack` are type variables in the typed expression itself, and
* these will be thrown away and new ones will be created on re-typing.
* So `tryWithFallback` is safe. It is also necessary because without it
* we do not propagate enough instantiation information into the implicit search
* and this might lead to a missing parameter type error. This is exhibited
* at several places in the test suite (for instance in `pos_typers`).
* Overall, this is rather ugly, but despite trying for 2 days I have not
* found a better solution.
*/
override def tryWithFallback[T](op: => T)(fallback: => T)(implicit ctx: Context): T = {
val storeReporter = new StoreReporter
val savedReporter = myReporter
myReporter = storeReporter
val savedConstraint = myConstraint
val result = try op finally myReporter = savedReporter
if (!storeReporter.hasErrors) result
else {
myConstraint = savedConstraint
fallback
}
}
override def toText(printer: Printer): Text = constraint.toText(printer)
}
| yusuke2255/dotty | src/dotty/tools/dotc/core/TyperState.scala | Scala | bsd-3-clause | 7,140 |
import reactivemongo.bson.{
BSON,
BSONDecimal,
BSONDocument,
BSONDocumentHandler,
BSONDocumentReader,
BSONDocumentWriter,
BSONDouble,
BSONHandler,
BSONInteger,
BSONNull,
BSONReader,
BSONWriter,
Macros
}
import reactivemongo.bson.exceptions.DocumentKeyNotFound
import org.specs2.matcher.MatchResult
final class MacroSpec extends org.specs2.mutable.Specification {
"Macros" title
import MacroTest._
import BSONDocument.pretty
"Formatter" should {
"handle primitives" in {
roundtrip(
Primitives(1.2, "hai", true, 42, Long.MaxValue),
Macros.handler[Primitives])
}
"support nesting" in {
implicit val personFormat = Macros.handler[Person]
val doc = Pet("woof", Person("john", "doe"))
roundtrip(doc, Macros.handler[Pet])
}
"support option" in {
val format = Macros.handler[Optional]
val some = Optional("some", Some("value"))
val none = Optional("none", None)
roundtrip(some, format) and roundtrip(none, format)
}
"not support type mismatch for optional value" in {
//val reader: BSONReader[BSONDocument, Optional] =
Macros.reader[Optional].read(
BSONDocument(
"name" -> "invalidValueType",
"value" -> 4)) must throwA[Exception]("BSONInteger")
}
"support null for optional value" in {
Macros.reader[Optional].read(
BSONDocument(
"name" -> "name",
"value" -> BSONNull)).value must be(None)
}
"write empty option as null" in {
Macros.writer[OptionalAsNull].
write(OptionalAsNull("asNull", None)) must_=== BSONDocument(
"name" -> "asNull",
"value" -> BSONNull)
}
"support seq" in {
roundtrip(
WordLover("john", Seq("hello", "world")),
Macros.handler[WordLover])
}
"support single member case classes" in {
roundtrip(
Single(BigDecimal("12.345")),
Macros.handler[Single])
}
"support single member options" in {
val f = Macros.handler[OptionalSingle]
roundtrip(OptionalSingle(Some("foo")), f) and {
roundtrip(OptionalSingle(None), f)
}
}
"support generic case class Foo" >> {
implicit def singleHandler = Macros.handler[Single]
"directly" in {
roundtrip(
Foo(Single(BigDecimal(123L)), "ipsum"),
Macros.handler[Foo[Single]])
}
"from generic function" in {
def handler[T](implicit w: BSONDocumentWriter[T], r: BSONDocumentReader[T]) = Macros.handler[Foo[T]]
roundtrip(Foo(Single(BigDecimal(1.23D)), "ipsum"), handler[Single])
}
}
"support generic case class GenSeq" in {
implicit def singleHandler = new BSONWriter[Single, BSONDecimal] with BSONReader[BSONDecimal, Single] with BSONHandler[BSONDecimal, Single] {
def write(single: Single) = BSONDecimal.fromBigDecimal(single.value).get
def read(dec: BSONDecimal) =
BSONDecimal.toBigDecimal(dec).map(Single(_)).get
}
implicit def optionHandler[T](implicit h: BSONHandler[BSONDecimal, T]): BSONDocumentHandler[Option[T]] = new BSONDocumentReader[Option[T]] with BSONDocumentWriter[Option[T]] with BSONHandler[BSONDocument, Option[T]] {
def read(doc: BSONDocument): Option[T] =
doc.getAs[BSONDecimal](f"$$some").map(h.read(_))
def write(single: Option[T]) = single match {
case Some(v) => BSONDocument(f"$$some" -> h.write(v))
case _ => BSONDocument.empty
}
}
def genSeqHandler[T: BSONDocumentHandler]: BSONDocumentHandler[GenSeq[T]] = Macros.handler[GenSeq[T]]
val seq = GenSeq(Seq(
Option.empty[Single],
Option(Single(BigDecimal(1)))), 1)
roundtrip(seq, genSeqHandler[Option[Single]])
}
"handle overloaded apply correctly" in {
val doc1 = OverloadedApply("hello")
val doc2 = OverloadedApply(List("hello", "world"))
val f = Macros.handler[OverloadedApply]
roundtrip(doc1, f)
roundtrip(doc2, f)
}
"handle overloaded apply with different number of arguments correctly" in {
val doc1 = OverloadedApply2("hello", 5)
val doc2 = OverloadedApply2("hello")
val f = Macros.handler[OverloadedApply2]
roundtrip(doc1, f)
roundtrip(doc2, f)
}
"handle overloaded apply with 0 number of arguments correctly" in {
val doc1 = OverloadedApply3("hello", 5)
val doc2 = OverloadedApply3()
val f = Macros.handler[OverloadedApply3]
roundtrip(doc1, f)
roundtrip(doc2, f)
}
"case class and handler inside trait" in {
val t = new NestModule {}
roundtrip(t.Nested("it works"), t.format)
}
"case class inside trait with handler outside" in {
val t = new NestModule {}
import t._ //you need Nested in scope because t.Nested won't work
val format = Macros.handler[Nested]
roundtrip(Nested("it works"), format)
}
"respect compilation options" in {
val format = Macros.handlerOpts[Person, Macros.Options.Verbose] //more stuff in compiler log
roundtrip(Person("john", "doe"), format)
}
"not persist class name for case class" in {
val person = Person("john", "doe")
val format = Macros.handlerOpts[Person, Macros.Options.SaveSimpleName]
val doc = format write person
doc.getAs[String]("className") must beNone and {
roundtrip(person, format)
}
}
"handle union types (ADT)" in {
import Union._
import Macros.Options._
val a = UA(1)
val b = UB("hai")
      val format = Macros.handlerOpts[UT, UnionType[UA \/ UB \/ UC \/ UD \/ UF.type] with AutomaticMaterialization]
format.write(a).getAs[String]("className").
aka("class #1") must beSome("MacroTest.Union.UA") and {
format.write(b).getAs[String]("className").
aka("class #2") must beSome("MacroTest.Union.UB")
} and roundtrip(a, format) and roundtrip(b, format)
}
"handle union types as sealed family" in {
import Union._
import Macros.Options._
val a = UA2(1)
val b = UB2("hai")
      val format = Macros.handlerOpts[UT2, UnionType[UA2 \/ UB2] with AutomaticMaterialization]
format.write(a).getAs[String]("className").
aka("class #1") must beSome("MacroTest.Union.UA2") and {
format.write(b).getAs[String]("className").
aka("class #2") must beSome("MacroTest.Union.UB2")
} and roundtrip(a, format) and roundtrip(b, format)
}
"handle union types (ADT) with simple names" in {
import Union._
import Macros.Options._
val a = UA(1)
val b = UB("hai")
      val format = Macros.handlerOpts[UT, SimpleUnionType[UA \/ UB \/ UC \/ UD] with AutomaticMaterialization]
format.write(a).getAs[String]("className") must beSome("UA") and {
format.write(b).getAs[String]("className") must beSome("UB")
} and roundtrip(a, format) and roundtrip(b, format)
}
"handle recursive structure" in {
import TreeModule._
//handlers defined at tree module
val tree: Tree = Node(Leaf("hi"), Node(Leaf("hello"), Leaf("world")))
roundtrip(tree, Tree.bson)
}
"grab an implicit handler for type used in union" in {
import TreeCustom._
val tree: Tree = Node(Leaf("hi"), Node(Leaf("hello"), Leaf("world")))
val serialized = BSON writeDocument tree
val deserialized = BSON.readDocument[Tree](serialized)
val expected = Node(Leaf("hai"), Node(Leaf("hai"), Leaf("hai")))
deserialized mustEqual expected
}
"handle empty case classes" in {
val empty = Empty()
val format = Macros.handler[Empty]
roundtrip(empty, format)
}
"do nothing with objects" in {
val format = Macros.handler[EmptyObject.type]
roundtrip(EmptyObject, format)
}
"handle ADTs with objects" in {
import IntListModule._
roundtripImp[IntList](Tail) and {
roundtripImp[IntList](Cons(1, Cons(2, Cons(3, Tail))))
}
}
"automate Union on sealed traits" in {
import Macros.Options._
import Union._
implicit val format = Macros.handlerOpts[UT, AutomaticMaterialization]
format.write(UA(1)).getAs[String]("className").
aka("class #1") must beSome("MacroTest.Union.UA") and {
format.write(UB("buzz")).getAs[String]("className").
aka("class #2") must beSome("MacroTest.Union.UB")
} and roundtripImp[UT](UA(17)) and roundtripImp[UT](UB("foo")) and {
roundtripImp[UT](UC("bar")) and roundtripImp[UT](UD("baz"))
} and roundtripImp[UT](UF)
}
"support automatic implementations search with nested traits" in {
import Macros.Options._
import InheritanceModule._
implicit val format = Macros.handlerOpts[T, AutomaticMaterialization]
format.write(A()).getAs[String]("className").
aka("class #1") must beSome("MacroTest.InheritanceModule.A") and {
format.write(B).getAs[String]("className").
aka("class #2") must beSome("MacroTest.InheritanceModule.B")
} and {
roundtripImp[T](A()) and roundtripImp[T](B) and roundtripImp[T](C())
}
}
"automate Union on sealed traits with simple name" in {
import Macros.Options._
import Union._
implicit val format = Macros.handlerOpts[UT, SaveSimpleName with AutomaticMaterialization]
format.write(UA(1)).getAs[String]("className") must beSome("UA")
format.write(UB("buzz")).getAs[String]("className") must beSome("UB")
roundtripImp[UT](UA(17)) and roundtripImp[UT](UB("foo")) and {
roundtripImp[UT](UC("bar")) and roundtripImp[UT](UD("baz"))
}
}
"support automatic implementations search with nested traits with simple name" in {
import Macros.Options._
import InheritanceModule._
implicit val format = Macros.handlerOpts[T, SaveSimpleName]
format.write(A()).getAs[String]("className") must beSome("A")
format.write(B).getAs[String]("className") must beSome("B")
roundtripImp[T](A()) and roundtripImp[T](B) and roundtripImp[T](C())
}
"support overriding keys with annotations" in {
implicit val format = Macros.handler[RenamedId]
val doc = RenamedId(value = "some value")
val serialized = format write doc
serialized mustEqual (
BSONDocument("_id" -> doc.myID, "value" -> doc.value)) and {
format.read(serialized) must_== doc
}
}
"skip ignored fields" >> {
"with Pair type" in {
val pairHandler = Macros.handler[Pair]
val doc = pairHandler.write(Pair(left = "left", right = "right"))
doc.aka(pretty(doc)) must beTypedEqualTo(
BSONDocument("right" -> "right"))
}
"along with Key annotation" in {
implicit val handler: BSONDocumentWriter[IgnoredAndKey] =
Macros.writer[IgnoredAndKey]
val doc = handler.write(IgnoredAndKey(Person("john", "doe"), "foo"))
doc.aka(pretty(doc)) must beTypedEqualTo(
BSONDocument("second" -> "foo"))
}
}
"be generated for class class with self reference" in {
val h = Macros.handler[Bar]
val bar1 = Bar("bar1", None)
val doc1 = BSONDocument("name" -> "bar1")
h.read(doc1) must_== bar1 and {
h.read(BSONDocument("name" -> "bar2", "next" -> doc1)).
aka("bar2") must_== Bar("bar2", Some(bar1))
} and (h.write(bar1) must_== doc1) and {
h.write(Bar("bar2", Some(bar1))) must_== BSONDocument(
"name" -> "bar2", "next" -> doc1)
}
}
"support @Flatten annotation" in {
shapeless.test.illTyped("Macros.handler[InvalidRecursive]")
shapeless.test.illTyped("Macros.handler[InvalidNonDoc]")
roundtrip(
LabelledRange("range1", Range(start = 2, end = 5)),
Macros.handler[LabelledRange])
}
"handle case class with implicits" >> {
val doc1 = BSONDocument("pos" -> 2, "text" -> "str")
val doc2 = BSONDocument("ident" -> "id", "value" -> 23.456D)
val fixture1 = WithImplicit1(2, "str")
val fixture2 = WithImplicit2("id", 23.456D)
def readSpec1(r: BSONDocumentReader[WithImplicit1]) =
r.read(doc1) must_== fixture1
def writeSpec2(w: BSONDocumentWriter[WithImplicit2[Double]]) =
w.write(fixture2) must_== doc2
"to generate reader" in readSpec1(Macros.reader[WithImplicit1])
"to generate writer with type parameters" in writeSpec2(
Macros.writer[WithImplicit2[Double]])
"to generate handler" in {
val f1 = Macros.handler[WithImplicit1]
val f2 = Macros.handler[WithImplicit2[Double]]
readSpec1(f1) and (f1.write(fixture1) must_== doc1) and {
writeSpec2(f2) and (f2.read(doc2) must_== fixture2)
}
}
}
}
"Reader" should {
"throw meaningful exception if required field is missing" in {
val personDoc = BSONDocument("firstName" -> "joe")
Macros.reader[Person].read(personDoc) must throwA[DocumentKeyNotFound].
like { case e => e.getMessage must contain("lastName") }
}
"throw meaningful exception if field has another type" in {
val primitivesDoc = BSONDocument(
"dbl" -> 2D, "str" -> "str", "bl" -> true, "int" -> 2D, "long" -> 2L)
Macros.reader[Primitives].read(primitivesDoc).
aka("read") must throwA[ClassCastException].like {
case e =>
e.getMessage must contain(classOf[BSONDouble].getName) and {
e.getMessage must contain(classOf[BSONInteger].getName)
}
}
}
"be generated for a generic case class" in {
implicit def singleReader = Macros.reader[Single]
val r = Macros.reader[Foo[Single]]
val big = BigDecimal(1.23D)
r.read(BSONDocument(
"bar" -> BSONDocument("value" -> big),
"lorem" -> "ipsum")) must_== Foo(Single(big), "ipsum")
}
"be generated for class class with self reference" in {
val r = Macros.reader[Bar]
val bar1 = Bar("bar1", None)
val doc1 = BSONDocument("name" -> "bar1")
r.read(doc1) must_== bar1 and {
r.read(BSONDocument("name" -> "bar2", "next" -> doc1)).
aka("bar2") must_== Bar("bar2", Some(bar1))
}
}
"be generated with @Flatten annotation" in {
shapeless.test.illTyped("Macros.reader[InvalidRecursive]")
shapeless.test.illTyped("Macros.reader[InvalidNonDoc]")
val r = Macros.reader[LabelledRange]
val doc = BSONDocument("name" -> "range1", "start" -> 2, "end" -> 5)
val lr = LabelledRange("range1", Range(start = 2, end = 5))
r.read(doc) must_== lr
}
}
"Writer" should {
"be generated for a generic case class" in {
implicit def singleWriter = Macros.writer[Single]
val w = Macros.writer[Foo[Single]]
w.write(Foo(Single(BigDecimal(1)), "ipsum")) must_== BSONDocument(
"bar" -> BSONDocument("value" -> BigDecimal(1)),
"lorem" -> "ipsum")
}
"be generated for class class with self reference" in {
val w = Macros.writer[Bar]
val bar1 = Bar("bar1", None)
val doc1 = BSONDocument("name" -> "bar1")
w.write(bar1) must_== doc1 and {
w.write(Bar("bar2", Some(bar1))) must_== BSONDocument(
"name" -> "bar2", "next" -> doc1)
}
}
"be generated with @Flatten annotation" in {
shapeless.test.illTyped("Macros.writer[InvalidRecursive]")
shapeless.test.illTyped("Macros.writer[InvalidNonDoc]")
val w = Macros.writer[LabelledRange]
val lr = LabelledRange("range2", Range(start = 1, end = 3))
val doc = BSONDocument("name" -> "range2", "start" -> 1, "end" -> 3)
w.write(lr) must_== doc
}
}
// ---
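  // Test helpers (descriptive note): `roundtrip` serializes the value with the given
  // writer, reads it back with the reader and asserts the result equals the original;
  // `roundtripImp` does the same using implicitly resolved reader/writer instances.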
def roundtrip[A](original: A)(implicit reader: BSONReader[BSONDocument, A], writer: BSONWriter[A, BSONDocument]): MatchResult[Any] = {
def serialized = writer write original
def deserialized = reader read serialized
original mustEqual deserialized
}
def roundtrip[A](original: A, format: BSONReader[BSONDocument, A] with BSONWriter[A, BSONDocument]): MatchResult[Any] = roundtrip(original)(format, format)
def roundtripImp[A](data: A)(implicit reader: BSONReader[BSONDocument, A], writer: BSONWriter[A, BSONDocument]) = roundtrip(data)
}
| cchantep/ReactiveMongo | macros/src/test/scala/MacroSpec.scala | Scala | apache-2.0 | 16,463 |
inline def label(x: Int, inline g: Int => String): String = g(x)
def f: Int => String = ???
def label2(g: Int) = label(g, f)
| dotty-staging/dotty | tests/pos/i9342a.scala | Scala | apache-2.0 | 125 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import scala.reflect.ClassTag
import breeze.linalg.{DenseVector => BDV, SparseVector => BSV}
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.PartitionwiseSampledRDD
import org.apache.spark.util.random.BernoulliCellSampler
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.linalg.{SparseVector, DenseVector, Vector, Vectors}
import org.apache.spark.mllib.linalg.BLAS.dot
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
/**
* Helper methods to load, save and pre-process data used in ML Lib.
*/
@Since("0.8.0")
object MLUtils {
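  // Descriptive note: the lazy val below computes the machine epsilon for Double by
  // halving `eps` until 1.0 + eps / 2.0 rounds back to 1.0, leaving roughly 2.22e-16 on
  // IEEE-754 doubles. It is used as a numerical tolerance, e.g. in fastSquaredDistance.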
private[mllib] lazy val EPSILON = {
var eps = 1.0
while ((1.0 + (eps / 2.0)) != 1.0) {
eps /= 2.0
}
eps
}
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint].
* The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR.
* Each line represents a labeled sparse feature vector using the following format:
* {{{label index1:value1 index2:value2 ...}}}
* where the indices are one-based and in ascending order.
* This method parses each line into a [[org.apache.spark.mllib.regression.LabeledPoint]],
* where the feature indices are converted to zero-based.
*
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param numFeatures number of features, which will be determined from the input data if a
* nonpositive value is given. This is useful when the dataset is already split
* into multiple files and you want to load them separately, because some
   *                    features may not be present in certain files, which leads to inconsistent
* feature dimensions.
* @param minPartitions min number of partitions
* @return labeled data stored as an RDD[LabeledPoint]
*/
@Since("1.0.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
numFeatures: Int,
minPartitions: Int): RDD[LabeledPoint] = {
val parsed = sc.textFile(path, minPartitions)
.map(_.trim)
.filter(line => !(line.isEmpty || line.startsWith("#")))
.map { line =>
val items = line.split(' ')
val label = items.head.toDouble
val (indices, values) = items.tail.filter(_.nonEmpty).map { item =>
val indexAndValue = item.split(':')
val index = indexAndValue(0).toInt - 1 // Convert 1-based indices to 0-based.
val value = indexAndValue(1).toDouble
(index, value)
}.unzip
// check if indices are one-based and in ascending order
var previous = -1
var i = 0
val indicesLength = indices.length
while (i < indicesLength) {
val current = indices(i)
require(current > previous, "indices should be one-based and in ascending order" )
previous = current
i += 1
}
(label, indices.toArray, values.toArray)
}
// Determine number of features.
val d = if (numFeatures > 0) {
numFeatures
} else {
parsed.persist(StorageLevel.MEMORY_ONLY)
parsed.map { case (label, indices, values) =>
indices.lastOption.getOrElse(0)
}.reduce(math.max) + 1
}
parsed.map { case (label, indices, values) =>
LabeledPoint(label, Vectors.sparse(d, indices, values))
}
}
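  // Illustrative example (input values are assumed): the LIBSVM line "1.0 1:5.0 3:2.5"
  // is parsed above into label 1.0 with one-based indices (1, 3) converted to zero-based
  // (0, 2), yielding LabeledPoint(1.0, Vectors.sparse(d, Array(0, 2), Array(5.0, 2.5))).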
// Convenient methods for `loadLibSVMFile`.
@Since("1.0.0")
@deprecated("use method without multiclass argument, which no longer has effect", "1.1.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
multiclass: Boolean,
numFeatures: Int,
minPartitions: Int): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, numFeatures, minPartitions)
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint], with the default number of
* partitions.
*/
@Since("1.0.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
numFeatures: Int): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, numFeatures, sc.defaultMinPartitions)
@Since("1.0.0")
@deprecated("use method without multiclass argument, which no longer has effect", "1.1.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
multiclass: Boolean,
numFeatures: Int): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, numFeatures)
@Since("1.0.0")
@deprecated("use method without multiclass argument, which no longer has effect", "1.1.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
multiclass: Boolean): RDD[LabeledPoint] =
loadLibSVMFile(sc, path)
/**
* Loads binary labeled data in the LIBSVM format into an RDD[LabeledPoint], with number of
* features determined automatically and the default number of partitions.
*/
@Since("1.0.0")
def loadLibSVMFile(sc: SparkContext, path: String): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, -1)
/**
* Save labeled data in LIBSVM format.
* @param data an RDD of LabeledPoint to be saved
* @param dir directory to save the data
*
* @see [[org.apache.spark.mllib.util.MLUtils#loadLibSVMFile]]
*/
@Since("1.0.0")
def saveAsLibSVMFile(data: RDD[LabeledPoint], dir: String) {
// TODO: allow to specify label precision and feature precision.
val dataStr = data.map { case LabeledPoint(label, features) =>
val sb = new StringBuilder(label.toString)
features.foreachActive { case (i, v) =>
sb += ' '
sb ++= s"${i + 1}:$v"
}
sb.mkString
}
dataStr.saveAsTextFile(dir)
}
/**
* Loads vectors saved using `RDD[Vector].saveAsTextFile`.
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param minPartitions min number of partitions
* @return vectors stored as an RDD[Vector]
*/
@Since("1.1.0")
def loadVectors(sc: SparkContext, path: String, minPartitions: Int): RDD[Vector] =
sc.textFile(path, minPartitions).map(Vectors.parse)
/**
* Loads vectors saved using `RDD[Vector].saveAsTextFile` with the default number of partitions.
*/
@Since("1.1.0")
def loadVectors(sc: SparkContext, path: String): RDD[Vector] =
sc.textFile(path, sc.defaultMinPartitions).map(Vectors.parse)
/**
* Loads labeled points saved using `RDD[LabeledPoint].saveAsTextFile`.
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param minPartitions min number of partitions
* @return labeled points stored as an RDD[LabeledPoint]
*/
@Since("1.1.0")
def loadLabeledPoints(sc: SparkContext, path: String, minPartitions: Int): RDD[LabeledPoint] =
sc.textFile(path, minPartitions).map(LabeledPoint.parse)
/**
* Loads labeled points saved using `RDD[LabeledPoint].saveAsTextFile` with the default number of
* partitions.
*/
@Since("1.1.0")
def loadLabeledPoints(sc: SparkContext, dir: String): RDD[LabeledPoint] =
loadLabeledPoints(sc, dir, sc.defaultMinPartitions)
/**
* Load labeled data from a file. The data format used here is
* L, f1 f2 ...
* where f1, f2 are feature values in Double and L is the corresponding label as Double.
*
* @param sc SparkContext
* @param dir Directory to the input data files.
* @return An RDD of LabeledPoint. Each labeled point has two elements: the first element is
* the label, and the second element represents the feature values (an array of Double).
*
* @deprecated Should use [[org.apache.spark.rdd.RDD#saveAsTextFile]] for saving and
* [[org.apache.spark.mllib.util.MLUtils#loadLabeledPoints]] for loading.
*/
@Since("1.0.0")
@deprecated("Should use MLUtils.loadLabeledPoints instead.", "1.0.1")
def loadLabeledData(sc: SparkContext, dir: String): RDD[LabeledPoint] = {
sc.textFile(dir).map { line =>
val parts = line.split(',')
val label = parts(0).toDouble
val features = Vectors.dense(parts(1).trim().split(' ').map(_.toDouble))
LabeledPoint(label, features)
}
}
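  // Illustrative example (input values are assumed): the line "1.0, 2.5 3.1" is parsed
  // above into LabeledPoint(1.0, Vectors.dense(2.5, 3.1)).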
/**
* Save labeled data to a file. The data format used here is
* L, f1 f2 ...
* where f1, f2 are feature values in Double and L is the corresponding label as Double.
*
* @param data An RDD of LabeledPoints containing data to be saved.
* @param dir Directory to save the data.
*
* @deprecated Should use [[org.apache.spark.rdd.RDD#saveAsTextFile]] for saving and
* [[org.apache.spark.mllib.util.MLUtils#loadLabeledPoints]] for loading.
*/
@Since("1.0.0")
@deprecated("Should use RDD[LabeledPoint].saveAsTextFile instead.", "1.0.1")
def saveLabeledData(data: RDD[LabeledPoint], dir: String) {
val dataStr = data.map(x => x.label + "," + x.features.toArray.mkString(" "))
dataStr.saveAsTextFile(dir)
}
/**
* :: Experimental ::
* Return a k element array of pairs of RDDs with the first element of each pair
* containing the training data, a complement of the validation data and the second
   * element, the validation data, containing a unique 1/kth of the data, where k = numFolds.
*/
@Since("1.0.0")
@Experimental
def kFold[T: ClassTag](rdd: RDD[T], numFolds: Int, seed: Int): Array[(RDD[T], RDD[T])] = {
val numFoldsF = numFolds.toFloat
(1 to numFolds).map { fold =>
val sampler = new BernoulliCellSampler[T]((fold - 1) / numFoldsF, fold / numFoldsF,
complement = false)
val validation = new PartitionwiseSampledRDD(rdd, sampler, true, seed)
val training = new PartitionwiseSampledRDD(rdd, sampler.cloneComplement(), true, seed)
(training, validation)
}.toArray
}
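  // Illustrative usage sketch (`dataRdd` and the model-fitting step are assumptions,
  // not part of this API):
  //
  //   val folds = MLUtils.kFold(dataRdd, numFolds = 3, seed = 42)
  //   folds.foreach { case (training, validation) =>
  //     // fit a model on `training`, evaluate it on `validation`
  //   }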
/**
* Returns a new vector with `1.0` (bias) appended to the input vector.
*/
@Since("1.0.0")
def appendBias(vector: Vector): Vector = {
vector match {
case dv: DenseVector =>
val inputValues = dv.values
val inputLength = inputValues.length
val outputValues = Array.ofDim[Double](inputLength + 1)
System.arraycopy(inputValues, 0, outputValues, 0, inputLength)
outputValues(inputLength) = 1.0
Vectors.dense(outputValues)
case sv: SparseVector =>
val inputValues = sv.values
val inputIndices = sv.indices
val inputValuesLength = inputValues.length
val dim = sv.size
val outputValues = Array.ofDim[Double](inputValuesLength + 1)
val outputIndices = Array.ofDim[Int](inputValuesLength + 1)
System.arraycopy(inputValues, 0, outputValues, 0, inputValuesLength)
System.arraycopy(inputIndices, 0, outputIndices, 0, inputValuesLength)
outputValues(inputValuesLength) = 1.0
outputIndices(inputValuesLength) = dim
Vectors.sparse(dim + 1, outputIndices, outputValues)
case _ => throw new IllegalArgumentException(s"Do not support vector type ${vector.getClass}")
}
}
/**
* Returns the squared Euclidean distance between two vectors. The following formula will be used
* if it does not introduce too much numerical error:
* <pre>
* \\|a - b\\|_2^2 = \\|a\\|_2^2 + \\|b\\|_2^2 - 2 a^T b.
* </pre>
* When both vector norms are given, this is faster than computing the squared distance directly,
* especially when one of the vectors is a sparse vector.
*
* @param v1 the first vector
* @param norm1 the norm of the first vector, non-negative
* @param v2 the second vector
* @param norm2 the norm of the second vector, non-negative
* @param precision desired relative precision for the squared distance
* @return squared distance between v1 and v2 within the specified precision
*/
private[mllib] def fastSquaredDistance(
v1: Vector,
norm1: Double,
v2: Vector,
norm2: Double,
precision: Double = 1e-6): Double = {
val n = v1.size
require(v2.size == n)
require(norm1 >= 0.0 && norm2 >= 0.0)
val sumSquaredNorm = norm1 * norm1 + norm2 * norm2
val normDiff = norm1 - norm2
var sqDist = 0.0
/*
* The relative error is
* <pre>
     * EPSILON * ( \\|a\\|_2^2 + \\|b\\|_2^2 + 2 |a^T b|) / ( \\|a - b\\|_2^2 ),
* </pre>
* which is bounded by
* <pre>
* 2.0 * EPSILON * ( \\|a\\|_2^2 + \\|b\\|_2^2 ) / ( (\\|a\\|_2 - \\|b\\|_2)^2 ).
* </pre>
* The bound doesn't need the inner product, so we can use it as a sufficient condition to
* check quickly whether the inner product approach is accurate.
*/
val precisionBound1 = 2.0 * EPSILON * sumSquaredNorm / (normDiff * normDiff + EPSILON)
if (precisionBound1 < precision) {
sqDist = sumSquaredNorm - 2.0 * dot(v1, v2)
} else if (v1.isInstanceOf[SparseVector] || v2.isInstanceOf[SparseVector]) {
val dotValue = dot(v1, v2)
sqDist = math.max(sumSquaredNorm - 2.0 * dotValue, 0.0)
val precisionBound2 = EPSILON * (sumSquaredNorm + 2.0 * math.abs(dotValue)) /
(sqDist + EPSILON)
if (precisionBound2 > precision) {
sqDist = Vectors.sqdist(v1, v2)
}
} else {
sqDist = Vectors.sqdist(v1, v2)
}
sqDist
}
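  // Worked instance of the identity used above: for v1 = (3, 0) and v2 = (0, 4) with
  // norm1 = 3 and norm2 = 4, sumSquaredNorm - 2 * dot(v1, v2) = 9 + 16 - 0 = 25, which
  // matches \\|v1 - v2\\|_2^2 = 3^2 + 4^2 = 25.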
/**
* When `x` is positive and large, computing `math.log(1 + math.exp(x))` will lead to arithmetic
   * overflow. This will happen when `x > 709.78`, which is not a very large number.
* It can be addressed by rewriting the formula into `x + math.log1p(math.exp(-x))` when `x > 0`.
*
* @param x a floating-point value as input.
* @return the result of `math.log(1 + math.exp(x))`.
*/
private[spark] def log1pExp(x: Double): Double = {
if (x > 0) {
x + math.log1p(math.exp(-x))
} else {
math.log1p(math.exp(x))
}
}
}
| practice-vishnoi/dev-spark-1 | mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala | Scala | apache-2.0 | 14,701 |
object Scl9437_Qualified {
def test(): Unit = {
val x: Result[_] = ???
x match {
case Result.<ref>Failure(x, y) =>
}
}
}
| whorbowicz/intellij-scala | testdata/resolve/failed/overloadedUnapply/Scl9437_Qualified.scala | Scala | apache-2.0 | 142 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker
import com.rackspace.cloud.api.wadl.Converters._
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class GivenAWadlWithNestedResourcesAndMethodReferencesAndOddRoleNames extends FlatSpec with RaxRolesBehaviors {
val configs = Map[String, Config]("Config With Roles Enabled" -> configWithRolesEnabled,
"Config With Roles Enabled and Messsage Extensions Disabled" -> configWithRolesEnabledMessageExtDisabled,
"Config With Roles Enabled and Duplications Removed" -> configWithRolesEnabledDupsRemoved,
"Config With Roles Enabled and Header Checks Disabled" -> configWithRolesEnabledHeaderCheckDisabled,
"Config with Roles Enabled and Default Parameters Enabled" -> configWithRaxRolesEnabledDefaultsEnabled,
"Config with Roles Enabled, Default Parameters Enabled and Duplications Removed" -> configWithRaxRolesEnabledDupsRemovedDefaultsEnabled)
for ((description, configuration) <- configs) {
val validator = Validator((localWADLURI,
<application xmlns="http://wadl.dev.java.net/2009/02"
xmlns:rax="http://docs.rackspace.com/api"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:tst="test://schema/a">
<grammars>
<schema elementFormDefault="qualified"
attributeFormDefault="unqualified"
xmlns="http://www.w3.org/2001/XMLSchema"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
targetNamespace="test://schema/a">
<simpleType name="yesno">
<restriction base="xsd:string">
<enumeration value="yes"/>
<enumeration value="no"/>
</restriction>
</simpleType>
</schema>
</grammars>
<resources base="https://test.api.openstack.com">
<resource path="/a" rax:roles="a:admin-foo">
<method href="#putOnA" rax:roles="a:observer% a:observer wsp"/>
<resource path="/b" rax:roles="b:creator">
<method href="#postOnB"/>
<method href="#putOnB"/>
<method href="#deleteOnB" rax:roles="AR-Payments-Billing-Support b:admin"/>
</resource>
<resource path="{yn}" rax:roles="a:admin-foo">
<param name="yn" style="template" type="tst:yesno"/>
<param name="X-INT" style="header" type="xsd:string" required="true" default="999"/>
<method href="#yn"/>
</resource>
</resource>
</resources>
<method id="putOnA" name="PUT"/>
<method id="postOnB" name="POST"/>
<method id="putOnB" name="PUT" rax:roles="AR-Payments-Billing-Support"/>
<method id="deleteOnB" name="DELETE" rax:roles="b:foo"/>
<method id="yn" name="GET" />
</application>)
, configuration)
// PUT /a has resource level a:admin-foo, method level a:observer% and 'a:observer '
it should behave like accessIsAllowed(validator, "PUT", "/a", List("a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a", List("a:observer%"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a", List("a:observer%", "a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a", List("a:observer wsp"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a", List("a:observer wsp", "a:admin-foo"), description)
it should behave like accessIsForbidden(validator, "PUT", "/a", List("AR-Payments-Billing-Support"), description)
it should behave like accessIsForbidden(validator, "PUT", "/a", List("a:observer"), description)
it should behave like accessIsForbiddenWhenNoXRoles(validator, "PUT", "/a", description)
// DELETE /a has resource level a:admin-foo, method is not defined
it should behave like methodNotAllowed(validator, "DELETE", "/a", List("a:admin-foo"), description)
it should behave like methodNotAllowed(validator, "DELETE", "/a", List(), description)
// POST /a/b has parent resource level a:admin-foo, resource level b:creator
it should behave like accessIsAllowed(validator, "POST", "/a/b", List("a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "POST", "/a/b", List("b:creator"), description)
it should behave like accessIsForbidden(validator, "POST", "/a/b", List("a:observer%"), description)
it should behave like accessIsForbidden(validator, "POST", "/a/b", List("a:observer wsp"), description)
it should behave like accessIsForbiddenWhenNoXRoles(validator, "POST", "/a/b", description)
// PUT /a/b has parent resource level a:admin-foo, resource level b:creator, method level AR-Payments-Billing-Support
it should behave like accessIsAllowed(validator, "PUT", "/a/b", List("a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a/b", List("b:creator"), description)
it should behave like accessIsAllowed(validator, "PUT", "/a/b", List("AR-Payments-Billing-Support", "a:foo"), description)
it should behave like accessIsForbidden(validator, "PUT", "/a/b", List("a:creator"), description)
it should behave like accessIsForbidden(validator, "PUT", "/a/b", List(), description)
it should behave like accessIsForbidden(validator, "PUT", "/a/b", List("observer"), description)
it should behave like accessIsForbiddenWhenNoXRoles(validator, "PUT", "/a/b", description)
// DELETE /a/b has parent resource level a:admin-foo, resource level b:creator, method level b:admin, AR-Payments-Billing-Support
it should behave like accessIsAllowed(validator, "DELETE", "/a/b", List("a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "DELETE", "/a/b", List("b:creator"), description)
it should behave like accessIsAllowed(validator, "DELETE", "/a/b", List("AR-Payments-Billing-Support", "a:admin-foo"), description)
it should behave like accessIsAllowed(validator, "DELETE", "/a/b", List("b:admin"), description)
it should behave like accessIsForbidden(validator, "DELETE", "/a/b", List(), description)
it should behave like accessIsForbidden(validator, "DELETE", "/a/b", List("a:observer%"), description)
it should behave like accessIsForbidden(validator, "DELETE", "/a/b", List("a:observer wsp"), description)
it should behave like accessIsForbidden(validator, "DELETE", "/a/b", List("b:foo"), description)
it should behave like accessIsForbiddenWhenNoXRoles(validator, "DELETE", "/a/b", description)
// GET on /a/yes, /a/no, /a/foo
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/yes", List("a:admin-foo"), description)
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/no", List("a:admin-foo"), description)
it should behave like resourceNotFoundWithHeader(validator, "GET", "/a/foo", List("a:admin-foo"), description, List("'b'","yes","no"))
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/yes", List("a:admin-foo", "a:observer%"), description)
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/no", List("a:admin-foo", "a:observer%"), description)
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/yes", List("a:admin-foo", "a:observer wsp"), description)
it should behave like accessIsAllowedWithHeader(validator, "GET", "/a/no", List("a:admin-foo", "a:observer wsp"), description)
it should behave like resourceNotFoundWithHeader(validator, "GET", "/a/foo", List("a:admin-foo", "a:observer%"), description, List("'b'","yes","no"))
it should behave like resourceNotFoundWithHeader(validator, "GET", "/a/foo", List("a:admin-foo", "a:observer wsp"), description, List("'b'","yes","no"))
it should behave like accessIsForbiddenWithHeader(validator, "GET", "/a/yes", List("a:observer%"), description)
it should behave like accessIsForbiddenWithHeader(validator, "GET", "/a/no", List("a:observer%"), description)
it should behave like resourceNotFoundWithHeader(validator, "GET", "/a/foo", List("a:observer%"), description, List("'b'","yes","no"))
it should behave like accessIsForbiddenWithHeader(validator, "GET", "/a/yes", List("a:observer wsp"), description)
it should behave like accessIsForbiddenWithHeader(validator, "GET", "/a/no", List("a:observer wsp"), description)
it should behave like resourceNotFoundWithHeader(validator, "GET", "/a/foo", List("a:observer wsp"), description, List("'b'","yes","no"))
}
}
| rackerlabs/api-checker | core/src/test/scala/com/rackspace/com/papi/components/checker/GivenAWadlWithNestedResourcesAndMethodReferencesAndOddRoleNames.scala | Scala | apache-2.0 | 9,341 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package hadoop
package good
// Scala
import scala.collection.mutable.Buffer
// Specs2
import org.specs2.mutable.Specification
// Scalding
import com.twitter.scalding._
// Cascading
import cascading.tuple.TupleEntry
// Apache Thrift
import org.apache.thrift.TSerializer
import JobSpecHelpers._
import com.snowplowanalytics.snowplow.collectors.thrift.{
SnowplowRawEvent,
  TrackerPayload,
PayloadProtocol,
PayloadFormat
}
/**
* Holds the input and expected data
* for the test.
*/
object SnowplowRawEventLzoSpec {
val payloadData = "e=pp&page=Loading%20JSON%20data%20into%20Redshift%20-%20the%20challenges%20of%20quering%20JSON%20data%2C%20and%20how%20Snowplow%20can%20be%20used%20to%20meet%20those%20challenges&pp_mix=0&pp_max=1&pp_miy=64&pp_may=935&cx=eyJwYWdlIjp7InVybCI6ImJsb2cifX0&dtm=1398762054889&tid=612876&vp=1279x610&ds=1279x5614&vid=2&duid=44082d3af0e30126&p=web&tv=js-2.0.0&fp=2071613637&aid=snowplowweb&lang=fr&cs=UTF-8&tz=Europe%2FBerlin&tna=cloudfront&evn=com.snowplowanalytics&refr=http%3A%2F%2Fsnowplowanalytics.com%2Fservices%2Fpipelines.html&f_pdf=1&f_qt=1&f_realp=0&f_wma=0&f_dir=0&f_fla=1&f_java=1&f_gears=0&f_ag=0&res=1280x800&cd=24&cookie=1&url=http%3A%2F%2Fsnowplowanalytics.com%2Fblog%2F2013%2F11%2F20%2Floading-json-data-into-redshift%2F%23weaknesses"
val payload = new TrackerPayload(
PayloadProtocol.Http, PayloadFormat.HttpGet, payloadData
)
val snowplowRawEvent = new SnowplowRawEvent(1381175274000L, "collector", "UTF-8", "255.255.255.255");
snowplowRawEvent.setPayload(payload);
snowplowRawEvent.setNetworkUserId("8712a379-4bcb-46ee-815d-85f26540577f")
snowplowRawEvent.setUserAgent("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.8 Safari/537.36");
val serializer = new TSerializer
val binaryThrift = serializer.serialize(snowplowRawEvent)
val lines = List(
(binaryThrift, 1L)
)
val expected = List(
"snowplowweb",
"web",
EtlTimestamp,
"2013-10-07 19:47:54.000",
"2014-04-29 09:00:54.889",
"page_ping",
null, // We can't predict the event_id
"612876",
"cloudfront", // Tracker namespace
"js-2.0.0",
"collector",
EtlVersion,
null, // No user_id set
"255.255.255.x",
"2071613637",
"44082d3af0e30126",
"2",
"8712a379-4bcb-46ee-815d-85f26540577f",
null, // No geo-location for this IP address
null,
null,
null,
null,
null,
null,
null, // No additional MaxMind databases used
null,
null,
null,
"http://snowplowanalytics.com/blog/2013/11/20/loading-json-data-into-redshift/#weaknesses",
"Loading JSON data into Redshift - the challenges of quering JSON data, and how Snowplow can be used to meet those challenges",
"http://snowplowanalytics.com/services/pipelines.html",
"http",
"snowplowanalytics.com",
"80",
"/blog/2013/11/20/loading-json-data-into-redshift/",
null,
"weaknesses",
"http",
"snowplowanalytics.com",
"80",
"/services/pipelines.html",
null,
null,
"internal", // Internal referer
null,
null,
null, // Marketing campaign fields empty
null, //
null, //
null, //
null, //
"""{"page":{"url":"blog"}}""",
null, // Structured event fields empty
null, //
null, //
null, //
null, //
null, // Unstructured event field empty
null, // Transaction fields empty
null, //
null, //
null, //
null, //
null, //
null, //
null, //
null, // Transaction item fields empty
null, //
null, //
null, //
null, //
null, //
"0", // Page ping fields
"1", //
"64", //
"935", //
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.8 Safari/537.36", // previously "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36",
"Chrome 31", // previously "Chrome"
"Chrome",
"31.0.1650.8",// previously "34.0.1847.131"
"Browser",
"WEBKIT",
"fr",
"1",
"1",
"1",
"0",
"1",
"0",
"0",
"0",
"0",
"1",
"24",
"1279",
"610",
"Mac OS X",
"Mac OS X",
"Apple Inc.",
"Europe/Berlin",
"Computer",
"0",
"1280",
"800",
"UTF-8",
"1279",
"5614"
)
}
/**
* Integration test for the EtlJob:
*
* Test that a raw Thrift event is processed correctly.
* See https://github.com/snowplow/snowplow/issues/538
* Based on Apr2014CfLineSpec.
*/
class SnowplowRawEventLzoSpec extends Specification {
"A job which processes a RawThrift file containing 1 valid page view" should {
EtlJobSpec("thrift", "1", true, List("geo")).
source(FixedPathLzoRaw("inputFolder"), SnowplowRawEventLzoSpec.lines).
sink[TupleEntry](Tsv("outputFolder")){ buf : Buffer[TupleEntry] =>
"correctly output 1 page view" in {
buf.size must_== 1
val actual = buf.head
for (idx <- SnowplowRawEventLzoSpec.expected.indices) {
actual.getString(idx) must beFieldEqualTo(SnowplowRawEventLzoSpec.expected(idx), withIndex = idx)
}
}
}.
sink[TupleEntry](Tsv("exceptionsFolder")){ trap =>
"not trap any exceptions" in {
trap must beEmpty
}
}.
sink[String](Tsv("badFolder")){ error =>
"not write any bad rows" in {
error must beEmpty
}
}.
run.
finish
}
}
| wesley1001/snowplow | 3-enrich/scala-hadoop-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.hadoop/good/SnowplowRawEventLzoSpec.scala | Scala | apache-2.0 | 6,293 |
import play.api.mvc.{Action, Controller}
import play.api.data.validation.Constraint
import de.zalando.play.controllers._
import PlayBodyParsing._
import PlayValidations._
import scala.util._
/**
* This controller is re-generated after each change in the specification.
* Please only place your hand-written code between appropriate comments in the body of the controller.
*/
package nakadi.yaml {
class NakadiYaml extends NakadiYamlBase {
val nakadiHackGet_metrics = nakadiHackGet_metricsAction { _ =>
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_metrics
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_metrics
}
val nakadiHackGet_events_from_single_partition = nakadiHackGet_events_from_single_partitionAction { input: (String, String, TopicsTopicEventsGetStream_timeout, String, Int, TopicsTopicEventsGetStream_timeout, TopicsTopicEventsGetStream_timeout, TopicsTopicEventsGetStream_timeout) =>
val (start_from, partition, stream_limit, topic, batch_limit, batch_flush_timeout, stream_timeout, batch_keep_alive_limit) = input
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_events_from_single_partition
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_events_from_single_partition
}
val nakadiHackGet_partition = nakadiHackGet_partitionAction { input: (String, String) =>
val (topic, partition) = input
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_partition
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_partition
}
val nakadiHackGet_topics = nakadiHackGet_topicsAction { _ =>
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_topics
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_topics
}
val nakadiHackGet_events_from_multiple_partitions = nakadiHackGet_events_from_multiple_partitionsAction { input: (TopicsTopicEventsGetStream_timeout, TopicsTopicEventsGetStream_timeout, TopicsTopicEventsGetStream_timeout, String, Int, TopicsTopicEventsGetStream_timeout, String) =>
val (stream_timeout, stream_limit, batch_flush_timeout, x_nakadi_cursors, batch_limit, batch_keep_alive_limit, topic) = input
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_events_from_multiple_partitions
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_events_from_multiple_partitions
}
val nakadiHackPost_event = nakadiHackPost_eventAction { input: (String, TopicsTopicEventsBatchPostEvent) =>
val (topic, event) = input
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackPost_event
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackPost_event
}
val nakadiHackGet_partitions = nakadiHackGet_partitionsAction { (topic: String) =>
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackGet_partitions
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackGet_partitions
}
val nakadiHackPost_events = nakadiHackPost_eventsAction { input: (String, TopicsTopicEventsBatchPostEvent) =>
val (topic, event) = input
// ----- Start of unmanaged code area for action NakadiYaml.nakadiHackPost_events
NotImplementedYet
// ----- End of unmanaged code area for action NakadiYaml.nakadiHackPost_events
}
}
}
| zalando/play-swagger | play-scala-generator/src/test/resources/expected_results/controllers/nakadi_yaml.scala | Scala | mit | 3,943 |
package models
case class Product(ean: Long, name: String, description: String)
object Product {
var products = Set(
Product(5010255079763L, "Paperclips Large", "Large Plain Pack of 1000"),
Product(5018206244666L, "Giant Paperclips", "Giant Plain 51mm 100 pack"),
Product(5018306332812L, "Paperclip Giant Plain", "Giant Plain Pack of 10000"),
Product(5018306312913L, "No Tear Paper Clip", "No Tear Extra Large Pack of 1000"),
Product(5018206244611L, "Zebra Paperclips", "Zebra Length 28mm Assorted 150 Pack")
)
def findAll = products.toList.sortBy(_.ean)
}
| spolnik/scala-workspace | play/products/app/models/Product.scala | Scala | apache-2.0 | 586 |
package blended.security
import blended.security.internal.SecurityActivator
import blended.testsupport.pojosr.{PojoSrTestHelper, SimplePojoContainerSpec}
import blended.testsupport.scalatest.LoggingFreeSpecLike
import javax.security.auth.Subject
import javax.security.auth.login.LoginContext
import org.osgi.framework.BundleActivator
import org.scalatest.matchers.should.Matchers
import scala.util.Try
abstract class AbstractLoginSpec extends SimplePojoContainerSpec
with LoggingFreeSpecLike
with Matchers
with PojoSrTestHelper {
override def bundles : Seq[(String, BundleActivator)] = Seq(
"blended.security" -> new SecurityActivator()
)
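  // Descriptive note: `login` performs a JAAS login against the login context named
  // "Test", answering the username/password callbacks via PasswordCallbackHandler, and
  // returns the authenticated Subject wrapped in a Try.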
def login(user : String, password : String) : Try[Subject] = Try {
val lc = new LoginContext("Test", new PasswordCallbackHandler(user, password.toCharArray()))
lc.login()
lc.getSubject()
}
}
| woq-blended/blended | blended.security.test/src/test/scala/blended/security/AbstractLoginSpec.scala | Scala | apache-2.0 | 865 |
package org.jetbrains.plugins.scala
package codeInspection.typeChecking
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.collections.MethodRepr
import org.jetbrains.plugins.scala.codeInspection.typeChecking.ComparingUnrelatedTypesInspection._
import org.jetbrains.plugins.scala.codeInspection.{AbstractInspection, InspectionBundle}
import org.jetbrains.plugins.scala.extensions.{PsiClassExt, ResolvesTo}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.Success
/**
* Nikolay.Tropin
* 5/30/13
*/
object ComparingUnrelatedTypesInspection {
val inspectionName = InspectionBundle.message("comparing.unrelated.types.name")
val inspectionId = "ComparingUnrelatedTypes"
private val seqFunctions = Seq("contains", "indexOf", "lastIndexOf")
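  // Descriptive note: roughly, two types are reported as incomparable when, after
  // widening singleton types and unboxing, they are not both numeric and neither can
  // ever be a subtype of the other (checked both ways via ComparingUtil.isNeverSubType).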
def cannotBeCompared(type1: ScType, type2: ScType): Boolean = {
val types = Seq(type1, type2).map(tryExtractSingletonType)
val Seq(unboxed1, unboxed2) =
if (types.contains(Null)) types else types.map(StdType.unboxedType)
if (isNumericType(unboxed1) && isNumericType(unboxed2)) return false
ComparingUtil.isNeverSubType(unboxed1, unboxed2) && ComparingUtil.isNeverSubType(unboxed2, unboxed1)
}
def isNumericType(tp: ScType) = {
tp match {
case Byte | Char | Short | Int | Long | Float | Double => true
case ScDesignatorType(c: ScClass) => c.supers.headOption.map(_.qualifiedName).contains("scala.math.ScalaNumber")
case _ => false
}
}
private def tryExtractSingletonType(tp: ScType): ScType = ScType.extractDesignatorSingletonType(tp).getOrElse(tp)
}
class ComparingUnrelatedTypesInspection extends AbstractInspection(inspectionId, inspectionName){
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case MethodRepr(expr, Some(left), Some(oper), Seq(right)) if Seq("==", "!=", "ne", "eq", "equals") contains oper.refName =>
//getType() for the reference on the left side returns singleton type, little hack here
val leftOnTheRight = ScalaPsiElementFactory.createExpressionWithContextFromText(left.getText, right.getParent, right)
Seq(leftOnTheRight, right) map (_.getType()) match {
case Seq(Success(leftType, _), Success(rightType, _)) if cannotBeCompared(leftType, rightType) =>
holder.registerProblem(expr, inspectionName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
case _ =>
}
case MethodRepr(_, Some(baseExpr), Some(ResolvesTo(fun: ScFunction)), Seq(arg, _*)) if mayNeedHighlighting(fun) =>
for {
ScParameterizedType(_, Seq(elemType)) <- baseExpr.getType().map(tryExtractSingletonType)
argType <- arg.getType()
if cannotBeCompared(elemType, argType)
} {
val message = s"$inspectionName: ${elemType.presentableText} and ${argType.presentableText}"
holder.registerProblem(arg, message, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
case IsInstanceOfCall(call) =>
val qualType = call.referencedExpr match {
case ScReferenceExpression.withQualifier(q) => q.getType().toOption
case _ => None
}
val argType = call.arguments.headOption.flatMap(_.getType().toOption)
for {
t1 <- qualType
t2 <- argType
if cannotBeCompared(t1, t2)
} {
holder.registerProblem(call, inspectionName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
}
private def mayNeedHighlighting(fun: ScFunction): Boolean = {
if (!seqFunctions.contains(fun.name)) return false
val className = fun.containingClass.qualifiedName
className.startsWith("scala.collection") && className.contains("Seq") && seqFunctions.contains(fun.name) ||
Seq("scala.Option", "scala.Some").contains(className) && fun.name == "contains"
}
}
| SergeevPavel/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/typeChecking/ComparingUnrelatedTypesInspection.scala | Scala | apache-2.0 | 4,215 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.time
import org.joda.time._
class RichPeriod(underlying: Period) {
def days: Int =
underlying.getDays
def hours: Int =
underlying.getHours
def millis: Int =
underlying.getMillis
def minutes: Int =
underlying.getMinutes
def months: Int =
underlying.getMonths
def seconds: Int =
underlying.getSeconds
def weeks: Int =
underlying.getWeeks
def years: Int =
underlying.getYears
def -(period: ReadablePeriod): Period =
underlying.minus(period)
def +(period: ReadablePeriod): Period =
underlying.plus(period)
def ago: DateTime =
StaticDateTime.now.minus(underlying)
def later: DateTime =
StaticDateTime.now.plus(underlying)
def from(dt: DateTime): DateTime =
dt.plus(underlying)
def before(dt: DateTime): DateTime =
dt.minus(underlying)
def standardDuration: Duration =
underlying.toStandardDuration
}
}
| scalaj/scalaj-time | src/main/scala/org/scala_tools/time/RichPeriod.scala | Scala | apache-2.0 | 1,513 |
/*
* Copyright 2013 David Savage
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.chronologicalthought.modula.osgi
import java.io._
import org.osgi.framework.launch.Framework
import org.osgi.framework.{BundleException, Constants, FrameworkEvent}
import org.chronologicalthought.modula.{ModuleState, Module}
/**
* @author David Savage
*/
class FrameworkImpl(config: Map[String, String], module: Module) extends AbstractBundle(config, module) with Framework {
override val getSymbolicName = Constants.SYSTEM_BUNDLE_SYMBOLICNAME
val getLocation = Constants.SYSTEM_BUNDLE_LOCATION
val getLastModified = System.currentTimeMillis
def init() = {}
def waitForStop(timeout: Long): FrameworkEvent = {
// TODO does wait for framework Resolved make sense?
module.waitFor(ModuleState.Resolved, timeout)
new FrameworkEvent(FrameworkEvent.STOPPED, this, null)
}
override def adapt[A](clazz: Class[A]): A = {
throw new IllegalStateException("Not yet implemented")
}
override def update() = {
stop()
start()
}
override def start(options: Int) {
super.start(options)
// TODO use framework hooks to hide this service
module.context.register(new BundleFactoryImpl(config, module.context), classOf[BundleFactory])
}
def update(input: InputStream) = {
input.close()
update
}
def uninstall() = {
throw new BundleException("Framework cannot be uninstalled")
}
}
}
| davemssavage/modula | osgi/src/main/scala/org/chronologicalthought/modula/osgi/FrameworkImpl.scala | Scala | apache-2.0 | 1,960 |
/*
* Copyright 2015 Baptiste
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bm.rules
/**
* .
* @author Baptiste Morin
*/
trait Result {
val status: Status
val entryRuleKeyPair: KeyPair[Entry, Rule]
val isError: Boolean
override def toString: String =
s"""Result for
| Entry(${entryRuleKeyPair.first}),
| Rule(${entryRuleKeyPair.second})
| Status: $status
""".stripMargin
}
}
| morinb/scala-rules | src/main/scala/org/bm/rules/Result.scala | Scala | apache-2.0 | 976 |
package polyite.pmpi
/**
* Do-Nothing dummy for when MPI is not used.
*/
object NoMPI extends IMPI {
override def Init(args : Array[String]) {}
override def Finalize() {}
override def rank() : Int = 0
override def size() : Int = 1
override def isRoot() : Boolean = true
override def getRightNeighborRank() : Int = 0
override def abortAllAndTerminate(reason : String, errorcode : Int = 0) = {}
override def sendString(str : String, dest : Int) {}
override def recvString(src : Int) : String = ""
}
| stganser/polyite | src/polyite/pmpi/NoMPI.scala | Scala | mit | 524 |
package com.twitter.finagle.memcached.integration
import com.twitter.finagle.cacheresolver.CacheNodeGroup
import com.twitter.finagle.memcached.KetamaClientBuilder
import com.twitter.finagle.{Group, Name}
import com.twitter.io.Buf
import com.twitter.util.{Await, Future}
import java.net.{InetAddress, InetSocketAddress, SocketAddress}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfter, FunSuite}
@RunWith(classOf[JUnitRunner])
class KetamaClientTest extends FunSuite with BeforeAndAfter {
/**
* We already proved above that we can hit a real memcache server,
* so we can use our own for the partitioned client test.
*/
var server1: InProcessMemcached = null
var server2: InProcessMemcached = null
var address1: InetSocketAddress = null
var address2: InetSocketAddress = null
before {
server1 = new InProcessMemcached(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
address1 = server1.start().boundAddress.asInstanceOf[InetSocketAddress]
server2 = new InProcessMemcached(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
address2 = server2.start().boundAddress.asInstanceOf[InetSocketAddress]
}
after {
server1.stop()
server2.stop()
}
test("doesn't blow up") {
val client = KetamaClientBuilder()
.nodes("localhost:%d,localhost:%d".format(address1.getPort, address2.getPort))
.build()
Await.result(client.delete("foo"))
assert(Await.result(client.get("foo")) === None)
Await.result(client.set("foo", Buf.Utf8("bar")))
val Buf.Utf8(res) = Await.result(client.get("foo")).get
assert(res === "bar")
}
test("using Name doesn't blow up") {
val name = Name.bound(address1, address2)
val client = KetamaClientBuilder().dest(name).build()
Await.result(client.delete("foo"))
assert(Await.result(client.get("foo")) === None)
Await.result(client.set("foo", Buf.Utf8("bar")))
val Buf.Utf8(res) = Await.result(client.get("foo")).get
assert(res === "bar")
}
test("using Group[InetSocketAddress] doesn't blow up") {
val mutableGroup = Group(address1, address2).map{_.asInstanceOf[SocketAddress]}
val client = KetamaClientBuilder()
.group(CacheNodeGroup(mutableGroup, true))
.build()
Await.result(client.delete("foo"))
assert(Await.result(client.get("foo")) === None)
Await.result(client.set("foo", Buf.Utf8("bar")))
val Buf.Utf8(res) = Await.result(client.get("foo")).get
assert(res === "bar")
}
test("using custom keys doesn't blow up") {
val client = KetamaClientBuilder()
.nodes("localhost:%d:1:key1,localhost:%d:1:key2".format(address1.getPort, address2.getPort))
.build()
Await.result(client.delete("foo"))
assert(Await.result(client.get("foo")) === None)
Await.result(client.set("foo", Buf.Utf8("bar")))
val Buf.Utf8(res) = Await.result(client.get("foo")).get
assert(res === "bar")
}
test("even in future pool") {
lazy val client = KetamaClientBuilder()
.nodes("localhost:%d,localhost:%d".format(address1.getPort, address2.getPort))
.build()
val futureResult = Future.value(true) flatMap {
_ => client.get("foo")
}
assert(Await.result(futureResult) === None)
}
}
}
| lucaslanger/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/integration/KetamaClientTest.scala | Scala | apache-2.0 | 3,293 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack.Stop
import monix.execution.cancelables.SerialCancelable
import scala.util.control.NonFatal
import monix.execution.{Ack, Cancelable, Scheduler}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
private[reactive] final class RestartUntilObservable[A](source: Observable[A], p: A => Boolean) extends Observable[A] {
self =>
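  // Descriptive note: this operator keeps resubscribing to `source` until the first
  // element emitted by a subscription satisfies the predicate `p`; once an element
  // passes, the remainder of that subscription is mirrored downstream unchanged.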
def unsafeSubscribeFn(out: Subscriber[A]): Cancelable = {
val conn = SerialCancelable()
loop(out, conn)
conn
}
def loop(out: Subscriber[A], subscription: SerialCancelable): Unit = {
// Needs synchronization because we can have a race condition
// at least on the assignment of the cancelable.
synchronized {
subscription := source.unsafeSubscribeFn(new Subscriber[A] {
implicit val scheduler: Scheduler = out.scheduler
private[this] var isValidated = false
private[this] var isDone = false
def onNext(elem: A): Future[Ack] = {
// Stream was validated, so we can just stream the event.
if (isValidated) out.onNext(elem)
else {
// Protects calls to user code from within the operator and
            // streams the error downstream if it happens, but if the
// error happens because of calls to `onNext` or other
// protocol calls, then the behavior should be undefined.
var streamErrors = true
try {
isValidated = p(elem)
streamErrors = false
if (isValidated) out.onNext(elem)
else {
// Oh noes, we have to resubscribe.
// First we make sure no other events can happen
isDone = true
// Then we force an asynchronous boundary and retry
out.scheduler.execute(new Runnable {
def run(): Unit = loop(out, subscription)
})
// Signal the current upstream to stop.
              // Current upstream will also be cancelled when the
// SerialCancelable gets assigned a new value.
Stop
}
} catch {
case NonFatal(ex) if streamErrors =>
onError(ex)
Stop
}
}
}
def onError(ex: Throwable): Unit =
if (!isDone) {
isDone = true
out.onError(ex)
}
def onComplete(): Unit =
if (!isDone) {
isDone = true
out.onComplete()
}
})
()
}
}
}
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/RestartUntilObservable.scala | Scala | apache-2.0 | 3,347 |
package com.nthportal.euler.h0.t0
import com.nthportal.euler.ProjectEulerProblem
import com.nthportal.euler.maths.NumericFormat
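/**
 * Project Euler problem 4: finds the largest palindrome that can be written as the
 * product of two 3-digit numbers, using `NumericFormat.isPalindrome` from this project
 * to test candidates.
 */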
object Problem4 extends ProjectEulerProblem {
override def apply(): Long = {
for {
i <- 100 to 999
j <- 100 to 999
product = i * j
if NumericFormat.isPalindrome(product)
} yield product
}.max
}
}
| NthPortal/euler-n-scala | src/main/scala/com/nthportal/euler/h0/t0/Problem4.scala | Scala | mit | 360 |
package com.s3dropbox.lambda
import com.amazonaws.services.lambda.runtime.events.S3Event
import com.amazonaws.services.lambda.runtime.events.models.s3.S3EventNotification.S3Entity
import com.amazonaws.services.lambda.runtime.{Context, RequestHandler}
import com.s3dropbox.lambda.LambdaMain._
import com.typesafe.scalalogging.LazyLogging
/**
 * LambdaMain is the AWS Lambda entry point. This AWS Lambda function reacts to S3 notifications. The S3 object is
 * expected to be a zip file containing a hierarchy of PDFs and a manifest file. The manifest file is used to compare
 * what is already in Dropbox against what is being published as part of the payload.
*/
class LambdaMain extends RequestHandler[S3Event, Unit] with LazyLogging {
override def handleRequest(event: S3Event, context: Context): Unit = {
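    // An S3 event may batch several records; only the last record in the batch is inspected here.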
val s3entity: S3Entity = event.getRecords.get(event.getRecords.size - 1).getS3
DossierMain.runDossier(DossierCliOpts(
sys.env(DbxCredentialS3BucketEnvVar),
sys.env(DbxCredentialS3KeyEnvVar),
sys.env(DbxCredentialKmsIdEnvVar),
s3entity.getBucket.getName,
s3entity.getObject.getKey
))
}
}
object LambdaMain {
val DbxCredentialS3BucketEnvVar: String = "DBX_CREDENTIAL_S3_BUCKET"
val DbxCredentialS3KeyEnvVar: String = "DBX_CREDENTIAL_S3_KEY"
val DbxCredentialKmsIdEnvVar: String = "DBX_CREDENTIAL_KMS_ID"
} | ErrorsAndGlitches/S3DropboxLambda | src/main/scala/com/s3dropbox/lambda/LambdaMain.scala | Scala | mit | 1,350 |
package com.ttpresentation.dao
import reactivemongo.bson.{BSONDocument, BSONObjectID}
import scala.concurrent.Future
import reactivemongo.api.DB
import reactivemongo.api.collections.default.BSONCollection
import akka.actor.ActorSystem
import com.typesafe.scalalogging.slf4j.Logging
import org.mindrot.jbcrypt.BCrypt
import com.ttpresentation.model.User
/**
* Created by ctcarrier on 3/3/14.
*/
trait UserDao {
def get(key: BSONObjectID): Future[Option[User]]
def save(v: User): Future[Option[User]]
}
class UserReactiveDao(db: DB, collection: BSONCollection, system: ActorSystem) extends UserDao with Logging {
implicit val context = system.dispatcher
def get(key: BSONObjectID): Future[Option[User]] = {
collection.find(BSONDocument("_id" -> key)).one[User]
}
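  // Assigns a fresh BSONObjectID and hashes the plaintext password with BCrypt
  // (work factor 10) before persisting the user document.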
def save(v: User): Future[Option[User]] = {
for {
      toSave <- Future { v.copy(_id = Some(BSONObjectID.generate), password = BCrypt.hashpw(v.password, BCrypt.gensalt(10))) }
saved <- collection.save(toSave)
} yield Some(toSave)
}
}
| ctcarrier/ttpresentation | src/main/scala/com/ttpresentation/dao/UserDao.scala | Scala | mit | 1,033 |
package core.app
import akka.http.scaladsl.server.Route
import common.graphql.UserContext
import common.routes.frontend.FrontendRoute
import common.slick.SchemaInitializer
import core.guice.bindings.CoreBinding
import core.guice.injection.InjectorProvider._
import shapes.ServerModule
import scala.collection.mutable
class CoreModule extends ServerModule[UserContext, SchemaInitializer[_]] {
lazy val frontendRoute: FrontendRoute = inject[FrontendRoute]
override lazy val routes: mutable.HashSet[Route] = mutable.HashSet(frontendRoute.routes)
bindings = new CoreBinding
}
| sysgears/apollo-universal-starter-kit | modules/core/server-scala/src/main/scala/core/app/CoreModule.scala | Scala | mit | 584 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{File, FilenameFilter}
import java.nio.file.{Files, Paths}
import scala.collection.mutable.HashSet
import scala.concurrent.duration._
import org.apache.commons.io.FileUtils
import org.apache.spark.CleanerListener
import org.apache.spark.executor.DataReadMethod._
import org.apache.spark.executor.DataReadMethod.DataReadMethod
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TempTableAlreadyExistsException
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, Join, JoinStrategyHint, SHUFFLE_HASH}
import org.apache.spark.sql.catalyst.util.DateTimeConstants
import org.apache.spark.sql.execution.{ExecSubqueryExpression, RDDScanExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.columnar._
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.storage.StorageLevel.{MEMORY_AND_DISK_2, MEMORY_ONLY}
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.{AccumulatorContext, Utils}
private case class BigData(s: String)
class CachedTableSuite extends QueryTest with SQLTestUtils
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
setupTestData()
override def afterEach(): Unit = {
try {
spark.catalog.clearCache()
} finally {
super.afterEach()
}
}
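  // Returns the id of the RDD backing the cached column buffers of `tableName`,
  // failing the test if the table is not cached.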
def rddIdOf(tableName: String): Int = {
val plan = spark.table(tableName).queryExecution.sparkPlan
plan.collect {
case InMemoryTableScanExec(_, _, relation) =>
relation.cacheBuilder.cachedColumnBuffers.id
case _ =>
fail(s"Table $tableName is not cached\\n" + plan)
}.head
}
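  // Checks whether the first block of the given cached RDD has actually been
  // materialized in the block manager.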
def isMaterialized(rddId: Int): Boolean = {
val maybeBlock = sparkContext.env.blockManager.get(RDDBlockId(rddId, 0))
maybeBlock.foreach(_ => sparkContext.env.blockManager.releaseLock(RDDBlockId(rddId, 0)))
maybeBlock.nonEmpty
}
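  // Checks that the first block of the given cached RDD was read via the expected
  // data-read method (memory or disk), i.e. that the requested storage level took effect.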
def isExpectStorageLevel(rddId: Int, level: DataReadMethod): Boolean = {
val maybeBlock = sparkContext.env.blockManager.get(RDDBlockId(rddId, 0))
val isExpectLevel = maybeBlock.forall(_.readMethod === level)
maybeBlock.foreach(_ => sparkContext.env.blockManager.releaseLock(RDDBlockId(rddId, 0)))
maybeBlock.nonEmpty && isExpectLevel
}
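  // Counts InMemoryRelation nodes in the plan with cached data substituted,
  // including relations nested inside subquery expressions.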
private def getNumInMemoryRelations(ds: Dataset[_]): Int = {
val plan = ds.queryExecution.withCachedData
var sum = plan.collect { case _: InMemoryRelation => 1 }.sum
plan.transformAllExpressions {
case e: SubqueryExpression =>
sum += getNumInMemoryRelations(e.plan)
e
}
sum
}
private def getNumInMemoryTablesInSubquery(plan: SparkPlan): Int = {
plan.expressions.flatMap(_.collect {
case sub: ExecSubqueryExpression => getNumInMemoryTablesRecursively(sub.plan)
}).sum
}
private def getNumInMemoryTablesRecursively(plan: SparkPlan): Int = {
collect(plan) {
case inMemoryTable @ InMemoryTableScanExec(_, _, relation) =>
getNumInMemoryTablesRecursively(relation.cachedPlan) +
getNumInMemoryTablesInSubquery(inMemoryTable) + 1
case p =>
getNumInMemoryTablesInSubquery(p)
}.sum
}
test("cache temp table") {
withTempView("tempTable") {
testData.select("key").createOrReplaceTempView("tempTable")
assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
spark.catalog.cacheTable("tempTable")
assertCached(sql("SELECT COUNT(*) FROM tempTable"))
uncacheTable("tempTable")
}
}
test("unpersist an uncached table will not raise exception") {
assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
testData.unpersist(blocking = true)
assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
testData.unpersist(blocking = false)
assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
testData.persist()
assert(None != spark.sharedState.cacheManager.lookupCachedData(testData))
testData.unpersist(blocking = true)
assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
testData.unpersist(blocking = false)
assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
}
test("cache table as select") {
withTempView("tempTable") {
sql("CACHE TABLE tempTable AS SELECT key FROM testData")
assertCached(sql("SELECT COUNT(*) FROM tempTable"))
uncacheTable("tempTable")
}
}
test("cache table as select - existing temp view") {
withTempView("tempView") {
sql("CREATE TEMPORARY VIEW tempView as SELECT 1")
val e = intercept[TempTableAlreadyExistsException] {
sql("CACHE TABLE tempView AS SELECT 1")
}
assert(e.getMessage.contains("Temporary view 'tempView' already exists"))
}
}
test("uncaching temp table") {
withTempView("tempTable1", "tempTable2") {
testData.select("key").createOrReplaceTempView("tempTable1")
testData.select("key").createOrReplaceTempView("tempTable2")
spark.catalog.cacheTable("tempTable1")
assertCached(sql("SELECT COUNT(*) FROM tempTable1"))
assertCached(sql("SELECT COUNT(*) FROM tempTable2"))
// Is this valid?
uncacheTable("tempTable2")
// Should this be cached?
assertCached(sql("SELECT COUNT(*) FROM tempTable1"), 0)
}
}
test("too big for memory") {
withTempView("bigData") {
val data = "*" * 1000
sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF()
.createOrReplaceTempView("bigData")
spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK)
assert(spark.table("bigData").count() === 200000L)
spark.table("bigData").unpersist(blocking = true)
}
}
test("calling .cache() should use in-memory columnar caching") {
spark.table("testData").cache()
assertCached(spark.table("testData"))
spark.table("testData").unpersist(blocking = true)
}
test("calling .unpersist() should drop in-memory columnar cache") {
spark.table("testData").cache()
spark.table("testData").count()
spark.table("testData").unpersist(blocking = true)
assertCached(spark.table("testData"), 0)
}
test("isCached") {
spark.catalog.cacheTable("testData")
assertCached(spark.table("testData"))
assert(spark.table("testData").queryExecution.withCachedData match {
case _: InMemoryRelation => true
case _ => false
})
uncacheTable("testData")
assert(!spark.catalog.isCached("testData"))
assert(spark.table("testData").queryExecution.withCachedData match {
case _: InMemoryRelation => false
case _ => true
})
}
test("SPARK-1669: cacheTable should be idempotent") {
assert(!spark.table("testData").logicalPlan.isInstanceOf[InMemoryRelation])
spark.catalog.cacheTable("testData")
assertCached(spark.table("testData"))
assertResult(1, "InMemoryRelation not found, testData should have been cached") {
getNumInMemoryRelations(spark.table("testData"))
}
spark.catalog.cacheTable("testData")
assertResult(0, "Double InMemoryRelations found, cacheTable() is not idempotent") {
spark.table("testData").queryExecution.withCachedData.collect {
case r: InMemoryRelation if r.cachedPlan.isInstanceOf[InMemoryTableScanExec] => r
}.size
}
uncacheTable("testData")
}
test("read from cached table and uncache") {
spark.catalog.cacheTable("testData")
checkAnswer(spark.table("testData"), testData.collect().toSeq)
assertCached(spark.table("testData"))
uncacheTable("testData")
checkAnswer(spark.table("testData"), testData.collect().toSeq)
assertCached(spark.table("testData"), 0)
}
test("SELECT star from cached table") {
withTempView("selectStar") {
sql("SELECT * FROM testData").createOrReplaceTempView("selectStar")
spark.catalog.cacheTable("selectStar")
checkAnswer(
sql("SELECT * FROM selectStar WHERE key = 1"),
Seq(Row(1, "1")))
uncacheTable("selectStar")
}
}
test("Self-join cached") {
val unCachedAnswer =
sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key").collect()
spark.catalog.cacheTable("testData")
checkAnswer(
sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key"),
unCachedAnswer.toSeq)
uncacheTable("testData")
}
test("'CACHE TABLE' and 'UNCACHE TABLE' SQL statement") {
sql("CACHE TABLE testData")
assertCached(spark.table("testData"))
val rddId = rddIdOf("testData")
assert(
isMaterialized(rddId),
"Eagerly cached in-memory table should have already been materialized")
sql("UNCACHE TABLE testData")
assert(!spark.catalog.isCached("testData"), "Table 'testData' should not be cached")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
test("CACHE TABLE tableName AS SELECT * FROM anotherTable") {
withTempView("testCacheTable") {
sql("CACHE TABLE testCacheTable AS SELECT * FROM testData")
assertCached(spark.table("testCacheTable"))
val rddId = rddIdOf("testCacheTable")
assert(
isMaterialized(rddId),
"Eagerly cached in-memory table should have already been materialized")
uncacheTable("testCacheTable")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
}
test("CACHE TABLE tableName AS SELECT ...") {
withTempView("testCacheTable") {
sql("CACHE TABLE testCacheTable AS SELECT key FROM testData LIMIT 10")
assertCached(spark.table("testCacheTable"))
val rddId = rddIdOf("testCacheTable")
assert(
isMaterialized(rddId),
"Eagerly cached in-memory table should have already been materialized")
uncacheTable("testCacheTable")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
}
test("CACHE LAZY TABLE tableName") {
sql("CACHE LAZY TABLE testData")
assertCached(spark.table("testData"))
val rddId = rddIdOf("testData")
assert(
!isMaterialized(rddId),
"Lazily cached in-memory table shouldn't be materialized eagerly")
sql("SELECT COUNT(*) FROM testData").collect()
assert(
isMaterialized(rddId),
"Lazily cached in-memory table should have been materialized")
uncacheTable("testData")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
private def assertStorageLevel(cacheOptions: String, level: DataReadMethod): Unit = {
sql(s"CACHE TABLE testData OPTIONS$cacheOptions")
assertCached(spark.table("testData"))
val rddId = rddIdOf("testData")
assert(isExpectStorageLevel(rddId, level))
}
test("SQL interface support storageLevel(DISK_ONLY)") {
assertStorageLevel("('storageLevel' 'DISK_ONLY')", Disk)
}
test("SQL interface support storageLevel(DISK_ONLY) with invalid options") {
assertStorageLevel("('storageLevel' 'DISK_ONLY', 'a' '1', 'b' '2')", Disk)
}
test("SQL interface support storageLevel(MEMORY_ONLY)") {
assertStorageLevel("('storageLevel' 'MEMORY_ONLY')", Memory)
}
test("SQL interface cache SELECT ... support storageLevel(DISK_ONLY)") {
withTempView("testCacheSelect") {
sql("CACHE TABLE testCacheSelect OPTIONS('storageLevel' 'DISK_ONLY') SELECT * FROM testData")
assertCached(spark.table("testCacheSelect"))
val rddId = rddIdOf("testCacheSelect")
assert(isExpectStorageLevel(rddId, Disk))
}
}
test("SQL interface support storageLevel(Invalid StorageLevel)") {
val message = intercept[IllegalArgumentException] {
sql("CACHE TABLE testData OPTIONS('storageLevel' 'invalid_storage_level')")
}.getMessage
assert(message.contains("Invalid StorageLevel: INVALID_STORAGE_LEVEL"))
}
test("SQL interface support storageLevel(with LAZY)") {
sql("CACHE LAZY TABLE testData OPTIONS('storageLevel' 'disk_only')")
assertCached(spark.table("testData"))
val rddId = rddIdOf("testData")
assert(
!isMaterialized(rddId),
"Lazily cached in-memory table shouldn't be materialized eagerly")
sql("SELECT COUNT(*) FROM testData").collect()
assert(
isMaterialized(rddId),
"Lazily cached in-memory table should have been materialized")
assert(isExpectStorageLevel(rddId, Disk))
}
test("InMemoryRelation statistics") {
sql("CACHE TABLE testData")
spark.table("testData").queryExecution.withCachedData.collect {
case cached: InMemoryRelation =>
val actualSizeInBytes = (1 to 100).map(i => 4 + i.toString.length + 4).sum
assert(cached.stats.sizeInBytes === actualSizeInBytes)
}
}
test("Drops temporary table") {
withTempView("t1") {
testData.select("key").createOrReplaceTempView("t1")
spark.table("t1")
spark.catalog.dropTempView("t1")
intercept[AnalysisException](spark.table("t1"))
}
}
test("Drops cached temporary table") {
withTempView("t1", "t2") {
testData.select("key").createOrReplaceTempView("t1")
testData.select("key").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
assert(spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
spark.catalog.dropTempView("t1")
intercept[AnalysisException](spark.table("t1"))
assert(!spark.catalog.isCached("t2"))
}
}
test("Clear all cache") {
withTempView("t1", "t2") {
sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
spark.catalog.clearCache()
assert(spark.sharedState.cacheManager.isEmpty)
sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
sql("Clear CACHE")
assert(spark.sharedState.cacheManager.isEmpty)
}
}
test("Ensure accumulators to be cleared after GC when uncacheTable") {
withTempView("t1", "t2") {
sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
sql("SELECT * FROM t1").count()
sql("SELECT * FROM t2").count()
sql("SELECT * FROM t1").count()
sql("SELECT * FROM t2").count()
val toBeCleanedAccIds = new HashSet[Long]
val accId1 = spark.table("t1").queryExecution.withCachedData.collect {
case i: InMemoryRelation => i.cacheBuilder.sizeInBytesStats.id
}.head
toBeCleanedAccIds += accId1
val accId2 = spark.table("t1").queryExecution.withCachedData.collect {
case i: InMemoryRelation => i.cacheBuilder.sizeInBytesStats.id
}.head
toBeCleanedAccIds += accId2
val cleanerListener = new CleanerListener {
def rddCleaned(rddId: Int): Unit = {}
def shuffleCleaned(shuffleId: Int): Unit = {}
def broadcastCleaned(broadcastId: Long): Unit = {}
def accumCleaned(accId: Long): Unit = {
toBeCleanedAccIds.synchronized { toBeCleanedAccIds -= accId }
}
def checkpointCleaned(rddId: Long): Unit = {}
}
spark.sparkContext.cleaner.get.attachListener(cleanerListener)
uncacheTable("t1")
uncacheTable("t2")
System.gc()
eventually(timeout(10.seconds)) {
assert(toBeCleanedAccIds.synchronized { toBeCleanedAccIds.isEmpty },
"batchStats accumulators should be cleared after GC when uncacheTable")
}
assert(AccumulatorContext.get(accId1).isEmpty)
assert(AccumulatorContext.get(accId2).isEmpty)
}
}
test("SPARK-10327 Cache Table is not working while subquery has alias in its project list") {
withTempView("abc") {
sparkContext.parallelize((1, 1) :: (2, 2) :: Nil)
.toDF("key", "value").selectExpr("key", "value", "key+1").createOrReplaceTempView("abc")
spark.catalog.cacheTable("abc")
val sparkPlan = sql(
"""select a.key, b.key, c.key from
|abc a join abc b on a.key=b.key
|join abc c on a.key=c.key""".stripMargin).queryExecution.sparkPlan
assert(sparkPlan.collect { case e: InMemoryTableScanExec => e }.size === 3)
assert(sparkPlan.collect { case e: RDDScanExec => e }.size === 0)
}
}
/**
* Verifies that the plan for `df` contains `expected` number of Exchange operators.
*/
private def verifyNumExchanges(df: DataFrame, expected: Int): Unit = {
assert(
collect(df.queryExecution.executedPlan) { case e: ShuffleExchangeExec => e }.size == expected)
}
test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
val table3x = testData.union(testData).union(testData)
table3x.createOrReplaceTempView("testData3x")
sql("SELECT key, value FROM testData3x ORDER BY key").createOrReplaceTempView("orderedTable")
spark.catalog.cacheTable("orderedTable")
assertCached(spark.table("orderedTable"))
// Should not have an exchange as the query is already sorted on the group by key.
verifyNumExchanges(sql("SELECT key, count(*) FROM orderedTable GROUP BY key"), 0)
checkAnswer(
sql("SELECT key, count(*) FROM orderedTable GROUP BY key ORDER BY key"),
sql("SELECT key, count(*) FROM testData3x GROUP BY key ORDER BY key").collect())
uncacheTable("orderedTable")
spark.catalog.dropTempView("orderedTable")
// Set up two tables distributed in the same way. Try this with the data distributed into
    // different numbers of partitions.
for (numPartitions <- 1 until 10 by 4) {
withTempView("t1", "t2") {
testData.repartition(numPartitions, $"key").createOrReplaceTempView("t1")
testData2.repartition(numPartitions, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
// Joining them should result in no exchanges.
verifyNumExchanges(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"), 0)
checkAnswer(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"),
sql("SELECT * FROM testData t1 JOIN testData2 t2 ON t1.key = t2.a"))
// Grouping on the partition key should result in no exchanges
verifyNumExchanges(sql("SELECT count(*) FROM t1 GROUP BY key"), 0)
checkAnswer(sql("SELECT count(*) FROM t1 GROUP BY key"),
sql("SELECT count(*) FROM testData GROUP BY key"))
uncacheTable("t1")
uncacheTable("t2")
}
}
    // Distribute the tables into non-matching numbers of partitions. One side needs to be shuffled.
withTempView("t1", "t2") {
testData.repartition(6, $"key").createOrReplaceTempView("t1")
testData2.repartition(3, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
verifyNumExchanges(query, 1)
assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
checkAnswer(
query,
testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
uncacheTable("t1")
uncacheTable("t2")
}
    // One side of the join is not partitioned in the desired way. One side needs to be shuffled.
withTempView("t1", "t2") {
testData.repartition(6, $"value").createOrReplaceTempView("t1")
testData2.repartition(6, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
verifyNumExchanges(query, 1)
assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
checkAnswer(
query,
testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
uncacheTable("t1")
uncacheTable("t2")
}
withTempView("t1", "t2") {
testData.repartition(6, $"value").createOrReplaceTempView("t1")
testData2.repartition(12, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
verifyNumExchanges(query, 1)
assert(stripAQEPlan(query.queryExecution.executedPlan).
outputPartitioning.numPartitions === 12)
checkAnswer(
query,
testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
uncacheTable("t1")
uncacheTable("t2")
}
    // One side of the join is not partitioned in the desired way. Since the side that is
    // already partitioned has fewer partitions than the side that is not partitioned,
    // we shuffle both sides.
withTempView("t1", "t2") {
testData.repartition(6, $"value").createOrReplaceTempView("t1")
testData2.repartition(3, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
verifyNumExchanges(query, 2)
checkAnswer(
query,
testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
uncacheTable("t1")
uncacheTable("t2")
}
// repartition's column ordering is different from group by column ordering.
// But they use the same set of columns.
withTempView("t1") {
testData.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
spark.catalog.cacheTable("t1")
val query = sql("SELECT value, key from t1 group by key, value")
verifyNumExchanges(query, 0)
checkAnswer(
query,
testData.distinct().select($"value", $"key"))
uncacheTable("t1")
}
// repartition's column ordering is different from join condition's column ordering.
// We will still shuffle because hashcodes of a row depend on the column ordering.
// If we do not shuffle, we may actually partition two tables in totally two different way.
// See PartitioningSuite for more details.
withTempView("t1", "t2") {
val df1 = testData
df1.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
val df2 = testData2.select($"a", $"b".cast("string"))
df2.repartition(6, $"a", $"b").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
val query =
sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a and t1.value = t2.b")
verifyNumExchanges(query, 1)
assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
checkAnswer(
query,
df1.join(df2, $"key" === $"a" && $"value" === $"b").select($"key", $"value", $"a", $"b"))
uncacheTable("t1")
uncacheTable("t2")
}
}
test("SPARK-15870 DataFrame can't execute after uncacheTable") {
withTempView("selectStar") {
val selectStar = sql("SELECT * FROM testData WHERE key = 1")
selectStar.createOrReplaceTempView("selectStar")
spark.catalog.cacheTable("selectStar")
checkAnswer(
selectStar,
Seq(Row(1, "1")))
uncacheTable("selectStar")
checkAnswer(
selectStar,
Seq(Row(1, "1")))
}
}
test("SPARK-15915 Logical plans should use canonicalized plan when override sameResult") {
withTempView("localRelation") {
val localRelation = Seq(1, 2, 3).toDF()
localRelation.createOrReplaceTempView("localRelation")
spark.catalog.cacheTable("localRelation")
assert(getNumInMemoryRelations(localRelation) == 1)
}
}
test("SPARK-19093 Caching in side subquery") {
withTempView("t1") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
spark.catalog.cacheTable("t1")
val ds =
sql(
"""
|SELECT * FROM t1
|WHERE
|NOT EXISTS (SELECT * FROM t1)
""".stripMargin)
assert(getNumInMemoryRelations(ds) == 2)
}
}
test("SPARK-19093 scalar and nested predicate query") {
withTempView("t1", "t2", "t3", "t4") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
Seq(2).toDF("c1").createOrReplaceTempView("t2")
Seq(1).toDF("c1").createOrReplaceTempView("t3")
Seq(1).toDF("c1").createOrReplaceTempView("t4")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
spark.catalog.cacheTable("t3")
spark.catalog.cacheTable("t4")
// Nested predicate subquery
val ds =
sql(
"""
|SELECT * FROM t1
|WHERE
|c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
""".stripMargin)
assert(getNumInMemoryRelations(ds) == 3)
// Scalar subquery and predicate subquery
val ds2 =
sql(
"""
|SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1)
|WHERE
|c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
|OR
|EXISTS (SELECT c1 FROM t3)
|OR
|c1 IN (SELECT c1 FROM t4)
""".stripMargin)
assert(getNumInMemoryRelations(ds2) == 4)
}
}
test("SPARK-19765: UNCACHE TABLE should un-cache all cached plans that refer to this table") {
withTable("t") {
withTempPath { path =>
Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
sql(s"CREATE TABLE t USING parquet LOCATION '${path.toURI}'")
spark.catalog.cacheTable("t")
spark.table("t").select($"i").cache()
checkAnswer(spark.table("t").select($"i"), Row(1))
assertCached(spark.table("t").select($"i"))
Utils.deleteRecursively(path)
spark.sessionState.catalog.refreshTable(TableIdentifier("t"))
uncacheTable("t")
assert(spark.table("t").select($"i").count() == 0)
assert(getNumInMemoryRelations(spark.table("t").select($"i")) == 0)
}
}
}
test("refreshByPath should refresh all cached plans with the specified path") {
withTempDir { dir =>
val path = dir.getCanonicalPath()
spark.range(10).write.mode("overwrite").parquet(path)
spark.read.parquet(path).cache()
spark.read.parquet(path).filter($"id" > 4).cache()
assert(spark.read.parquet(path).filter($"id" > 4).count() == 5)
spark.range(20).write.mode("overwrite").parquet(path)
spark.catalog.refreshByPath(path)
assert(spark.read.parquet(path).count() == 20)
assert(spark.read.parquet(path).filter($"id" > 4).count() == 15)
}
}
test("SPARK-19993 simple subquery caching") {
withTempView("t1", "t2") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
Seq(2).toDF("c1").createOrReplaceTempView("t2")
val sql1 =
"""
|SELECT * FROM t1
|WHERE
|NOT EXISTS (SELECT * FROM t2)
""".stripMargin
sql(sql1).cache()
val cachedDs = sql(sql1)
assert(getNumInMemoryRelations(cachedDs) == 1)
// Additional predicate in the subquery plan should cause a cache miss
val cachedMissDs =
sql(
"""
|SELECT * FROM t1
|WHERE
|NOT EXISTS (SELECT * FROM t2 where c1 = 0)
""".stripMargin)
assert(getNumInMemoryRelations(cachedMissDs) == 0)
}
}
test("SPARK-19993 subquery caching with correlated predicates") {
withTempView("t1", "t2") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
Seq(1).toDF("c1").createOrReplaceTempView("t2")
// Simple correlated predicate in subquery
val sqlText =
"""
|SELECT * FROM t1
|WHERE
|t1.c1 in (SELECT t2.c1 FROM t2 where t1.c1 = t2.c1)
""".stripMargin
sql(sqlText).cache()
val cachedDs = sql(sqlText)
assert(getNumInMemoryRelations(cachedDs) == 1)
}
}
test("SPARK-19993 subquery with cached underlying relation") {
withTempView("t1") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
spark.catalog.cacheTable("t1")
// underlying table t1 is cached as well as the query that refers to it.
val sqlText =
"""
|SELECT * FROM t1
|WHERE
|NOT EXISTS (SELECT * FROM t1)
""".stripMargin
val ds = sql(sqlText)
assert(getNumInMemoryRelations(ds) == 2)
val cachedDs = sql(sqlText).cache()
assert(getNumInMemoryTablesRecursively(cachedDs.queryExecution.sparkPlan) == 3)
}
}
test("SPARK-19993 nested subquery caching and scalar + predicate subqueries") {
withTempView("t1", "t2", "t3", "t4") {
Seq(1).toDF("c1").createOrReplaceTempView("t1")
Seq(2).toDF("c1").createOrReplaceTempView("t2")
Seq(1).toDF("c1").createOrReplaceTempView("t3")
Seq(1).toDF("c1").createOrReplaceTempView("t4")
// Nested predicate subquery
val sql1 =
"""
|SELECT * FROM t1
|WHERE
|c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
""".stripMargin
sql(sql1).cache()
val cachedDs = sql(sql1)
assert(getNumInMemoryRelations(cachedDs) == 1)
// Scalar subquery and predicate subquery
val sql2 =
"""
|SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1)
|WHERE
|c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
|OR
|EXISTS (SELECT c1 FROM t3)
|OR
|c1 IN (SELECT c1 FROM t4)
""".stripMargin
sql(sql2).cache()
val cachedDs2 = sql(sql2)
assert(getNumInMemoryRelations(cachedDs2) == 1)
}
}
test("SPARK-23312: vectorized cache reader can be disabled") {
Seq(true, false).foreach { vectorized =>
withSQLConf(SQLConf.CACHE_VECTORIZED_READER_ENABLED.key -> vectorized.toString) {
val df = spark.range(10).cache()
df.queryExecution.executedPlan.foreach {
case i: InMemoryTableScanExec =>
assert(i.supportsColumnar == vectorized)
case _ =>
}
}
}
}
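  // Runs `f` and asserts that evaluating it did not submit any Spark job.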
private def checkIfNoJobTriggered[T](f: => T): T = {
var numJobTriggered = 0
val jobListener = new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
numJobTriggered += 1
}
}
sparkContext.addSparkListener(jobListener)
try {
val result = f
sparkContext.listenerBus.waitUntilEmpty()
assert(numJobTriggered === 0)
result
} finally {
sparkContext.removeSparkListener(jobListener)
}
}
test("SPARK-23880 table cache should be lazy and don't trigger any jobs") {
val cachedData = checkIfNoJobTriggered {
spark.range(1002).filter($"id" > 1000).orderBy($"id".desc).cache()
}
assert(cachedData.collect === Seq(1001))
}
test("SPARK-24596 Non-cascading Cache Invalidation - uncache temporary view") {
withTempView("t1", "t2") {
sql("CACHE TABLE t1 AS SELECT * FROM testData WHERE key > 1")
sql("CACHE TABLE t2 as SELECT * FROM t1 WHERE value > 1")
assert(spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
sql("UNCACHE TABLE t1")
assert(!spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
}
}
test("SPARK-24596, SPARK-34052: cascading cache invalidation - drop temporary view") {
Seq(true, false).foreach { storeAnalyzed =>
withSQLConf(SQLConf.STORE_ANALYZED_PLAN_FOR_VIEW.key -> storeAnalyzed.toString) {
withTempView("t1", "t2") {
sql("CACHE TABLE t1 AS SELECT * FROM testData WHERE key > 1")
sql("CACHE TABLE t2 as SELECT * FROM t1 WHERE value > 1")
assert(spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
val oldView = spark.table("t2")
sql("DROP VIEW t1")
          // dropping a temp view triggers cache invalidation on dependents iff the config is
          // turned off
assert(storeAnalyzed ==
spark.sharedState.cacheManager.lookupCachedData(oldView).isDefined)
if (!storeAnalyzed) {
// t2 should become invalid after t1 is dropped
val e = intercept[AnalysisException](spark.catalog.isCached("t2"))
assert(e.message.contains(s"Table or view not found"))
}
}
}
}
}
test("SPARK-24596, SPARK-34052: cascading cache invalidation - drop persistent view") {
Seq(true, false).foreach { storeAnalyzed =>
withSQLConf(SQLConf.STORE_ANALYZED_PLAN_FOR_VIEW.key -> storeAnalyzed.toString) {
withTable("t") {
spark.range(1, 10).toDF("key").withColumn("value", $"key" * 2)
.write.format("json").saveAsTable("t")
withView("t1") {
withTempView("t2") {
sql("CREATE VIEW t1 AS SELECT * FROM t WHERE key > 1")
sql("CACHE TABLE t1")
sql("CACHE TABLE t2 AS SELECT * FROM t1 WHERE value > 1")
assert(spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
val oldView = spark.table("t2")
sql("DROP VIEW t1")
            // dropping a permanent view always triggers cache invalidation on dependents
assert(spark.sharedState.cacheManager.lookupCachedData(oldView).isEmpty)
if (!storeAnalyzed) {
// t2 should become invalid after t1 is dropped
val e = intercept[AnalysisException](spark.catalog.isCached("t2"))
assert(e.message.contains(s"Table or view not found"))
}
}
}
}
}
}
}
test("SPARK-24596 Non-cascading Cache Invalidation - uncache table") {
withTable("t") {
spark.range(1, 10).toDF("key").withColumn("value", $"key" * 2)
.write.format("json").saveAsTable("t")
withTempView("t1", "t2") {
sql("CACHE TABLE t")
sql("CACHE TABLE t1 AS SELECT * FROM t WHERE key > 1")
sql("CACHE TABLE t2 AS SELECT * FROM t1 WHERE value > 1")
assert(spark.catalog.isCached("t"))
assert(spark.catalog.isCached("t1"))
assert(spark.catalog.isCached("t2"))
sql("UNCACHE TABLE t")
assert(!spark.catalog.isCached("t"))
assert(!spark.catalog.isCached("t1"))
assert(!spark.catalog.isCached("t2"))
}
}
}
test("Cache should respect the hint") {
def testHint(df: Dataset[_], expectedHint: JoinStrategyHint): Unit = {
val df2 = spark.range(2000).cache()
df2.count()
def checkHintExists(): Unit = {
// Test the broadcast hint.
val joinPlan = df.join(df2, "id").queryExecution.optimizedPlan
val joinHints = joinPlan.collect {
case Join(_, _, _, _, hint) => hint
}
assert(joinHints.size == 1)
assert(joinHints(0).leftHint.get.strategy.contains(expectedHint))
assert(joinHints(0).rightHint.isEmpty)
}
// Make sure the hint does exist when `df` is not cached.
checkHintExists()
df.cache()
try {
df.count()
// Make sure the hint still exists when `df` is cached.
checkHintExists()
} finally {
// Clean-up
df.unpersist()
}
}
// The hint is the root node
testHint(broadcast(spark.range(1000)), BROADCAST)
// The hint is under subquery alias
testHint(broadcast(spark.range(1000)).as("df"), BROADCAST)
// The hint is under filter
testHint(broadcast(spark.range(1000)).filter($"id" > 100), BROADCAST)
// If there are 2 adjacent hints, the top one takes effect.
testHint(
spark.range(1000)
.hint("SHUFFLE_MERGE")
.hint("SHUFFLE_HASH")
.as("df"),
SHUFFLE_HASH)
}
test("analyzes column statistics in cached query") {
def query(): DataFrame = {
spark.range(100)
.selectExpr("id % 3 AS c0", "id % 5 AS c1", "2 AS c2")
.groupBy("c0")
.agg(avg("c1").as("v1"), sum("c2").as("v2"))
}
    // First, check that there are no column statistics in the cached query
val queryStats1 = query().cache.queryExecution.optimizedPlan.stats.attributeStats
assert(queryStats1.map(_._1.name).isEmpty)
val cacheManager = spark.sharedState.cacheManager
val cachedData = cacheManager.lookupCachedData(query().logicalPlan)
assert(cachedData.isDefined)
val queryAttrs = cachedData.get.plan.output
assert(queryAttrs.size === 3)
val (c0, v1, v2) = (queryAttrs(0), queryAttrs(1), queryAttrs(2))
// Analyzes one column in the query output
cacheManager.analyzeColumnCacheQuery(spark, cachedData.get, v1 :: Nil)
val queryStats2 = query().queryExecution.optimizedPlan.stats.attributeStats
assert(queryStats2.map(_._1.name).toSet === Set("v1"))
// Analyzes two more columns
cacheManager.analyzeColumnCacheQuery(spark, cachedData.get, c0 :: v2 :: Nil)
val queryStats3 = query().queryExecution.optimizedPlan.stats.attributeStats
assert(queryStats3.map(_._1.name).toSet === Set("c0", "v1", "v2"))
}
test("SPARK-27248 refreshTable should recreate cache with same cache name and storage level") {
// This section tests when a table is cached with its qualified name but it is refreshed with
// its unqualified name.
withTempDatabase { db =>
withTempPath { path =>
withTable(s"$db.cachedTable") {
// Create table 'cachedTable' in temp db for testing purpose.
spark.catalog.createTable(
s"$db.cachedTable",
"PARQUET",
StructType(Array(StructField("key", StringType))),
Map("LOCATION" -> path.toURI.toString))
withCache(s"$db.cachedTable") {
// Cache the table 'cachedTable' in temp db with qualified table name with storage level
// MEMORY_ONLY, and then check whether the table is cached with expected name and
// storage level.
spark.catalog.cacheTable(s"$db.cachedTable", MEMORY_ONLY)
assertCached(spark.table(s"$db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
assert(spark.catalog.isCached(s"$db.cachedTable"),
s"Table '$db.cachedTable' should be cached.")
// Refresh the table 'cachedTable' in temp db with qualified table name, and then check
// whether the table is still cached with the same name and storage level.
// Without bug fix 'SPARK-27248', the recreated cache storage level will be default
// storage level 'MEMORY_AND_DISK', instead of 'MEMORY_ONLY'.
spark.catalog.refreshTable(s"$db.cachedTable")
assertCached(spark.table(s"$db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
assert(spark.catalog.isCached(s"$db.cachedTable"),
s"Table '$db.cachedTable' should be cached after refreshing with its qualified name.")
// Change the active database to the temp db and refresh the table with unqualified
// table name, and then check whether the table is still cached with the same name and
// storage level.
// Without bug fix 'SPARK-27248', the recreated cache name will be changed to
// 'cachedTable', instead of '$db.cachedTable'
activateDatabase(db) {
spark.catalog.refreshTable("cachedTable")
assertCached(spark.table("cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
assert(spark.catalog.isCached("cachedTable"),
s"Table '$db.cachedTable' should be cached after refreshing with its " +
"unqualified name.")
}
}
}
}
// This section tests when a table is cached with its unqualified name but it is refreshed
// with its qualified name.
withTempPath { path =>
withTable("cachedTable") {
// Create table 'cachedTable' in default db for testing purpose.
spark.catalog.createTable(
"cachedTable",
"PARQUET",
StructType(Array(StructField("key", StringType))),
Map("LOCATION" -> path.toURI.toString))
withCache("cachedTable") {
// Cache the table 'cachedTable' in default db without qualified table name with storage
          // level 'MEMORY_AND_DISK_2', and then check whether the table is cached with the expected
// name and storage level.
spark.catalog.cacheTable("cachedTable", MEMORY_AND_DISK_2)
assertCached(spark.table("cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
assert(spark.catalog.isCached("cachedTable"),
"Table 'cachedTable' should be cached.")
// Refresh the table 'cachedTable' in default db with unqualified table name, and then
// check whether the table is still cached with the same name and storage level.
// Without bug fix 'SPARK-27248', the recreated cache storage level will be default
          // storage level 'MEMORY_AND_DISK', instead of 'MEMORY_AND_DISK_2'.
spark.catalog.refreshTable("cachedTable")
assertCached(spark.table("cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
assert(spark.catalog.isCached("cachedTable"),
"Table 'cachedTable' should be cached after refreshing with its unqualified name.")
// Change the active database to the temp db and refresh the table with qualified
// table name, and then check whether the table is still cached with the same name and
// storage level.
// Without bug fix 'SPARK-27248', the recreated cache name will be changed to
// 'default.cachedTable', instead of 'cachedTable'
activateDatabase(db) {
spark.catalog.refreshTable("default.cachedTable")
assertCached(spark.table("default.cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
assert(spark.catalog.isCached("default.cachedTable"),
"Table 'cachedTable' should be cached after refreshing with its qualified name.")
}
}
}
}
}
}
test("cache supports for intervals") {
withTable("interval_cache") {
Seq((1, "1 second"), (2, "2 seconds"), (2, null))
.toDF("k", "v").write.saveAsTable("interval_cache")
sql("CACHE TABLE t1 AS SELECT k, cast(v as interval) FROM interval_cache")
assert(spark.catalog.isCached("t1"))
checkAnswer(sql("SELECT * FROM t1 WHERE k = 1"),
Row(1, new CalendarInterval(0, 0, DateTimeConstants.MICROS_PER_SECOND)))
sql("UNCACHE TABLE t1")
assert(!spark.catalog.isCached("t1"))
}
}
test("SPARK-30494 Fix the leak of cached data when replace an existing view") {
withTempView("tempView") {
spark.catalog.clearCache()
sql("create or replace temporary view tempView as select 1")
sql("cache table tempView")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined)
sql("create or replace temporary view tempView as select 1, 2")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty)
sql("cache table tempView")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
}
withGlobalTempView("tempGlobalTempView") {
spark.catalog.clearCache()
sql("create or replace global temporary view tempGlobalTempView as select 1")
sql("cache table global_temp.tempGlobalTempView")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined)
sql("create or replace global temporary view tempGlobalTempView as select 1, 2")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty)
sql("cache table global_temp.tempGlobalTempView")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
}
withView("view1") {
spark.catalog.clearCache()
sql("create or replace view view1 as select 1")
sql("cache table view1")
sql("create or replace view view1 as select 1, 2")
sql("cache table view1")
      // the cached plan of a persisted view looks like the tree below,
      // so we cannot use the same assertion as for a temp view.
// SubqueryAlias
// |
// + View
// |
// + Project[1 AS 1]
spark.sharedState.cacheManager.uncacheQuery(spark.table("view1"), cascade = false)
// make sure there is no cached data leak
assert(spark.sharedState.cacheManager.isEmpty)
}
}
test("SPARK-33228: Don't uncache data when replacing an existing view having the same plan") {
withTempView("tempView") {
spark.catalog.clearCache()
val df = spark.range(1).selectExpr("id a", "id b")
df.cache()
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
df.createOrReplaceTempView("tempView")
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
df.createOrReplaceTempView("tempView")
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
}
withTempView("tempGlobalTempView") {
spark.catalog.clearCache()
val df = spark.range(1).selectExpr("id a", "id b")
df.cache()
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
df.createOrReplaceGlobalTempView("tempGlobalTempView")
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
df.createOrReplaceGlobalTempView("tempGlobalTempView")
assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined)
}
}
test("SPARK-33290: REFRESH TABLE should invalidate all caches referencing the table") {
withTable("t") {
withTempPath { path =>
withTempView("tempView1", "tempView2") {
Seq((1 -> "a")).toDF("i", "j").write.parquet(path.getCanonicalPath)
sql(s"CREATE TABLE t USING parquet LOCATION '${path.toURI}'")
sql("CREATE TEMPORARY VIEW tempView1 AS SELECT * FROM t")
sql("CACHE TABLE tempView2 AS SELECT i FROM tempView1")
checkAnswer(sql("SELECT * FROM tempView1"), Seq(Row(1, "a")))
checkAnswer(sql("SELECT * FROM tempView2"), Seq(Row(1)))
Utils.deleteRecursively(path)
sql("REFRESH TABLE tempView1")
checkAnswer(sql("SELECT * FROM tempView1"), Seq.empty)
checkAnswer(sql("SELECT * FROM tempView2"), Seq.empty)
}
}
}
}
test("SPARK-33729: REFRESH TABLE should not use cached/stale plan") {
def moveParquetFiles(src: File, dst: File): Unit = {
src.listFiles(new FilenameFilter {
override def accept(dir: File, name: String): Boolean = name.endsWith("parquet")
}).foreach { f =>
Files.move(f.toPath, Paths.get(dst.getAbsolutePath, f.getName))
}
// cleanup the rest of the files
src.listFiles().foreach(_.delete())
src.delete()
}
withTable("t") {
withTempDir { dir =>
val path1 = new File(dir, "path1")
Seq((1 -> "a")).toDF("i", "j").write.parquet(path1.getCanonicalPath)
moveParquetFiles(path1, dir)
sql(s"CREATE TABLE t (i INT, j STRING) USING parquet LOCATION '${dir.toURI}'")
sql("CACHE TABLE t")
checkAnswer(sql("SELECT * FROM t"), Row(1, "a") :: Nil)
val path2 = new File(dir, "path2")
Seq(2 -> "b").toDF("i", "j").write.parquet(path2.getCanonicalPath)
moveParquetFiles(path2, dir)
sql("REFRESH TABLE t")
checkAnswer(sql("SELECT * FROM t"), Row(1, "a") :: Row(2, "b") :: Nil)
}
}
}
test("SPARK-33647: cache table support for permanent view") {
withView("v1") {
spark.catalog.clearCache()
sql("create or replace view v1 as select 1")
sql("cache table v1")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined)
sql("create or replace view v1 as select 1, 2")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty)
sql("cache table v1")
assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
}
}
test("SPARK-34269: cache lookup with ORDER BY / LIMIT clause") {
Seq("ORDER BY key", "LIMIT 10").foreach { clause =>
withTable("t") {
withTempView("v1") {
sql("CREATE TABLE t (key bigint, value string) USING parquet")
sql(s"CACHE TABLE v1 AS SELECT * FROM t $clause")
val query = sql(s"SELECT * FROM t $clause")
assert(spark.sharedState.cacheManager.lookupCachedData(query).isDefined)
}
}
}
}
test("SPARK-33786: Cache's storage level should be respected when a table name is altered.") {
withTable("old", "new") {
withTempPath { path =>
def getStorageLevel(tableName: String): StorageLevel = {
val table = spark.table(tableName)
val cachedData = spark.sharedState.cacheManager.lookupCachedData(table).get
cachedData.cachedRepresentation.cacheBuilder.storageLevel
}
Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
sql(s"CREATE TABLE old USING parquet LOCATION '${path.toURI}'")
sql("CACHE TABLE old OPTIONS('storageLevel' 'MEMORY_ONLY')")
val oldStorageLevel = getStorageLevel("old")
sql("ALTER TABLE old RENAME TO new")
val newStorageLevel = getStorageLevel("new")
assert(oldStorageLevel === newStorageLevel)
}
}
}
test("SPARK-34027: refresh cache in partitions recovering") {
withTable("t") {
sql("CREATE TABLE t (id int, part int) USING parquet PARTITIONED BY (part)")
sql("INSERT INTO t PARTITION (part=0) SELECT 0")
assert(!spark.catalog.isCached("t"))
sql("CACHE TABLE t")
assert(spark.catalog.isCached("t"))
checkAnswer(sql("SELECT * FROM t"), Seq(Row(0, 0)))
// Create new partition (part = 1) in the filesystem
val information = sql("SHOW TABLE EXTENDED LIKE 't' PARTITION (part = 0)")
.select("information")
.first().getString(0)
val part0Loc = information
.split("\\\\r?\\\\n")
.filter(_.startsWith("Location:"))
.head
.replace("Location: file:", "")
FileUtils.copyDirectory(
new File(part0Loc),
new File(part0Loc.replace("part=0", "part=1")))
sql("ALTER TABLE t RECOVER PARTITIONS")
assert(spark.catalog.isCached("t"))
checkAnswer(sql("SELECT * FROM t"), Seq(Row(0, 0), Row(0, 1)))
}
}
test("SPARK-34052: cascading cache invalidation - CatalogImpl.dropTempView") {
Seq(true, false).foreach { storeAnalyzed =>
withSQLConf(SQLConf.STORE_ANALYZED_PLAN_FOR_VIEW.key -> storeAnalyzed.toString) {
withTempView("view1", "view2") {
sql("CREATE TEMPORARY VIEW view1 AS SELECT * FROM testData WHERE key > 1")
sql("CACHE TABLE view2 AS SELECT * FROM view1 WHERE value > 1")
assert(spark.catalog.isCached("view2"))
val oldView = spark.table("view2")
spark.catalog.dropTempView("view1")
assert(storeAnalyzed ==
spark.sharedState.cacheManager.lookupCachedData(oldView).isDefined)
}
}
}
}
test("SPARK-34052: cascading cache invalidation - CatalogImpl.dropGlobalTempView") {
Seq(true, false).foreach { storeAnalyzed =>
withSQLConf(SQLConf.STORE_ANALYZED_PLAN_FOR_VIEW.key -> storeAnalyzed.toString) {
withGlobalTempView("view1") {
withTempView("view2") {
val db = spark.sharedState.globalTempViewManager.database
sql("CREATE GLOBAL TEMPORARY VIEW view1 AS SELECT * FROM testData WHERE key > 1")
sql(s"CACHE TABLE view2 AS SELECT * FROM ${db}.view1 WHERE value > 1")
assert(spark.catalog.isCached("view2"))
val oldView = spark.table("view2")
spark.catalog.dropGlobalTempView("view1")
assert(storeAnalyzed ==
spark.sharedState.cacheManager.lookupCachedData(oldView).isDefined)
}
}
}
}
}
test("SPARK-34052: cached temp view should become invalid after the source table is dropped") {
val t = "t"
withTable(t) {
sql(s"CREATE TABLE $t USING parquet AS SELECT * FROM VALUES(1, 'a') AS $t(a, b)")
sql(s"CACHE TABLE v AS SELECT a FROM $t")
checkAnswer(sql("SELECT * FROM v"), Row(1) :: Nil)
sql(s"DROP TABLE $t")
val e = intercept[AnalysisException](sql("SELECT * FROM v"))
assert(e.message.contains(s"Table or view not found: $t"))
}
}
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala | Scala | apache-2.0 | 54,648 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.testFramework.EditorTestUtil
/**
* @author Lukasz Piepiora
*/
class EmulateFlattenInspectionTest extends OperationsOnCollectionInspectionTest {
override protected val classOfInspection: Class[_ <: OperationOnCollectionInspection] =
classOf[EmulateFlattenInspection]
override protected val hint: String =
ScalaInspectionBundle.message("replace.with.flatten")
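  // The positive cases below check that flatten-emulating calls (e.g. `flatMap(identity)`,
  // `getOrElse(None)`, `map(_.get)`) are highlighted and that the quick-fix rewrites them to `.flatten`.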
def testSuggests1(): Unit = {
val selected = s"Seq(Seq(1), Seq(2), Seq(2)).${START}flatMap(identity)$END"
checkTextHasError(selected)
val text = "Seq(Seq(1), Seq(2), Seq(2)).flatMap(identity)"
val result = "Seq(Seq(1), Seq(2), Seq(2)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests2(): Unit = {
val selected = s"Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6, 7)).${START}flatMap(x => identity(x))$END"
checkTextHasError(selected)
val text = "Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6, 7)).flatMap(x => identity(x))"
val result = "Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6, 7)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests3(): Unit = {
val selected = s"Seq(Seq(3, 1, 4), Seq(1, 5), Seq(9, 2)).${START}flatMap(identity(_))$END"
checkTextHasError(selected)
val text = "Seq(Seq(3, 1, 4), Seq(1, 5), Seq(9, 2)).flatMap(identity(_))"
val result = "Seq(Seq(3, 1, 4), Seq(1, 5), Seq(9, 2)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests4(): Unit = {
val selected = s"Seq(Seq(2, 7, 1), Seq(8, 2), Seq(8, 1)).${START}flatMap(x => x)$END"
checkTextHasError(selected)
val text = "Seq(Seq(2, 7, 1), Seq(8, 2), Seq(8, 1)).flatMap(x => x)"
val result = "Seq(Seq(2, 7, 1), Seq(8, 2), Seq(8, 1)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests5(): Unit = {
val selected = s"Iterator(Iterator(1), Iterator(2), Iterator(3)).${START}flatMap(identity)$END"
checkTextHasError(selected)
val text = "Iterator(Iterator(1), Iterator(2), Iterator(3)).flatMap(identity)"
val result = "Iterator(Iterator(1), Iterator(2), Iterator(3)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests6(): Unit = {
val selected = s"Iterator(Seq(1), Seq(2), Seq(3)).${START}flatMap(identity)$END"
checkTextHasError(selected)
val text = "Iterator(Seq(1), Seq(2), Seq(3)).flatMap(identity)"
val result = "Iterator(Seq(1), Seq(2), Seq(3)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests7(): Unit = {
val selected = s"Set(Set(1), Set(2), Set(3)).${START}flatMap(identity)$END"
checkTextHasError(selected)
val text = "Set(Set(1), Set(2), Set(3)).flatMap(identity)"
val result = "Set(Set(1), Set(2), Set(3)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests8(): Unit = {
val selected = s"Set(Set(1), Set(2), Set(3)).${START}flatMap(x => x)$END"
checkTextHasError(selected)
val text = "Set(Set(1), Set(2), Set(3)).flatMap(x => x)"
val result = "Set(Set(1), Set(2), Set(3)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests9(): Unit = {
val selected = s"Set(Set(1), Set(2), Set(3)).${START}flatMap(identity[Set[Int]])$END"
checkTextHasError(selected)
val text = "Set(Set(1), Set(2), Set(3)).flatMap(identity[Set[Int]])"
val result = "Set(Set(1), Set(2), Set(3)).flatten"
testQuickFix(text, result, hint)
}
def testSuggests10(): Unit = {
val selected = s"Set(Set(1), Set(2), Set(3)).${START}flatMap(identity _)$END"
checkTextHasError(selected)
val text = "Set(Set(1), Set(2), Set(3)).flatMap(identity _)"
val result = "Set(Set(1), Set(2), Set(3)).flatten"
testQuickFix(text, result, hint)
}
  def testSuggests11(): Unit = {
val selected = s"Set(Set(1), Set(2), Set(3)) ${START}flatMap identity$END"
checkTextHasError(selected)
val text = "Set(Set(1), Set(2), Set(3)) flatMap identity"
val result = "Set(Set(1), Set(2), Set(3)).flatten"
testQuickFix(text, result, hint)
}
  def testSuggests12(): Unit = {
val selected = s"val o = Option(Option(1)); o.${START}getOrElse(None)$END"
checkTextHasError(selected)
val text = "val o = Option(Option(1)); o.getOrElse(None)"
val result = "val o = Option(Option(1)); o.flatten"
testQuickFix(text, result, hint)
}
  def testSuggests13(): Unit = {
val selected = s"Option(Option(1)).${START}getOrElse(None)$END"
checkTextHasError(selected)
val text = "Option(Option(1)).getOrElse(None)"
val result = "Option(Option(1)).flatten"
testQuickFix(text, result, hint)
}
  def testSuggests14(): Unit = {
val selected = s"val o = Option(Option(1)); o.${START}map(_.get)$END"
checkTextHasError(selected)
val text = "val o = Option(Option(1)); o.map(_.get)"
val result = "val o = Option(Option(1)); o.flatten"
testQuickFix(text, result, hint)
}
  def testSuggests15(): Unit = {
val selected = s"Option(Option(1)).${START}map(_.get)$END"
checkTextHasError(selected)
val text = "Option(Option(1)).map(_.get)"
val result = "Option(Option(1)).flatten"
testQuickFix(text, result, hint)
}
def testNotSuggests1(): Unit = {
val text = s"Seq(Seq(1), Seq(2), Seq(3)).flatMap(x => identity(Seq(1, 2, 3)))"
checkTextHasNoErrors(text)
}
def testNotSuggests2(): Unit = {
val text = s"Seq(Seq(9), Seq(8, 1), Seq(5, 9, 9)).flatMap(_.map(_ * 2))"
checkTextHasNoErrors(text)
}
def testNotSuggests3(): Unit = {
val text = s"Set(Set(1), Set(2), Set(3)).flatMap { x => println(x); x }"
checkTextHasNoErrors(text)
}
def testNotSuggests4(): Unit = {
val text = s"List(List(1), List(2), List(3)).flatMap(1 :: _ )"
checkTextHasNoErrors(text)
}
def testNotSuggests5(): Unit = {
val text = s"Option(Option(1)).getOrElse(Option(2))"
checkTextHasNoErrors(text)
}
def testNotSuggests6(): Unit = {
val text = s"Option(Option(1), 2).getOrElse(None)"
checkTextHasNoErrors(text)
}
def testNotSuggests7(): Unit = {
val text = s"Option(List(1)).getOrElse(None)"
checkTextHasNoErrors(text)
}
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/collections/EmulateFlattenInspectionTest.scala | Scala | apache-2.0 | 6,142 |
package mllib.perf
import org.json4s.JsonDSL._
import org.json4s.JsonAST._
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.classification._
import org.apache.spark.mllib.clustering.{KMeansModel, KMeans}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.recommendation.{Rating, MatrixFactorizationModel, ALS}
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.tree.impurity.{Variance, Gini}
import org.apache.spark.mllib.tree.RandomForest
import org.apache.spark.mllib.tree.model.RandomForestModel
import org.apache.spark.rdd.RDD
import mllib.perf.util.{DataLoader, DataGenerator}
/** Parent class for tests which run on a large dataset. */
abstract class RegressionAndClassificationTests[M](sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[LabeledPoint]): M
def validate(model: M, rdd: RDD[LabeledPoint]): Double
val NUM_EXAMPLES = ("num-examples", "number of examples for regression tests")
val NUM_FEATURES = ("num-features", "number of features of each example for regression tests")
intOptions = intOptions ++ Seq(NUM_FEATURES)
longOptions = Seq(NUM_EXAMPLES)
var rdd: RDD[LabeledPoint] = _
var testRdd: RDD[LabeledPoint] = _
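  // Trains a model on the cached training split, then evaluates it on both the training
  // and test splits, reporting elapsed times and the metric values.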
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
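// Illustrative note (assumption, not part of the original source): with the json4s DSL in
// scope, the Map above converts to a JSON object of the shape
//   {"trainingTime": 12.3, "testTime": 4.5, "trainingMetric": 0.98, "testMetric": 0.95}
// where times are in seconds and the metric is RMSE or accuracy depending on the subclass.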
/**
* For classification
* @param predictions RDD over (prediction, truth) for each instance
* @return Percent correctly classified
*/
def calculateAccuracy(predictions: RDD[(Double, Double)], numExamples: Long): Double = {
predictions.map{case (pred, label) =>
if (pred == label) 1.0 else 0.0
}.sum() * 100.0 / numExamples
}
/**
* For regression
* @param predictions RDD over (prediction, truth) for each instance
* @return Root mean squared error (RMSE)
*/
def calculateRMSE(predictions: RDD[(Double, Double)], numExamples: Long): Double = {
val error = predictions.map{ case (pred, label) =>
(pred - label) * (pred - label)
}.sum()
math.sqrt(error / numExamples)
}
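// Worked example (illustrative, not part of the original source):
//   predictions = {(1.0, 1.0), (0.0, 1.0), (1.0, 0.0), (0.0, 0.0)}, numExamples = 4
//   calculateAccuracy -> 2 correct out of 4 = 50.0 (percent)
//   calculateRMSE     -> sqrt((0 + 1 + 1 + 0) / 4) ~= 0.707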
}
/** Parent class for Generalized Linear Model (GLM) tests */
abstract class GLMTests(sc: SparkContext)
extends RegressionAndClassificationTests[GeneralizedLinearModel](sc) {
val STEP_SIZE = ("step-size", "step size for SGD")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
val REG_TYPE = ("reg-type", "type of regularization: none, l1, l2")
val REG_PARAM = ("reg-param", "the regularization parameter against overfitting")
val OPTIMIZER = ("optimizer", "optimization algorithm: sgd, lbfgs")
intOptions = intOptions ++ Seq(NUM_ITERATIONS)
doubleOptions = doubleOptions ++ Seq(STEP_SIZE, REG_PARAM)
stringOptions = stringOptions ++ Seq(REG_TYPE, OPTIMIZER)
}
class GLMRegressionTest(sc: SparkContext) extends GLMTests(sc) {
val INTERCEPT = ("intercept", "intercept for random data generation")
val EPS = ("epsilon", "scale factor for the noise during data generation")
val LOSS = ("loss", "loss to minimize. Supported: l2 (squared error).")
doubleOptions = doubleOptions ++ Seq(INTERCEPT, EPS)
stringOptions = stringOptions ++ Seq(LOSS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val intercept: Double = doubleOptionValue(INTERCEPT)
val eps: Double = doubleOptionValue(EPS)
val data = DataGenerator.generateLabeledPoints(sc, math.ceil(numExamples * 1.25).toLong,
numFeatures, intercept, eps, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def validate(model: GeneralizedLinearModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateRMSE(predictions, numExamples)
}
override def runTest(rdd: RDD[LabeledPoint]): GeneralizedLinearModel = {
val stepSize = doubleOptionValue(STEP_SIZE)
val loss = stringOptionValue(LOSS)
val regType = stringOptionValue(REG_TYPE)
val regParam = doubleOptionValue(REG_PARAM)
val numIterations = intOptionValue(NUM_ITERATIONS)
val optimizer = stringOptionValue(OPTIMIZER)
if (!Array("l2").contains(loss)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown loss ($loss). Supported values: l2.")
}
if (!Array("none", "l1", "l2").contains(regType)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown regType ($regType). Supported values: none, l1, l2.")
}
if (!Array("sgd").contains(optimizer)) { // only SGD supported in Spark 1.1
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown optimizer ($optimizer). Supported values: sgd.")
}
(loss, regType) match {
case ("l2", "none") =>
val lr = new LinearRegressionWithSGD().setIntercept(addIntercept = true)
lr.optimizer.setNumIterations(numIterations).setStepSize(stepSize)
lr.run(rdd)
case ("l2", "l1") =>
val lasso = new LassoWithSGD().setIntercept(addIntercept = true)
lasso.optimizer.setNumIterations(numIterations).setStepSize(stepSize).setRegParam(regParam)
lasso.run(rdd)
case ("l2", "l2") =>
val rr = new RidgeRegressionWithSGD().setIntercept(addIntercept = true)
rr.optimizer.setNumIterations(numIterations).setStepSize(stepSize).setRegParam(regParam)
rr.run(rdd)
}
}
}
class GLMClassificationTest(sc: SparkContext) extends GLMTests(sc) {
val THRESHOLD = ("per-negative", "probability for a negative label during data generation")
val SCALE = ("scale-factor", "scale factor for the noise during data generation")
val LOSS = ("loss", "loss to minimize. Supported: logistic, hinge (SVM).")
doubleOptions = doubleOptions ++ Seq(THRESHOLD, SCALE)
stringOptions = stringOptions ++ Seq(LOSS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
override def validate(model: GeneralizedLinearModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateAccuracy(predictions, numExamples)
}
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val threshold: Double = doubleOptionValue(THRESHOLD)
val sf: Double = doubleOptionValue(SCALE)
val data = DataGenerator.generateClassificationLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, sf, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def runTest(rdd: RDD[LabeledPoint]): GeneralizedLinearModel = {
val stepSize = doubleOptionValue(STEP_SIZE)
val loss = stringOptionValue(LOSS)
val regType = stringOptionValue(REG_TYPE)
val regParam = doubleOptionValue(REG_PARAM)
val numIterations = intOptionValue(NUM_ITERATIONS)
val optimizer = stringOptionValue(OPTIMIZER)
if (!Array("logistic", "hinge").contains(loss)) {
throw new IllegalArgumentException(
s"GLMClassificationTest run with unknown loss ($loss). Supported values: logistic, hinge.")
}
if (!Array("none", "l1", "l2").contains(regType)) {
throw new IllegalArgumentException(s"GLMClassificationTest run with unknown regType" +
s" ($regType). Supported values: none, l1, l2.")
}
if (!Array("sgd", "lbfgs").contains(optimizer)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown optimizer ($optimizer). Supported values: sgd, lbfgs.")
}
(loss, regType, optimizer) match {
case ("logistic", "none", "sgd") =>
LogisticRegressionWithSGD.train(rdd, numIterations, stepSize)
case ("logistic", "none", "lbfgs") =>
println("WARNING: LogisticRegressionWithLBFGS ignores numIterations, stepSize" +
" in this Spark version.")
new LogisticRegressionWithLBFGS().run(rdd)
case ("hinge", "l2", "sgd") =>
SVMWithSGD.train(rdd, numIterations, stepSize, regParam)
case _ =>
throw new IllegalArgumentException(
s"GLMClassificationTest given incompatible (loss, regType) = ($loss, $regType)." +
s" Note the set of supported combinations increases in later Spark versions.")
}
}
}
abstract class RecommendationTests(sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[Rating]): MatrixFactorizationModel
val NUM_USERS = ("num-users", "number of users for recommendation tests")
val NUM_PRODUCTS = ("num-products", "number of features of each example for recommendation tests")
val NUM_RATINGS = ("num-ratings", "number of ratings for recommendation tests")
val RANK = ("rank", "rank of factorized matrices for recommendation tests")
val IMPLICIT = ("implicit-prefs", "use implicit ratings")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
val REG_PARAM = ("reg-param", "the regularization parameter against overfitting")
intOptions = intOptions ++ Seq(NUM_USERS, NUM_PRODUCTS, RANK, NUM_ITERATIONS)
longOptions = longOptions ++ Seq(NUM_RATINGS)
booleanOptions = booleanOptions ++ Seq(IMPLICIT)
doubleOptions = doubleOptions ++ Seq(REG_PARAM)
val options = intOptions ++ stringOptions ++ booleanOptions ++ longOptions ++ doubleOptions
addOptionsToParser()
var rdd: RDD[Rating] = _
var testRdd: RDD[Rating] = _
override def createInputData(seed: Long) = {
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val numUsers: Int = intOptionValue(NUM_USERS)
val numProducts: Int = intOptionValue(NUM_PRODUCTS)
val numRatings: Long = longOptionValue(NUM_RATINGS)
val implicitRatings: Boolean = booleanOptionValue(IMPLICIT)
val data = DataGenerator.generateRatings(sc, numUsers, numProducts,
math.ceil(numRatings * 1.25).toLong, implicitRatings, numPartitions, seed)
rdd = data._1.cache()
testRdd = data._2
// Materialize rdd
println("Num Examples: " + rdd.count())
}
def validate(model: MatrixFactorizationModel,
data: RDD[Rating]): Double = {
val implicitPrefs: Boolean = booleanOptionValue(IMPLICIT)
val predictions: RDD[Rating] = model.predict(data.map(x => (x.user, x.product)))
val predictionsAndRatings: RDD[(Double, Double)] = predictions.map{ x =>
def mapPredictedRating(r: Double) = if (implicitPrefs) math.max(math.min(r, 1.0), 0.0) else r
((x.user, x.product), mapPredictedRating(x.rating))
}.join(data.map(x => ((x.user, x.product), x.rating))).values
math.sqrt(predictionsAndRatings.map(x => (x._1 - x._2) * (x._1 - x._2)).mean())
}
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
}
abstract class ClusteringTests(sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[Vector]): KMeansModel
val NUM_POINTS = ("num-points", "number of points for clustering tests")
val NUM_COLUMNS = ("num-columns", "number of columns for each point for clustering tests")
val NUM_CENTERS = ("num-centers", "number of centers for clustering tests")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
intOptions = intOptions ++ Seq(NUM_CENTERS, NUM_COLUMNS, NUM_ITERATIONS)
longOptions = longOptions ++ Seq(NUM_POINTS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ longOptions ++ doubleOptions
addOptionsToParser()
var rdd: RDD[Vector] = _
var testRdd: RDD[Vector] = _
def validate(model: KMeansModel, rdd: RDD[Vector]): Double = {
val numPoints = rdd.cache().count()
val error = model.computeCost(rdd)
math.sqrt(error / numPoints)
}
override def createInputData(seed: Long) = {
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val numPoints: Long = longOptionValue(NUM_POINTS)
val numColumns: Int = intOptionValue(NUM_COLUMNS)
val numCenters: Int = intOptionValue(NUM_CENTERS)
val data = DataGenerator.generateKMeansVectors(sc, math.ceil(numPoints * 1.25).toLong, numColumns,
numCenters, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
}
// Classification Algorithms
class NaiveBayesTest(sc: SparkContext)
extends RegressionAndClassificationTests[NaiveBayesModel](sc) {
val THRESHOLD = ("per-negative", "probability for a negative label during data generation")
val SCALE = ("scale-factor", "scale factor for the noise during data generation")
val SMOOTHING = ("nb-lambda", "the smoothing parameter lambda for Naive Bayes")
val MODEL_TYPE = ("model-type", "either multinomial (default) or bernoulli")
doubleOptions = doubleOptions ++ Seq(THRESHOLD, SCALE, SMOOTHING)
stringOptions = stringOptions ++ Seq(MODEL_TYPE)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
/** Note: using same data generation as for GLMClassificationTest, but should change later */
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val threshold: Double = doubleOptionValue(THRESHOLD)
val sf: Double = doubleOptionValue(SCALE)
val modelType = stringOptionValue(MODEL_TYPE)
val data = if (modelType == "Bernoulli") {
DataGenerator.generateBinaryLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, numPartitions, seed)
} else {
val negdata = DataGenerator.generateClassificationLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, sf, numPartitions, seed)
val dataNonneg = negdata.map { lp =>
LabeledPoint(lp.label, Vectors.dense(lp.features.toArray.map(math.abs)))
}
dataNonneg
}
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def validate(model: NaiveBayesModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateAccuracy(predictions, numExamples)
}
override def runTest(rdd: RDD[LabeledPoint]): NaiveBayesModel = {
val lambda = doubleOptionValue(SMOOTHING)
val modelType = stringOptionValue(MODEL_TYPE)
NaiveBayes.train(rdd, lambda, modelType)
}
}
// Recommendation
class ALSTest(sc: SparkContext) extends RecommendationTests(sc) {
override def runTest(rdd: RDD[Rating]): MatrixFactorizationModel = {
val numIterations: Int = intOptionValue(NUM_ITERATIONS)
val rank: Int = intOptionValue(RANK)
val regParam = doubleOptionValue(REG_PARAM)
val seed = intOptionValue(RANDOM_SEED) + 12
new ALS().setIterations(numIterations).setRank(rank).setSeed(seed).setLambda(regParam)
.setBlocks(rdd.partitions.size).run(rdd)
}
}
// Clustering
class KMeansTest(sc: SparkContext) extends ClusteringTests(sc) {
override def runTest(rdd: RDD[Vector]): KMeansModel = {
val numIterations: Int = intOptionValue(NUM_ITERATIONS)
val k: Int = intOptionValue(NUM_CENTERS)
KMeans.train(rdd, k, numIterations)
}
}
/**
* Parent class for DecisionTree-based tests which run on a large dataset.
*/
abstract class DecisionTreeTests(sc: SparkContext)
extends RegressionAndClassificationTests[RandomForestModel](sc) {
val TEST_DATA_FRACTION =
("test-data-fraction", "fraction of data to hold out for testing (ignored if given training and test dataset)")
val LABEL_TYPE =
("label-type", "Type of label: 0 indicates regression, 2+ indicates " +
"classification with this many classes")
val FRAC_CATEGORICAL_FEATURES = ("frac-categorical-features",
"Fraction of features which are categorical")
val FRAC_BINARY_FEATURES =
("frac-binary-features", "Fraction of categorical features which are binary. " +
"Others have 20 categories.")
val TREE_DEPTH = ("tree-depth", "Depth of true decision tree model used to label examples.")
val MAX_BINS = ("max-bins", "Maximum number of bins for the decision tree learning algorithm.")
val NUM_TREES = ("num-trees", "Number of trees to train. If 1, run DecisionTree. If >1, run an ensemble method (RandomForest).")
val FEATURE_SUBSET_STRATEGY =
("feature-subset-strategy", "Strategy for feature subset sampling. Supported: auto, all, sqrt, log2, onethird.")
intOptions = intOptions ++ Seq(LABEL_TYPE, TREE_DEPTH, MAX_BINS, NUM_TREES)
doubleOptions = doubleOptions ++ Seq(TEST_DATA_FRACTION, FRAC_CATEGORICAL_FEATURES, FRAC_BINARY_FEATURES)
stringOptions = stringOptions ++ Seq(FEATURE_SUBSET_STRATEGY)
addOptionalOptionToParser("training-data", "path to training dataset (if not given, use random data)", "", classOf[String])
addOptionalOptionToParser("test-data", "path to test dataset (only used if training dataset given)" +
" (if not given, hold out part of training data for validation)", "", classOf[String])
var categoricalFeaturesInfo: Map[Int, Int] = Map.empty
protected var labelType = -1
def validate(model: RandomForestModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
val labelType: Int = intOptionValue(LABEL_TYPE)
if (labelType == 0) {
calculateRMSE(predictions, numExamples)
} else {
calculateAccuracy(predictions, numExamples)
}
}
}
class DecisionTreeTest(sc: SparkContext) extends DecisionTreeTests(sc) {
val ENSEMBLE_TYPE = ("ensemble-type", "Type of ensemble algorithm: RandomForest.")
stringOptions = stringOptions ++ Seq(ENSEMBLE_TYPE)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
private def getTestDataFraction: Double = {
val testDataFraction: Double = doubleOptionValue(TEST_DATA_FRACTION)
assert(testDataFraction >= 0 && testDataFraction <= 1, s"Bad testDataFraction: $testDataFraction")
testDataFraction
}
override def createInputData(seed: Long) = {
val trainingDataPath: String = optionValue[String]("training-data")
val (rdds, categoricalFeaturesInfo_, numClasses) = if (trainingDataPath != "") {
println(s"LOADING FILE: $trainingDataPath")
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val testDataPath: String = optionValue[String]("test-data")
val testDataFraction: Double = getTestDataFraction
DataLoader.loadLibSVMFiles(sc, numPartitions, trainingDataPath, testDataPath,
testDataFraction, seed)
} else {
createSyntheticInputData(seed)
}
assert(rdds.length == 2)
rdd = rdds(0).cache()
testRdd = rdds(1)
categoricalFeaturesInfo = categoricalFeaturesInfo_
this.labelType = numClasses
// Materialize rdd
println("Num Examples: " + rdd.count())
}
/**
* Create synthetic training and test datasets.
* @return (trainTestDatasets, categoricalFeaturesInfo, numClasses) where
* trainTestDatasets = Array(trainingData, testData),
* categoricalFeaturesInfo is a map of categorical feature arities, and
* numClasses = number of classes label can take.
*/
private def createSyntheticInputData(
seed: Long): (Array[RDD[LabeledPoint]], Map[Int, Int], Int) = {
// Generic test options
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val testDataFraction: Double = getTestDataFraction
// Data dimensions and type
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val labelType: Int = intOptionValue(LABEL_TYPE)
val fracCategoricalFeatures: Double = doubleOptionValue(FRAC_CATEGORICAL_FEATURES)
val fracBinaryFeatures: Double = doubleOptionValue(FRAC_BINARY_FEATURES)
// Model specification
val treeDepth: Int = intOptionValue(TREE_DEPTH)
val (rdd_, categoricalFeaturesInfo_) =
DataGenerator.generateDecisionTreeLabeledPoints(sc, math.ceil(numExamples * 1.25).toLong,
numFeatures, numPartitions, labelType,
fracCategoricalFeatures, fracBinaryFeatures, treeDepth, seed)
val splits = rdd_.randomSplit(Array(1.0 - testDataFraction, testDataFraction), seed)
(splits, categoricalFeaturesInfo_, labelType)
}
override def runTest(rdd: RDD[LabeledPoint]): RandomForestModel = {
val treeDepth: Int = intOptionValue(TREE_DEPTH)
val maxBins: Int = intOptionValue(MAX_BINS)
val numTrees: Int = intOptionValue(NUM_TREES)
val featureSubsetStrategy: String = stringOptionValue(FEATURE_SUBSET_STRATEGY)
val ensembleType: String = stringOptionValue(ENSEMBLE_TYPE)
if (!Array("RandomForest").contains(ensembleType)) {
throw new IllegalArgumentException(
s"DecisionTreeTest given unknown ensembleType param: $ensembleType." +
" Supported values: RandomForest.")
}
if (labelType == 0) {
// Regression
ensembleType match {
case "RandomForest" =>
RandomForest.trainRegressor(rdd, categoricalFeaturesInfo, numTrees, featureSubsetStrategy,
"variance", treeDepth, maxBins, this.getRandomSeed)
}
} else if (labelType >= 2) {
// Classification
ensembleType match {
case "RandomForest" =>
RandomForest.trainClassifier(rdd, labelType, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, "gini", treeDepth, maxBins, this.getRandomSeed)
}
} else {
throw new IllegalArgumentException(s"Bad label-type parameter " +
s"given to DecisionTreeTest: $labelType")
}
}
}
| XiaoqingWang/spark-perf | mllib-tests/v1p4/src/main/scala/mllib/perf/MLAlgorithmTests.scala | Scala | apache-2.0 | 24,370 |
package mesosphere.marathon.core.appinfo.impl
import mesosphere.marathon.MarathonSchedulerService
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.appinfo.{ TaskCounts, AppInfo, EnrichedTask }
import mesosphere.marathon.health.{ Health, HealthCheckManager, HealthCounts }
import mesosphere.marathon.state.{ TaskFailure, TaskFailureRepository, Identifiable, PathId, AppDefinition }
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.upgrade.DeploymentPlan
import org.slf4j.LoggerFactory
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.control.NonFatal
class AppInfoBaseData(
taskTracker: TaskTracker,
healthCheckManager: HealthCheckManager,
marathonSchedulerService: MarathonSchedulerService,
taskFailureRepository: TaskFailureRepository) {
import AppInfoBaseData.log
import scala.concurrent.ExecutionContext.Implicits.global
lazy val runningDeploymentsByAppFuture: Future[Map[PathId, Seq[Identifiable]]] = {
log.debug("Retrieving running deployments")
val allRunningDeploymentsFuture: Future[Seq[DeploymentPlan]] =
for {
stepInfos <- marathonSchedulerService.listRunningDeployments()
} yield stepInfos.map(_.plan)
allRunningDeploymentsFuture.map { allDeployments =>
val byApp = Map.empty[PathId, Vector[DeploymentPlan]].withDefaultValue(Vector.empty)
val deploymentsByAppId = allDeployments.foldLeft(byApp) { (result, deploymentPlan) =>
deploymentPlan.affectedApplicationIds.foldLeft(result) { (result, appId) =>
val newEl = appId -> (result(appId) :+ deploymentPlan)
result + newEl
}
}
deploymentsByAppId
.mapValues(_.map(deploymentPlan => Identifiable(deploymentPlan.id)))
.withDefaultValue(Seq.empty)
}
}
def appInfoFuture(app: AppDefinition, embed: Set[AppInfo.Embed]): Future[AppInfo] = {
val appData = new AppData(app)
embed.foldLeft(Future.successful(AppInfo(app))) { (infoFuture, embed) =>
infoFuture.flatMap { info =>
embed match {
case AppInfo.Embed.Counts =>
appData.taskCountsFuture.map(counts => info.copy(maybeCounts = Some(counts)))
case AppInfo.Embed.Deployments =>
runningDeploymentsByAppFuture.map(deployments => info.copy(maybeDeployments = Some(deployments(app.id))))
case AppInfo.Embed.LastTaskFailure =>
appData.maybeLastTaskFailureFuture.map { maybeLastTaskFailure =>
info.copy(maybeLastTaskFailure = maybeLastTaskFailure)
}
case AppInfo.Embed.Tasks =>
appData.enrichedTasksFuture.map(tasks => info.copy(maybeTasks = Some(tasks)))
}
}
}
}
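// Illustrative usage sketch (assumption, not part of the original file): requesting counts and
// tasks in one call; the fold above chains one Future transformation per requested embed.
//   appInfoFuture(app, Set(AppInfo.Embed.Counts, AppInfo.Embed.Tasks))
//     .foreach(info => log.debug(s"counts=${info.maybeCounts} tasks=${info.maybeTasks}"))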
/**
* Contains app-specific data that we need to retrieve.
*
* All data is lazy such that only data that is actually needed for the requested embedded information
* gets retrieved.
*/
private[this] class AppData(app: AppDefinition) {
lazy val tasks: Set[MarathonTask] = {
log.debug(s"retrieving running tasks for app [${app.id}]")
taskTracker.get(app.id)
}
lazy val tasksFuture: Future[Set[MarathonTask]] = Future.successful(tasks)
lazy val healthCountsFuture: Future[HealthCounts] = {
log.debug(s"retrieving health counts for app [${app.id}]")
healthCheckManager.healthCounts(app.id)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while retrieving health counts for app [${app.id}]", e)
}
lazy val taskCountsFuture = {
log.debug(s"calculating task counts for app [${app.id}]")
for {
tasks <- tasksFuture
healthCounts <- healthCountsFuture
} yield TaskCounts(tasks, healthCounts)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while calculating task counts for app [${app.id}]", e)
}
lazy val enrichedTasksFuture: Future[Seq[EnrichedTask]] = {
def statusesToEnrichedTasks(
tasksById: Map[String, MarathonTask],
statuses: Map[String, collection.Seq[Health]]): Seq[EnrichedTask] = {
for {
(taskId, healthResults) <- statuses.to[Seq]
task <- tasksById.get(taskId)
} yield EnrichedTask(app.id, task, healthResults)
}
log.debug(s"assembling rich tasks for app [${app.id}]")
val tasksById: Map[String, MarathonTask] = tasks.map(task => task.getId -> task).toMap
healthCheckManager.statuses(app.id).map(statuses => statusesToEnrichedTasks(tasksById, statuses))
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while assembling rich tasks for app [${app.id}]", e)
}
lazy val maybeLastTaskFailureFuture: Future[Option[TaskFailure]] = {
log.debug(s"retrieving last task failure for app [${app.id}]")
taskFailureRepository.current(app.id)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while retrieving last task failure for app [${app.id}]", e)
}
}
}
object AppInfoBaseData {
private val log = LoggerFactory.getLogger(getClass)
}
| cgvarela/marathon | src/main/scala/mesosphere/marathon/core/appinfo/impl/AppInfoBaseData.scala | Scala | apache-2.0 | 5,100 |
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mapper
import net.liftweb._
import net.liftweb.http.provider._
import common._
import util._
import http._
import Helpers._
trait ProtoExtendedSession[T <: ProtoExtendedSession[T]] extends
KeyedMapper[Long, T] {
self: T =>
override def primaryKeyField: MappedLongIndex[T] = id
// the primary key for the database
object id extends MappedLongIndex(this)
// uniqueId
object cookieId extends MappedUniqueId(this, 32) {
override def dbIndexed_? = true
}
object userId extends MappedString(this, 64)
object expiration extends MappedLong(this) {
override def defaultValue = expirationTime
override def dbColumnName = expirationColumnName
}
/**
* Change this string to "experation" for compatibility with
* old mis-spelling
*/
protected def expirationColumnName = "expiration"
def expirationTime: Long = millis + 180.days
}
trait UserIdAsString {
def userIdAsString: String
}
trait MetaProtoExtendedSession[T <: ProtoExtendedSession[T]] extends
KeyedMetaMapper[Long, T] {
self: T =>
def CookieName = "ext_id"
type UserType <: UserIdAsString
/*
private object myWrapper extends LoanWrapper {
def apply[N](f: => N): N = {
(recoverUserId, S.findCookie(CookieName)) match {
case (Empty, Full(c)) =>
find(By(cookieId, c.value openOr "")) match {
case Full(es) if es.expiration.is < millis => es.delete_!
case Full(es) => logUserIdIn(es.userId)
case _ =>
}
case _ =>
}
f
}
}*/
def logUserIdIn(uid: String): Unit
def recoverUserId: Box[String]
def userDidLogin(uid: UserType) {
userDidLogout(Full(uid))
val inst = create.userId(uid.userIdAsString).saveMe
val cookie = HTTPCookie(CookieName, inst.cookieId.get).
setMaxAge(((inst.expiration.get - millis) / 1000L).toInt).
setPath("/")
S.addCookie(cookie)
}
def userDidLogout(uid: Box[UserType]) {
for (cook <- S.findCookie(CookieName)) {
S.deleteCookie(cook)
find(By(cookieId, cook.value openOr "")).foreach(_.delete_!)
}
}
// def requestLoans: List[LoanWrapper] = myWrapper :: Nil
/**
* This does the cookie to User lookup. In Boot.scala:
* <code>
LiftRules.earlyInStateful.append(ExtendedSession.testCookieEarlyInStateful)
* </code>
*/
def testCookieEarlyInStateful: Box[Req] => Unit = {
ignoredReq => {
(recoverUserId, S.findCookie(CookieName)) match {
case (Empty, Full(c)) =>
find(By(cookieId, c.value openOr "")) match {
case Full(es) if es.expiration.get < millis => es.delete_!
case Full(es) => logUserIdIn(es.userId.get)
case _ =>
}
case _ =>
}
}
}
}
| lzpfmh/framework-2 | persistence/mapper/src/main/scala/net/liftweb/mapper/ProtoExtendedSession.scala | Scala | apache-2.0 | 3,402 |
package barbedwire
import scala.annotation.tailrec
/**
* A Rep-less implementation of FoldLeft
* Easier to run functionality tests on.
*
* A copy-paste of `FoldLeft.scala`.
*/
trait RepLessFolds {
/**
* a type alias for the combination function for
* foldLeft
* `A` is the type of elements that pass through the fold
* `S` is the type that is eventually computed
*/
type Comb[A, S] = (S, A) => S
/**
* foldLeft is basically a pair of a zero value and a combination function
*/
abstract class FoldLeft[A] { self =>
def apply[S](z: S, comb: Comb[A, S]): S
/**
* map
*/
def map[B](f: A => B) = new FoldLeft[B] {
def apply[S](z: S, comb: Comb[B, S]) = self.apply(
z,
(acc: S, elem: A) => comb(acc, f(elem))
)
}
/**
* filter
*/
def filter(p: A => Boolean) = new FoldLeft[A] {
def apply[S](z: S, comb: Comb[A, S]) = self.apply(
z,
(acc: S, elem: A) => if (p(elem)) comb(acc, elem) else acc
)
}
/**
* flatMap
*/
def flatMap[B](f: A => FoldLeft[B]) = new FoldLeft[B] {
def apply[S](z: S, comb: Comb[B, S]) = self.apply(
z,
(acc: S, elem: A) => {
val nestedFld = f(elem)
nestedFld.apply(acc, comb)
}
)
}
/**
* concat
*/
def concat(that: FoldLeft[A]) = new FoldLeft[A] {
def apply[S](z: S, comb: Comb[A, S]) = {
val folded: S = self.apply(z, comb)
that.apply(folded, comb)
}
}
def ++(that: FoldLeft[A]) = this concat that
/**
* append
*/
def append(elem: A) = new FoldLeft[A] {
def apply[S](z: S, comb: Comb[A, S]) = {
val folded: S = self.apply(z, comb)
comb(folded, elem)
}
}
def :+(elem: A) = this append elem
/**
* partition
* This will create code that will run through the original fold twice
* once for the positive predicate, once for the negative.
*
* see the following related post: http://manojo.github.io/2015/03/03/staged-foldleft-partition/
*/
def partition(p: A => Boolean): (FoldLeft[A], FoldLeft[A]) = {
val trues = this filter p
val falses = this filter (a => !p(a))
(trues, falses)
}
/**
* partition, that produces a FoldLeft over `Either` instead of
* two `FoldLeft`s. The important thing is to keep the one
* FoldLeft abstraction.
* This can be rewritten using `map`.
* see the following related post: http://manojo.github.io/2015/03/12/staged-foldleft-groupby/
*/
def partitionBis(p: A => Boolean) = new FoldLeft[Either[A, A]] {
def apply[S](z: S, comb: Comb[Either[A, A], S]) = self.apply(
z,
(acc: S, elem: A) =>
if (p(elem)) comb(acc, Left(elem))
else comb(acc, Right(elem))
)
}
/**
* groupWith
* takes a function which computes some grouping property
* does not create groups just yet, just propagates key-value pairs
*
* can be rewritten using `map`.
* see the following related post: http://manojo.github.io/2015/03/12/staged-foldleft-groupby/
*/
def groupWith[K](f: A => K): FoldLeft[(K, A)] =
this map (elem => (f(elem), elem))
}
/**
* companion object, makes it easier to
* construct folds
*/
object FoldLeft {
/**
* create a fold from list
*/
def fromList[A](ls: List[A]) = new FoldLeft[A] {
def apply[S](z: S, comb: Comb[A, S]): S = {
@tailrec
def loop(acc: S, rest: List[A]): S = rest match {
case Nil => acc
case x :: xs => loop(comb(acc, x), xs)
}
loop(z, ls)
}
}
/**
* create a fold from a range
*/
def fromRange(a: Int, b: Int) = new FoldLeft[Int] {
def apply[S](z: S, comb: Comb[Int, S]) = {
@tailrec
def loop(acc: S, curElem: Int): S = {
if (curElem > b) acc
else loop(comb(acc, curElem), curElem + 1)
}
loop(z, a)
}
}
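// Illustrative usage sketch (assumption, not part of the original file): folds compose lazily
// and only run when `apply` supplies a zero value and a combination function.
//   FoldLeft.fromRange(1, 10).filter(_ % 2 == 0).map(_ * 2).apply(0, _ + _) == 60
//   FoldLeft.fromList(List(1, 2, 3)).groupWith(_ % 2).apply(Nil: List[(Int, Int)], _ :+ _)
//     == List((1, 1), (0, 2), (1, 3))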
}
}
| manojo/staged-fold-fusion | src/main/scala/barbedwire/RepLessFold.scala | Scala | mit | 4,093 |
package kata.calc
import kata.calc.Calculator.evaluate
import org.scalatest.{FlatSpec, Matchers}
class StringCalculatorTest extends FlatSpec with Matchers {
it should " evaluate integer number " in {
evaluate("1023") shouldBe 1023
}
it should " evaluate float number " in {
evaluate("123.435") shouldBe 123.435
}
it should " evaluate addition " in {
evaluate("324+213") shouldBe (324+213)
}
it should " evaluate subtraction " in {
evaluate("345.6547-213.543") shouldBe (345.6547-213.543)
}
it should "evaluate multiplication" in {
evaluate("4395×5") shouldBe 4395*5
}
it should " evaluate division " in {
evaluate("435÷5") shouldBe 435/5
}
it should "evaluate number of operations" in {
evaluate("3243+543-21-12821÷435+15×28") shouldBe (3243.0+543-21-12821.0/435+15*28)
}
}
| Alex-Diez/Scala-TDD-Katas | old-katas/string-calc/day-3/src/test/scala/kata/calc/StringCalculatorTest.scala | Scala | mit | 900 |
package models.admin
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import db._
import io.megam.auth.funnel.FunnelErrors._
import controllers.Constants._
import models.billing.BalancesResults
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import io.megam.auth.stack.Role.{ADMIN}
object Balances {
// Admin can suspend, impersonate, block, unblock, or activate users. This is a hack for 1.5.
def update(input: String): ValidationNel[Throwable, BalancesResults] = {
models.billing.Balances.update(input)
}
}
| indykish/vertice_gateway | app/models/admin/Balances.scala | Scala | mit | 661 |
package org.apache.mesos.chronos.scheduler.jobs
import org.joda.time._
import org.specs2.mock._
import org.specs2.mutable._
class TaskUtilsSpec extends SpecificationWithJUnit with Mockito {
"TaskUtils" should {
"Get taskId" in {
val schedule = "R/2012-01-01T00:00:01.000Z/P1M"
val arguments = "-a 1 -b 2"
val cmdArgs = "-c 1 -d 2"
val job1 = new ScheduleBasedJob(schedule, "sample-name", "sample-command", arguments = List(arguments))
val job2 = new ScheduleBasedJob(schedule, "sample-name", "sample-command")
val job3 = new ScheduleBasedJob(schedule, "sample-name", "sample-command", arguments = List(arguments))
val ts = 1420843781398L
val due = new DateTime(ts)
val taskIdOne = TaskUtils.getTaskId(job1, due, 0)
val taskIdTwo = TaskUtils.getTaskId(job2, due, 0)
val taskIdThree = TaskUtils.getTaskId(job3, due, 0, Option(cmdArgs))
val taskIdFour = TaskUtils.getTaskId(job2, due, 0, Option(cmdArgs))
taskIdOne must_== "ct:1420843781398:0:sample-name:" + arguments
taskIdTwo must_== "ct:1420843781398:0:sample-name:"
taskIdThree must_== "ct:1420843781398:0:sample-name:" + cmdArgs // test override
taskIdFour must_== "ct:1420843781398:0:sample-name:" + cmdArgs // test adding args
}
"Get job arguments for taskId" in {
val arguments = "-a 1 -b 2"
var taskId = "ct:1420843781398:0:test:" + arguments
val jobArguments = TaskUtils.getJobArgumentsForTaskId(taskId)
jobArguments must_== arguments
}
"Disable command injection" in {
val schedule = "R/2012-01-01T00:00:01.000Z/P1M"
val cmdArgs = "-c 1 ; ./evil.sh"
val expectedArgs = "-c 1 ./evil.sh"
val job1 = new ScheduleBasedJob(schedule, "sample-name", "sample-command")
val ts = 1420843781398L
val due = new DateTime(ts)
val taskIdOne = TaskUtils.getTaskId(job1, due, 0, Option(cmdArgs))
taskIdOne must_== "ct:1420843781398:0:sample-name:" + expectedArgs
}
"Parse taskId" in {
val arguments = "-a 1 -b 2"
val arguments2 = "-a 1:2 --B test"
val taskIdOne = "ct:1420843781398:0:test:" + arguments
val (jobName, jobDue, attempt, jobArguments) = TaskUtils.parseTaskId(taskIdOne)
jobName must_== "test"
jobDue must_== 1420843781398L
attempt must_== 0
jobArguments must_== arguments
val taskIdTwo = "ct:1420843781398:0:test:" + arguments2
val (_, _, _, jobArguments2) = TaskUtils.parseTaskId(taskIdTwo)
jobArguments2 must_== arguments2
val taskIdThree = "ct:1420843781398:0:test"
val (jobName3, _, _, jobArguments3) = TaskUtils.parseTaskId(taskIdThree)
jobName3 must_== "test"
jobArguments3 must_== ""
}
}
}
| mikkokupsu/chronos | src/test/scala/org/apache/mesos/chronos/scheduler/jobs/TaskUtilsSpec.scala | Scala | apache-2.0 | 2,771 |
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package breeze
package text
package transform
/**
* A generic (loadable) transformation of a tokenized input text.
*
* @author dramage
*/
@SerialVersionUID(1)
trait Transformer extends (Iterable[String] => Iterable[String]) with Serializable {
override def toString = getClass.getName + "()"
}
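// Illustrative sketch (assumption, not part of the original file): a minimal Transformer that
// lower-cases every token.
//   class LowercaseTransformer extends Transformer {
//     override def apply(tokens: Iterable[String]): Iterable[String] = tokens.map(_.toLowerCase)
//   }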
| tjhunter/scalanlp-core | process/src/main/scala/breeze/text/transform/Transformer.scala | Scala | apache-2.0 | 888 |
package org.http4s
package headers
import org.http4s.parser.HttpHeaderParser
import org.http4s.util.NonEmptyList
object `Accept-Charset` extends HeaderKey.Internal[`Accept-Charset`] with HeaderKey.Recurring {
override def parse(s: String): ParseResult[`Accept-Charset`] =
HttpHeaderParser.ACCEPT_CHARSET(s)
}
final case class `Accept-Charset`(values: NonEmptyList[CharsetRange]) extends Header.RecurringRenderable {
def key: `Accept-Charset`.type = `Accept-Charset`
type Value = CharsetRange
def qValue(charset: Charset): QValue = {
def specific = values.collectFirst { case cs: CharsetRange.Atom => cs.qValue }
def splatted = values.collectFirst { case cs: CharsetRange.`*` => cs.qValue }
specific orElse splatted getOrElse QValue.Zero
}
def isSatisfiedBy(charset: Charset): Boolean = qValue(charset) > QValue.Zero
def map(f: CharsetRange => CharsetRange): `Accept-Charset` = `Accept-Charset`(values.map(f))
}
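// Illustrative usage sketch (assumption, not part of the original file):
//   `Accept-Charset`.parse("utf-8;q=0.9, *;q=0.1").map { h =>
//     h.qValue(Charset.`UTF-8`)        // q=0.9
//     h.isSatisfiedBy(Charset.`UTF-8`) // true
//   }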
| hvesalai/http4s | core/src/main/scala/org/http4s/headers/Accept-Charset.scala | Scala | apache-2.0 | 947 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.LinkedBlockingDeque
import java.util.concurrent.atomic.AtomicReference
import kafka.common.{InterBrokerSendThread, RequestAndCompletionHandler}
import kafka.raft.RaftManager
import kafka.utils.Logging
import org.apache.kafka.clients._
import org.apache.kafka.common.Node
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.AbstractRequest
import org.apache.kafka.common.security.JaasContext
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.server.common.ApiMessageAndVersion
import scala.collection.Seq
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
trait ControllerNodeProvider {
def get(): Option[Node]
def listenerName: ListenerName
def securityProtocol: SecurityProtocol
def saslMechanism: String
}
object MetadataCacheControllerNodeProvider {
def apply(
config: KafkaConfig,
metadataCache: kafka.server.MetadataCache
): MetadataCacheControllerNodeProvider = {
val listenerName = config.controlPlaneListenerName
.getOrElse(config.interBrokerListenerName)
val securityProtocol = config.controlPlaneSecurityProtocol
.getOrElse(config.interBrokerSecurityProtocol)
new MetadataCacheControllerNodeProvider(
metadataCache,
listenerName,
securityProtocol,
config.saslMechanismInterBrokerProtocol
)
}
}
class MetadataCacheControllerNodeProvider(
val metadataCache: kafka.server.MetadataCache,
val listenerName: ListenerName,
val securityProtocol: SecurityProtocol,
val saslMechanism: String
) extends ControllerNodeProvider {
override def get(): Option[Node] = {
metadataCache.getControllerId
.flatMap(metadataCache.getAliveBrokerNode(_, listenerName))
}
}
object RaftControllerNodeProvider {
def apply(raftManager: RaftManager[ApiMessageAndVersion],
config: KafkaConfig,
controllerQuorumVoterNodes: Seq[Node]): RaftControllerNodeProvider = {
val controllerListenerName = new ListenerName(config.controllerListenerNames.head)
val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse(controllerListenerName, SecurityProtocol.forName(controllerListenerName.value()))
val controllerSaslMechanism = config.saslMechanismControllerProtocol
new RaftControllerNodeProvider(
raftManager,
controllerQuorumVoterNodes,
controllerListenerName,
controllerSecurityProtocol,
controllerSaslMechanism
)
}
}
/**
* Finds the controller node by checking the metadata log manager.
* This provider is used when we are using a Raft-based metadata quorum.
*/
class RaftControllerNodeProvider(val raftManager: RaftManager[ApiMessageAndVersion],
controllerQuorumVoterNodes: Seq[Node],
val listenerName: ListenerName,
val securityProtocol: SecurityProtocol,
val saslMechanism: String
) extends ControllerNodeProvider with Logging {
val idToNode = controllerQuorumVoterNodes.map(node => node.id() -> node).toMap
override def get(): Option[Node] = {
raftManager.leaderAndEpoch.leaderId.asScala.map(idToNode)
}
}
object BrokerToControllerChannelManager {
def apply(
controllerNodeProvider: ControllerNodeProvider,
time: Time,
metrics: Metrics,
config: KafkaConfig,
channelName: String,
threadNamePrefix: Option[String],
retryTimeoutMs: Long
): BrokerToControllerChannelManager = {
new BrokerToControllerChannelManagerImpl(
controllerNodeProvider,
time,
metrics,
config,
channelName,
threadNamePrefix,
retryTimeoutMs
)
}
}
trait BrokerToControllerChannelManager {
def start(): Unit
def shutdown(): Unit
def controllerApiVersions(): Option[NodeApiVersions]
def sendRequest(
request: AbstractRequest.Builder[_ <: AbstractRequest],
callback: ControllerRequestCompletionHandler
): Unit
}
/**
* This class manages the connection between a broker and the controller. It runs a single
* [[BrokerToControllerRequestThread]] which uses the broker's metadata cache as its own metadata to find
* and connect to the controller. The channel is async and runs the network connection in the background.
* The maximum number of in-flight requests is set to one to ensure orderly responses from the controller; therefore,
* care must be taken not to block on outstanding requests for too long.
*/
class BrokerToControllerChannelManagerImpl(
controllerNodeProvider: ControllerNodeProvider,
time: Time,
metrics: Metrics,
config: KafkaConfig,
channelName: String,
threadNamePrefix: Option[String],
retryTimeoutMs: Long
) extends BrokerToControllerChannelManager with Logging {
private val logContext = new LogContext(s"[BrokerToControllerChannelManager broker=${config.brokerId} name=$channelName] ")
private val manualMetadataUpdater = new ManualMetadataUpdater()
private val apiVersions = new ApiVersions()
private val currentNodeApiVersions = NodeApiVersions.create()
private val requestThread = newRequestThread
def start(): Unit = {
requestThread.start()
}
def shutdown(): Unit = {
requestThread.shutdown()
info(s"Broker to controller channel manager for $channelName shutdown")
}
private[server] def newRequestThread = {
val networkClient = {
val channelBuilder = ChannelBuilders.clientChannelBuilder(
controllerNodeProvider.securityProtocol,
JaasContext.Type.SERVER,
config,
controllerNodeProvider.listenerName,
controllerNodeProvider.saslMechanism,
time,
config.saslInterBrokerHandshakeRequestEnable,
logContext
)
val selector = new Selector(
NetworkReceive.UNLIMITED,
Selector.NO_IDLE_TIMEOUT_MS,
metrics,
time,
channelName,
Map("BrokerId" -> config.brokerId.toString).asJava,
false,
channelBuilder,
logContext
)
new NetworkClient(
selector,
manualMetadataUpdater,
config.brokerId.toString,
1,
50,
50,
Selectable.USE_DEFAULT_BUFFER_SIZE,
Selectable.USE_DEFAULT_BUFFER_SIZE,
config.requestTimeoutMs,
config.connectionSetupTimeoutMs,
config.connectionSetupTimeoutMaxMs,
time,
true,
apiVersions,
logContext
)
}
val threadName = threadNamePrefix match {
case None => s"BrokerToControllerChannelManager broker=${config.brokerId} name=$channelName"
case Some(name) => s"$name:BrokerToControllerChannelManager broker=${config.brokerId} name=$channelName"
}
new BrokerToControllerRequestThread(
networkClient,
manualMetadataUpdater,
controllerNodeProvider,
config,
time,
threadName,
retryTimeoutMs
)
}
/**
* Send request to the controller.
*
* @param request The request to be sent.
* @param callback Request completion callback.
*/
def sendRequest(
request: AbstractRequest.Builder[_ <: AbstractRequest],
callback: ControllerRequestCompletionHandler
): Unit = {
requestThread.enqueue(BrokerToControllerQueueItem(
time.milliseconds(),
request,
callback
))
}
def controllerApiVersions(): Option[NodeApiVersions] =
requestThread.activeControllerAddress().flatMap(
activeController => if (activeController.id() == config.brokerId)
Some(currentNodeApiVersions)
else
Option(apiVersions.get(activeController.idString()))
)
}
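// Illustrative lifecycle sketch (assumption, not part of the original file; the channel name and
// timeout below are made up for the example):
//   val channelManager = BrokerToControllerChannelManager(controllerNodeProvider, time, metrics,
//     config, "exampleChannel", threadNamePrefix = None, retryTimeoutMs = 60000)
//   channelManager.start()
//   channelManager.sendRequest(requestBuilder, completionHandler) // handler gets onComplete or onTimeout
//   channelManager.shutdown()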
abstract class ControllerRequestCompletionHandler extends RequestCompletionHandler {
/**
* Fired when the request transmission time passes the caller-defined deadline on the channel queue.
* It covers the total waiting time, including retries that might result from individual request timeouts.
*/
def onTimeout(): Unit
}
case class BrokerToControllerQueueItem(
createdTimeMs: Long,
request: AbstractRequest.Builder[_ <: AbstractRequest],
callback: ControllerRequestCompletionHandler
)
class BrokerToControllerRequestThread(
networkClient: KafkaClient,
metadataUpdater: ManualMetadataUpdater,
controllerNodeProvider: ControllerNodeProvider,
config: KafkaConfig,
time: Time,
threadName: String,
retryTimeoutMs: Long
) extends InterBrokerSendThread(threadName, networkClient, config.controllerSocketTimeoutMs, time, isInterruptible = false) {
private val requestQueue = new LinkedBlockingDeque[BrokerToControllerQueueItem]()
private val activeController = new AtomicReference[Node](null)
// Used for testing
@volatile
private[server] var started = false
def activeControllerAddress(): Option[Node] = {
Option(activeController.get())
}
private def updateControllerAddress(newActiveController: Node): Unit = {
activeController.set(newActiveController)
}
def enqueue(request: BrokerToControllerQueueItem): Unit = {
if (!started) {
throw new IllegalStateException("Cannot enqueue a request if the request thread is not running")
}
requestQueue.add(request)
if (activeControllerAddress().isDefined) {
wakeup()
}
}
def queueSize: Int = {
requestQueue.size
}
override def generateRequests(): Iterable[RequestAndCompletionHandler] = {
val currentTimeMs = time.milliseconds()
val requestIter = requestQueue.iterator()
while (requestIter.hasNext) {
val request = requestIter.next
if (currentTimeMs - request.createdTimeMs >= retryTimeoutMs) {
requestIter.remove()
request.callback.onTimeout()
} else {
val controllerAddress = activeControllerAddress()
if (controllerAddress.isDefined) {
requestIter.remove()
return Some(RequestAndCompletionHandler(
time.milliseconds(),
controllerAddress.get,
request.request,
handleResponse(request)
))
}
}
}
None
}
private[server] def handleResponse(queueItem: BrokerToControllerQueueItem)(response: ClientResponse): Unit = {
if (response.authenticationException != null) {
error(s"Request ${queueItem.request} failed due to authentication error with controller",
response.authenticationException)
queueItem.callback.onComplete(response)
} else if (response.versionMismatch != null) {
error(s"Request ${queueItem.request} failed due to unsupported version error",
response.versionMismatch)
queueItem.callback.onComplete(response)
} else if (response.wasDisconnected()) {
updateControllerAddress(null)
requestQueue.putFirst(queueItem)
} else if (response.responseBody().errorCounts().containsKey(Errors.NOT_CONTROLLER)) {
// just close the controller connection and wait for metadata cache update in doWork
activeControllerAddress().foreach { controllerAddress => {
networkClient.disconnect(controllerAddress.idString)
updateControllerAddress(null)
}}
requestQueue.putFirst(queueItem)
} else {
queueItem.callback.onComplete(response)
}
}
override def doWork(): Unit = {
if (activeControllerAddress().isDefined) {
super.pollOnce(Long.MaxValue)
} else {
debug("Controller isn't cached, looking for local metadata changes")
controllerNodeProvider.get() match {
case Some(controllerNode) =>
info(s"Recorded new controller, from now on will use broker $controllerNode")
updateControllerAddress(controllerNode)
metadataUpdater.setNodes(Seq(controllerNode).asJava)
case None =>
// need to back off to avoid tight loops
debug("No controller defined in metadata cache, retrying after backoff")
super.pollOnce(maxTimeoutMs = 100)
}
}
}
override def start(): Unit = {
super.start()
started = true
}
}
| TiVo/kafka | core/src/main/scala/kafka/server/BrokerToControllerChannelManager.scala | Scala | apache-2.0 | 13,055 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package termination
import scala.collection.mutable.{Map => MutableMap, ListBuffer => MutableList}
trait TerminationChecker { self =>
val program: Program { val trees: Trees }
val context: inox.Context
import program.trees._
import program.symbols.{given, _}
def terminates(fd: FunDef): TerminationGuarantee
/* Caches for inductive lemmas */
type Postconditions = MutableMap[Identifier, Lambda]
type Applications = MutableMap[(Identifier, Identifier, Identifier), Seq[ValDef] => Expr]
type InductiveLemmas = Option[(Postconditions, Applications)]
/* End caches for inductive lemmas */
sealed abstract class TerminationGuarantee {
def isGuaranteed: Boolean
}
case class Terminates(reason: String, measure: Option[Expr], lemmas: InductiveLemmas) extends TerminationGuarantee {
override def isGuaranteed: Boolean = true
}
sealed abstract class NonTerminating extends TerminationGuarantee {
override def isGuaranteed: Boolean = false
def asString(using PrinterOptions): String = this match {
case NotWellFormed(sorts) =>
s"ADTs ${sorts.map(_.id.asString).mkString(", ")} are ill-formed"
case LoopsGivenInputs(fi) =>
if (fi.args.nonEmpty) {
val max = fi.tfd.params.map(_.asString.length).max
val model = for ((vd, e) <- fi.tfd.params zip fi.args) yield {
("%-" + max + "s -> %s").format(vd.asString, e.asString)
}
s"Function ${fi.id.asString} loops given inputs:\\n${model.mkString("\\n")}"
} else {
s"Function ${fi.id.asString} loops when called"
}
case MaybeLoopsGivenInputs(fi) =>
if (fi.args.nonEmpty) {
val max = fi.tfd.params.map(_.asString.length).max
val model = for ((vd, e) <- fi.tfd.params zip fi.args) yield {
("%-" + max + "s -> %s").format(vd.asString, e.asString)
}
s"Function ${fi.id.asString} maybe loops given inputs:\\n${model.mkString("\\n")}"
} else {
s"Function ${fi.id.asString} maybe loops when called"
}
case CallsNonTerminating(calls) =>
s"Calls non-terminating functions ${calls.map(_.id.asString).mkString(", ")}"
case DecreasesFailed(fd) =>
s"Decreases check failed for ${fd.id.asString}"
}
}
case class NotWellFormed(sorts: Set[ADTSort]) extends NonTerminating
case class LoopsGivenInputs(fi: FunctionInvocation) extends NonTerminating
case class MaybeLoopsGivenInputs(fi: FunctionInvocation) extends NonTerminating
case class CallsNonTerminating(calls: Set[FunDef]) extends NonTerminating
case class DecreasesFailed(fd: FunDef) extends NonTerminating
case object NoGuarantee extends TerminationGuarantee {
override def isGuaranteed: Boolean = false
}
object measureCache {
private val cache: MutableMap[FunDef, Expr] = MutableMap.empty
def add(p: (FunDef, Expr)) = cache += p
def get = cache
}
val integerOrdering: StructuralSize with SolverProvider {
val checker: self.type
}
val lexicographicOrdering: StructuralSize with SolverProvider {
val checker: self.type
}
val bvOrdering: StructuralSize with SolverProvider {
val checker: self.type
}
def get = {
integerOrdering.functions ++
lexicographicOrdering.functions ++
bvOrdering.functions
}
}
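// Illustrative usage sketch (assumption, not part of the original file):
//   val checker = TerminationChecker(program, context)(sizeFunctions)
//   checker.terminates(fd) match {
//     case t: checker.Terminates      => // proven terminating, possibly with an explicit measure
//     case nt: checker.NonTerminating => // reported via nt.asString
//     case checker.NoGuarantee        => // the analysis was inconclusive
//   }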
object TerminationChecker {
def apply(p: Program { val trees: Trees }, ctx: inox.Context)(sze: SizeFunctions { val trees: p.trees.type })
: TerminationChecker { val program: p.type } = {
class EncoderImpl(override val s: p.trees.type, override val t: stainless.trees.type)
extends inox.transformers.TreeTransformer {
override def transform(e: s.Expr): t.Expr = e match {
case s.Decreases(measure, body) => transform(body)
case _ => super.transform(e)
}
}
val encoderImpl = new EncoderImpl(p.trees, stainless.trees)
class ProcessingPipelineImpl(override val program: p.type, override val context: inox.Context) extends ProcessingPipeline {
self =>
class CFAImpl(override val program: self.program.type)
extends CICFA(program, self.context)
val cfa = new CFAImpl(self.program)
class IntegerOrderingImpl(override val checker: self.type, override val sizes: sze.type,
override val cfa: self.cfa.type, override val encoder: encoderImpl.type)
extends SumOrdering with StructuralSize with Strengthener with RelationBuilder with ChainBuilder
// We explicitly widen integerOrdering because scalac seems to ignore some of the mixed traits if we don't do so.
val integerOrdering: SumOrdering with StructuralSize with Strengthener with RelationBuilder with ChainBuilder {
val checker: self.type
} = new IntegerOrderingImpl(self, sze, self.cfa, encoderImpl)
class LexicographicOrderingImpl(override val checker: self.type, override val sizes: sze.type,
override val cfa: self.cfa.type, override val encoder: encoderImpl.type)
extends LexicographicOrdering with StructuralSize with Strengthener with RelationBuilder
// Ditto
val lexicographicOrdering: LexicographicOrdering with StructuralSize with Strengthener with RelationBuilder {
val checker: self.type
} = new LexicographicOrderingImpl(self, sze, self.cfa, encoderImpl)
class BVOrderingImpl(override val checker: self.type, override val sizes: sze.type,
override val cfa: self.cfa.type, override val encoder: encoderImpl.type)
extends BVOrdering with StructuralSize with Strengthener with RelationBuilder
// Ditto
val bvOrdering: BVOrdering with StructuralSize with Strengthener with RelationBuilder {
val checker: self.type
} = new BVOrderingImpl(self, sze, self.cfa, encoderImpl)
class RecursionProcessorImpl(override val checker: self.type,
override val ordering: integerOrdering.type)
extends RecursionProcessor(checker, ordering)
val recursionProcessor = new RecursionProcessorImpl(self, integerOrdering)
class DecreasesProcessorImpl(override val checker: self.type, override val ordering: integerOrdering.type)
extends DecreasesProcessor(checker, ordering)
val decreasesProcessor = new DecreasesProcessorImpl(self, integerOrdering)
class SelfCallsProcessorImpl(override val checker: self.type)
extends SelfCallsProcessor(checker)
val selfCallsProcessor = new SelfCallsProcessorImpl(self)
class IntegerProcessorImpl(override val checker: self.type, override val ordering: integerOrdering.type)
extends RelationProcessor(checker, ordering)
val integerProcessor = new IntegerProcessorImpl(self, integerOrdering)
class LexicographicProcessorImpl(override val checker: self.type, override val ordering: lexicographicOrdering.type)
extends RelationProcessor(checker, ordering)
val lexicographicProcessor = new LexicographicProcessorImpl(self, lexicographicOrdering)
class BVProcessorImpl(override val checker: self.type, override val ordering: bvOrdering.type)
extends RelationProcessor(checker, ordering)
val bvProcessor = new BVProcessorImpl(self, bvOrdering)
class ChainProcessorImpl(override val checker: self.type, override val ordering: integerOrdering.type)
extends ChainProcessor(checker, ordering)
val chainProcessor = new ChainProcessorImpl(self, integerOrdering)
class LoopProcessorImpl(override val checker: self.type, override val ordering: integerOrdering.type)
extends LoopProcessor(checker, ordering)
val loopProcessor = new LoopProcessorImpl(self, integerOrdering)
val processors = {
List(
recursionProcessor,
selfCallsProcessor,
decreasesProcessor,
integerProcessor,
lexicographicProcessor,
bvProcessor,
chainProcessor,
loopProcessor,
)
}
}
new ProcessingPipelineImpl(p, ctx)
}
}
| epfl-lara/stainless | core/src/main/scala/stainless/termination/TerminationChecker.scala | Scala | apache-2.0 | 8,197 |
package org.channing.free
final case class PostCommit(f: () => Unit) | channingwalton/doobie-play | src/main/scala/org/channing/free/PostCommit.scala | Scala | unlicense | 69 |
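// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original channingwalton/doobie-play
// sources): a minimal use of PostCommit, wrapping a side effect so an
// interpreter can run it only after a successful commit. The object and the
// printed message below are made up for illustration.
// ---------------------------------------------------------------------------
object PostCommitExample {
  import org.channing.free.PostCommit

  def main(args: Array[String]): Unit = {
    val afterCommit = PostCommit(() => println("transaction committed"))
    afterCommit.f() // run the deferred side effect
  }
}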
package org.denigma.graphs
import org.denigma.graphs.core.SpriteGraph
import org.denigma.graphs.semantic.{SemanticEdge, SemanticNode}
import org.denigma.graphs.tools.HtmlSprite
import org.denigma.graphs.visual.{Defs, EdgeView, LineParams, NodeView}
import org.scalajs.dom
import org.scalajs.dom.{Event, HTMLElement}
import org.scalax.semweb.rdf.{BasicTriplet, IRI, Res}
import org.scalax.semweb.shex.PropertyModel
import rx.core.Var
class SemanticGraph(val container:HTMLElement,
val width:Double = dom.window.innerWidth,
val height:Double = dom.window.innerHeight)
extends SpriteGraph
{
override type NodeId = Res
override type EdgeId = BasicTriplet
override type NodeData = Var[PropertyModel]
override type EdgeData = Var[IRI]
override type ViewOfNode = NodeView[NodeData]
override type ViewOfEdge = EdgeView[EdgeData]
type Node = SemanticNode
type Edge = SemanticEdge
override def removeEdge(id:EdgeId): this.type = edges.get(id) match {
case Some(e)=>
cssScene.remove(e.view.sprite)
scene.remove(e.view.arrow)
edges = edges - id
this
case None=> throw new Exception("node that should be removed is not found"); this
}
override def removeNode(id:NodeId):this.type = nodes.get(id) match {
case Some(n)=>
cssScene.remove(n.view.sprite)
val toRemove = edges.filter{case (key,value)=>value.from==n || value.to==n}
toRemove.foreach{case (key,value)=>this.removeEdge(key)}
nodes = nodes - id; this
case None => throw new Exception("node that should be removed is not found"); this
}
def addNode(id:NodeId,data:NodeData, element:HTMLElement, colorName:String):Node =
this.addNode(id,data, new ViewOfNode(data,new HtmlSprite(element),colorName))
override def addNode(id:NodeId,data:NodeData, view:ViewOfNode):Node =
{
import view.{sprite => sp}
this.randomPos(view.sprite)
val n = new SemanticNode(data,view)
sp.element.addEventListener( "mousedown", (this.onMouseDown(sp) _).asInstanceOf[Function[Event,_ ]] )
cssScene.add(view.sprite)
this.nodes = nodes + (id->n)
n
}
def addEdge(id:EdgeId,from:Node,to:Node, data: EdgeData,element:HTMLElement):Edge =
{
val color = Defs.colorMap.get(from.view.colorName) match {
case Some(c)=>c
case None=>Defs.color
}
val sp = new HtmlSprite(element)
element.addEventListener( "mousedown", (this.onMouseDown(sp) _).asInstanceOf[Function[Event,_ ]] )
this.controls.moveTo(sp.position)
//sp.visible = false
addEdge(id,from,to,data,new EdgeView(from.view.sprite,to.view.sprite,data,sp, LineParams(color)))
}
override def addEdge(id:EdgeId,from:Node,to:Node, data: EdgeData,view:ViewOfEdge):Edge =
{
cssScene.add(view.sprite)
val e = new Edge(from,to,data,view)
scene.add(view.arrow)
edges = edges + (id->e)
e
}
} | antonkulaga/semantic-graph | graphs/src/main/scala/org/denigma/graphs/semantic/SemanticGraph.scala | Scala | mpl-2.0 | 2,907 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.net.URI
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.BaseRelation
/**
* A command used to create a data source table.
*
* Note: This is different from [[CreateTableCommand]]. Please check the syntax for difference.
* This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* [(col1 data_type [COMMENT col_comment], ...)]
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* }}}
*/
case class CreateDataSourceTableCommand(table: CatalogTable, ignoreIfExists: Boolean)
extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
assert(table.tableType != CatalogTableType.VIEW)
assert(table.provider.isDefined)
val sessionState = sparkSession.sessionState
if (sessionState.catalog.tableExists(table.identifier)) {
if (ignoreIfExists) {
return Seq.empty[Row]
} else {
throw new AnalysisException(s"Table ${table.identifier.unquotedString} already exists.")
}
}
// Create the relation to validate the arguments before writing the metadata to the metastore,
// and infer the table schema and partition if users didn't specify schema in CREATE TABLE.
val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
// Fill in some default table options from the session conf
val tableWithDefaultOptions = table.copy(
identifier = table.identifier.copy(
database = Some(
table.identifier.database.getOrElse(sessionState.catalog.getCurrentDatabase))),
tracksPartitionsInCatalog = sessionState.conf.manageFilesourcePartitions)
val dataSource: BaseRelation =
DataSource(
sparkSession = sparkSession,
userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema),
partitionColumns = table.partitionColumnNames,
className = table.provider.get,
bucketSpec = table.bucketSpec,
options = table.storage.properties ++ pathOption,
        // As discussed in SPARK-19583, we don't check whether the location exists
catalogTable = Some(tableWithDefaultOptions)).resolveRelation(checkFilesExist = false)
val partitionColumnNames = if (table.schema.nonEmpty) {
table.partitionColumnNames
} else {
// This is guaranteed in `PreprocessDDL`.
assert(table.partitionColumnNames.isEmpty)
dataSource match {
case r: HadoopFsRelation => r.partitionSchema.fieldNames.toSeq
case _ => Nil
}
}
val newTable = table.copy(
schema = dataSource.schema,
partitionColumnNames = partitionColumnNames,
// If metastore partition management for file source tables is enabled, we start off with
// partition provider hive, but no partitions in the metastore. The user has to call
// `msck repair table` to populate the table partitions.
tracksPartitionsInCatalog = partitionColumnNames.nonEmpty &&
sessionState.conf.manageFilesourcePartitions)
// We will return Nil or throw exception at the beginning if the table already exists, so when
// we reach here, the table should not exist and we should set `ignoreIfExists` to false.
sessionState.catalog.createTable(newTable, ignoreIfExists = false)
Seq.empty[Row]
}
}
/**
* A command used to create a data source table using the result of a query.
*
* Note: This is different from `CreateHiveTableAsSelectCommand`. Please check the syntax for
* difference. This is not intended for temporary tables.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* USING format OPTIONS ([option1_name "option1_value", option2_name "option2_value", ...])
* AS SELECT ...
* }}}
*/
case class CreateDataSourceTableAsSelectCommand(
table: CatalogTable,
mode: SaveMode,
query: LogicalPlan)
extends RunnableCommand {
override def innerChildren: Seq[LogicalPlan] = Seq(query)
override def run(sparkSession: SparkSession): Seq[Row] = {
assert(table.tableType != CatalogTableType.VIEW)
assert(table.provider.isDefined)
val sessionState = sparkSession.sessionState
val db = table.identifier.database.getOrElse(sessionState.catalog.getCurrentDatabase)
val tableIdentWithDB = table.identifier.copy(database = Some(db))
val tableName = tableIdentWithDB.unquotedString
if (sessionState.catalog.tableExists(tableIdentWithDB)) {
assert(mode != SaveMode.Overwrite,
s"Expect the table $tableName has been dropped when the save mode is Overwrite")
if (mode == SaveMode.ErrorIfExists) {
throw new AnalysisException(s"Table $tableName already exists. You need to drop it first.")
}
if (mode == SaveMode.Ignore) {
// Since the table already exists and the save mode is Ignore, we will just return.
return Seq.empty
}
saveDataIntoTable(
sparkSession, table, table.storage.locationUri, query, SaveMode.Append, tableExists = true)
} else {
assert(table.schema.isEmpty)
val tableLocation = if (table.tableType == CatalogTableType.MANAGED) {
Some(sessionState.catalog.defaultTablePath(table.identifier))
} else {
table.storage.locationUri
}
val result = saveDataIntoTable(
sparkSession, table, tableLocation, query, SaveMode.Overwrite, tableExists = false)
val newTable = table.copy(
storage = table.storage.copy(locationUri = tableLocation),
        // We will use the schema of the resolved relation (`result`) as the schema of the table
        // (instead of the schema of the query). This matters because the relation provider may
        // change nullability (for example, see org.apache.spark.sql.parquet.DefaultSource).
schema = result.schema)
sessionState.catalog.createTable(newTable, ignoreIfExists = false)
result match {
case fs: HadoopFsRelation if table.partitionColumnNames.nonEmpty &&
sparkSession.sqlContext.conf.manageFilesourcePartitions =>
// Need to recover partitions into the metastore so our saved data is visible.
sessionState.executePlan(AlterTableRecoverPartitionsCommand(table.identifier)).toRdd
case _ =>
}
}
Seq.empty[Row]
}
private def saveDataIntoTable(
session: SparkSession,
table: CatalogTable,
tableLocation: Option[URI],
data: LogicalPlan,
mode: SaveMode,
tableExists: Boolean): BaseRelation = {
// Create the relation based on the input logical plan: `data`.
val pathOption = tableLocation.map("path" -> CatalogUtils.URIToString(_))
val dataSource = DataSource(
session,
className = table.provider.get,
partitionColumns = table.partitionColumnNames,
bucketSpec = table.bucketSpec,
options = table.storage.properties ++ pathOption,
catalogTable = if (tableExists) Some(table) else None)
try {
dataSource.writeAndRead(mode, query)
} catch {
case ex: AnalysisException =>
logError(s"Failed to write to table ${table.identifier.unquotedString}", ex)
throw ex
}
}
}
| someorz/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala | Scala | apache-2.0 | 8,308 |
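// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original Spark sources): the SQL forms
// described in the scaladoc above, issued through a SparkSession. The table
// names and the path are made up; a local session is assumed.
// ---------------------------------------------------------------------------
import org.apache.spark.sql.SparkSession

object CreateDataSourceTableExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("ddl-example").getOrCreate()

    // Planned as a CreateDataSourceTableCommand.
    spark.sql(
      """CREATE TABLE IF NOT EXISTS people (name STRING, age INT)
        |USING parquet OPTIONS (path "/tmp/people")""".stripMargin)

    // Planned as a CreateDataSourceTableAsSelectCommand.
    spark.sql("CREATE TABLE adults USING parquet AS SELECT name FROM people WHERE age >= 18")

    spark.stop()
  }
}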
package org.betterers.spark.gis
import java.io.CharArrayWriter
import java.nio.ByteBuffer
import org.apache.spark.Logging
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.codehaus.jackson.JsonFactory
import scala.language.postfixOps
import scala.util.Try
/** User defined type for [[Geometry]] instances
*
* @author Ubik <emiliano.leporati@gmail.com>
*/
class GeometryType extends UserDefinedType[Geometry] with Logging {
/** [[Geometry]] values are serialized as strings */
override def sqlType: DataType = StringType
override def userClass: Class[Geometry] = classOf[Geometry]
/** Translates a [[Geometry]] to a GeoJSON [[String]] */
override def serialize(obj: Any): String = {
obj match {
case g: Geometry =>
g.impl.toGeoJson
case _ =>
throw new IllegalArgumentException(s"Invalid Geometry value to serialize: $obj")
}
}
/** Translates a [[Geometry]], a [[String]] containing a GeoJSON, or a [[Map]] obtained
* during JSON deserialization to a [[Geometry]]
*/
override def deserialize(datum: Any): Geometry = {
datum match {
case g: Geometry => g
case b: Array[Byte] => Geometry.fromBinary(b)
case b: ByteBuffer => Geometry.fromBinary(b)
case s: UTF8String => deserialize(s.toString)
case s: String =>
Try {
Geometry.fromGeoJson(s)
} orElse {
logWarning("Not a GeoJSON")
Try(Geometry.fromString(s))
} get
case r: Map[_, _] =>
val writer = new CharArrayWriter()
val gen = new JsonFactory().createJsonGenerator(writer)
def writeJson: Any => Unit = {
case m: Map[_, _] =>
gen.writeStartObject()
m.foreach { kv =>
gen.writeFieldName(kv._1.toString)
writeJson(kv._2)
}
gen.writeEndObject()
case a: Seq[_] =>
gen.writeStartArray()
a.foreach(writeJson)
gen.writeEndArray()
case x =>
gen.writeObject(x)
}
writeJson(r)
gen.flush()
val json = writer.toString
gen.close()
Geometry.fromGeoJson(json)
case x =>
throw new IllegalArgumentException(s"Can't deserialize to Geometry: ${x.getClass.getSimpleName}: $x")
}
}
}
/** Default [[GeometryType]] instance */
object GeometryType {
/** [[GeometryType]] instance to be used in schemas */
val Instance = new GeometryType()
}
| drubbo/SparkGIS | src/main/scala/org/betterers/spark/gis/GeometryType.scala | Scala | apache-2.0 | 2,538 |
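// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original drubbo/SparkGIS sources): a round
// trip through the UDT above, the way Spark SQL would use it. It assumes
// Geometry.fromGeoJson accepts the GeoJSON below, per the String branch of
// deserialize; the coordinates are made up.
// ---------------------------------------------------------------------------
import org.apache.spark.sql.types.{StructField, StructType}
import org.betterers.spark.gis.{Geometry, GeometryType}

object GeometryTypeExample {
  def main(args: Array[String]): Unit = {
    // Declare a geometry column in a Spark SQL schema.
    val schema = StructType(Seq(StructField("location", GeometryType.Instance, nullable = true)))

    // GeoJSON string -> Geometry -> GeoJSON string.
    val geoJson = """{"type":"Point","coordinates":[12.5,41.9]}"""
    val geometry: Geometry = GeometryType.Instance.deserialize(geoJson)
    val serialized: String = GeometryType.Instance.serialize(geometry)

    println(schema)
    println(serialized)
  }
}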
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.filter
import org.geotools.filter.visitor.DefaultFilterVisitor
import org.opengis.filter._
import scala.collection.JavaConversions._
class OrSplittingFilter extends DefaultFilterVisitor {
// This function really returns a Seq[Filter].
override def visit(filter: Or, data: scala.Any): AnyRef = {
filter.getChildren.flatMap { subfilter =>
this.visit(subfilter, data)
}
}
def visit(filter: Filter, data: scala.Any): Seq[Filter] = {
filter match {
case o: Or => visit(o, data).asInstanceOf[Seq[Filter]]
case _ => Seq(filter)
}
}
}
| mmatz-ccri/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/filter/OrSplittingFilter.scala | Scala | apache-2.0 | 1,231 |
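// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original GeoMesa sources): splitting an OR
// filter into its disjuncts with the visitor above. It assumes GeoTools' ECQL
// parser (gt-cql) is on the classpath; the filter text is made up.
// ---------------------------------------------------------------------------
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.core.filter.OrSplittingFilter

object OrSplittingFilterExample {
  def main(args: Array[String]): Unit = {
    val filter = ECQL.toFilter("name = 'alice' OR age > 30")
    val splitter = new OrSplittingFilter

    // The Filter overload pattern-matches on Or and flattens it into its children.
    val parts = splitter.visit(filter, null)
    parts.foreach(f => println(ECQL.toCQL(f)))
  }
}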
package com.twitter.finagle.factory
import com.twitter.finagle._
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.util.{Activity, Closable, Future, Promise, Time, Var}
import java.net.SocketAddress
import scala.collection.immutable
/**
 * Proxies requests to the current definition of 'name', queueing
* requests while it is pending.
*/
private class DynNameFactory[Req, Rep](
name: Activity[Name.Bound],
newService: (Name.Bound, ClientConnection) => Future[Service[Req, Rep]])
extends ServiceFactory[Req, Rep] {
private sealed trait State
private case class Pending(q: immutable.Queue[(ClientConnection, Promise[Service[Req, Rep]])])
extends State
private case class Named(name: Name.Bound) extends State
private case class Failed(exc: Throwable) extends State
private case class Closed() extends State
@volatile private[this] var state: State = Pending(immutable.Queue.empty)
private[this] val sub = name.run.changes respond {
case Activity.Ok(name) => synchronized {
state match {
case Pending(q) =>
state = Named(name)
for ((conn, p) <- q) p.become(this(conn))
case Failed(_) | Named(_) =>
state = Named(name)
case Closed() => //
}
}
case Activity.Failed(exc) => synchronized {
state match {
case Pending(q) =>
for ((_, p) <- q) p.setException(exc)
state = Failed(exc)
case Failed(_) =>
state = Failed(exc)
case Named(_) | Closed() =>
}
}
case Activity.Pending =>
}
def apply(conn: ClientConnection) = state match {
case Named(name) => newService(name, conn)
case Closed() => Future.exception(new ServiceClosedException)
case Failed(exc) => Future.exception(exc)
case Pending(_) => applySync(conn)
}
private[this] def applySync(conn: ClientConnection) = synchronized {
state match {
case Pending(q) =>
val p = new Promise[Service[Req, Rep]]
val el = (conn, p)
p setInterruptHandler { case exc =>
synchronized {
state match {
case Pending(q) if q contains el =>
state = Pending(q filter (_ != el))
p.setException(new CancelledConnectionException(exc))
case _ =>
}
}
}
state = Pending(q enqueue el)
p
case other => apply(conn)
}
}
def close(deadline: Time) = {
val prev = synchronized {
val prev = state
state = Closed()
prev
}
prev match {
case Pending(q) =>
val exc = new ServiceClosedException
for ((_, p) <- q)
p.setException(exc)
case _ => //
}
sub.close(deadline)
}
}
/**
* A factory that routes to the local binding of the passed-in
* [[com.twitter.finagle.Name.Path Name.Path]]. It calls `newFactory`
* to mint a new [[com.twitter.finagle.ServiceFactory
* ServiceFactory]] for novel name evaluations.
*
* A two-level caching scheme is employed for efficiency:
*
* First, Name-trees are evaluated by the default evaluation
* strategy, which produces a set of [[com.twitter.finagle.Name.Bound
* Name.Bound]]; these name-sets are cached individually so that they
* can be reused. Should different name-tree evaluations yield the
* same name-set, they will use the same underlying (cached) factory.
*
* Secondly, in order to avoid evaluating names unnecessarily, we
* also cache the evaluation relative to a [[com.twitter.finagle.Dtab
* Dtab]]. This is done to short-circuit the evaluation process most
* of the time (as we expect most requests to share a namer).
*
* @bug This is far too complicated, though it seems necessary for
 * efficiency when namers are occasionally overridden.
*
* @bug 'isAvailable' has a funny definition.
*/
private[finagle] class BindingFactory[Req, Rep](
path: Path,
newFactory: Var[Addr] => ServiceFactory[Req, Rep],
statsReceiver: StatsReceiver = NullStatsReceiver,
maxNameCacheSize: Int = 8,
maxNamerCacheSize: Int = 4)
extends ServiceFactory[Req, Rep] {
private[this] val tree = NameTree.Leaf(path)
private[this] val nameCache =
new ServiceFactoryCache[Name.Bound, Req, Rep](
bound => newFactory(bound.addr),
statsReceiver.scope("namecache"), maxNameCacheSize)
private[this] val dtabCache = {
val newFactory: Dtab => ServiceFactory[Req, Rep] = { dtab =>
val namer = dtab orElse Namer.global
val name: Activity[Name.Bound] = namer.bind(tree).map(_.eval) flatMap {
case None => Activity.exception(new NoBrokersAvailableException)
case Some(set) if set.isEmpty => Activity.exception(new NoBrokersAvailableException)
case Some(set) if set.size == 1 => Activity.value(set.head)
case Some(set) => Activity.value(Name.all(set))
}
new DynNameFactory(name, nameCache.apply)
}
new ServiceFactoryCache[Dtab, Req, Rep](
newFactory, statsReceiver.scope("dtabcache"),
maxNamerCacheSize)
}
def apply(conn: ClientConnection): Future[Service[Req, Rep]] =
dtabCache(Dtab.base ++ Dtab.local, conn)
def close(deadline: Time) =
Closable.sequence(dtabCache, nameCache).close(deadline)
override def isAvailable = dtabCache.isAvailable
}
| JustinTulloss/finagle | finagle-core/src/main/scala/com/twitter/finagle/factory/BindingFactory.scala | Scala | apache-2.0 | 5,342 |
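// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original Finagle sources): BindingFactory
// itself is private[finagle], so this shows only the public Dtab plumbing it
// consumes via Dtab.base ++ Dtab.local above. The delegation is made up.
// ---------------------------------------------------------------------------
import com.twitter.finagle.Dtab

object DtabExample {
  def main(args: Array[String]): Unit = {
    // Parse a delegation table and install it for the current request context.
    val delegations = Dtab.read("/svc/users => /$/inet/localhost/8080")
    Dtab.local = Dtab.local ++ delegations

    // BindingFactory-style resolution sees the concatenation of the base and local tables.
    println((Dtab.base ++ Dtab.local).show)
  }
}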
package helpers.changekeeper
object ValidVRMFormat {
val allValidVrmFormats = Seq(
"A9",
"A99",
"A999",
"A9999",
"AA9",
"AA99",
"AA999",
"AA9999",
"AAA9",
"AAA99",
"AAA999",
"AAA9999",
"AAA9A",
"AAA99A",
"AAA999A",
"9A",
"9AA",
"9AAA",
"99A",
"99AA",
"99AAA",
"999A",
"999AA",
"999AAA",
"9999A",
"9999AA",
"A9AAA",
"A99AAA",
"A999AAA",
"AA99AAA",
"9999AAA"
)
}
| dvla/vehicles-change-keeper-online | test/helpers/changekeeper/ValidVRMFormat.scala | Scala | mit | 490 |
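// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original dvla/vehicles-change-keeper-online
// sources): in the patterns above, 'A' appears to stand for a letter and '9'
// for a digit, in the usual DVLA registration-mark notation. The helper below
// is hypothetical and written only to show how the list could drive validation.
// ---------------------------------------------------------------------------
object ValidVRMFormatExample {
  import helpers.changekeeper.ValidVRMFormat

  private def matchesFormat(vrm: String, format: String): Boolean =
    vrm.length == format.length && vrm.zip(format).forall {
      case (c, 'A') => c.isLetter
      case (c, '9') => c.isDigit
      case _        => false
    }

  def isValid(vrm: String): Boolean =
    ValidVRMFormat.allValidVrmFormats.exists(matchesFormat(vrm.toUpperCase, _))

  def main(args: Array[String]): Unit = {
    println(isValid("AB12CDE")) // true  - matches "AA99AAA"
    println(isValid("12345"))   // false - no all-digit pattern is listed
  }
}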
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.domain
import play.api.libs.json.{Reads, Writes}
case class Org(org: String) extends TaxIdentifier with SimpleName {
override def toString = value
val name = "org"
def value = org
}
object Org extends (String => Org) {
implicit val orgWrite: Writes[Org] = new SimpleObjectWrites[Org](_.value)
implicit val orgRead: Reads[Org] = new SimpleObjectReads[Org]("org", Org.apply)
}
| hmrc/domain | src/main/scala/uk/gov/hmrc/domain/Org.scala | Scala | apache-2.0 | 1,013 |
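// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original hmrc/domain sources): using Org as
// a tax identifier. The identifier value is made up; the JSON serialisation
// relies on the implicit Writes declared above.
// ---------------------------------------------------------------------------
object OrgExample {
  import play.api.libs.json.Json
  import uk.gov.hmrc.domain.Org

  def main(args: Array[String]): Unit = {
    val org = Org("1234567890")
    println(org.name)         // "org"
    println(org.value)        // "1234567890"
    println(Json.toJson(org)) // serialised via the SimpleObjectWrites above
  }
}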
// The test which this should perform but does not
// is that f1 is recognized as irrefutable and f2 is not
// This can be recognized via the generated classes:
//
// A$$anonfun$f1$1.class
// A$$anonfun$f2$1.class
// A$$anonfun$f2$2.class
//
// The extra one in $f2$ is the filter.
//
// !!! Marking with exclamation points so maybe someday
// this test will be finished.
class A {
case class Foo[T](x: T)
def f1(xs: List[Foo[Int]]) = {
for (Foo(x: Int) <- xs) yield x
}
def f2(xs: List[Foo[Any]]) = {
for (Foo(x: Int) <- xs) yield x
}
}
| yusuke2255/dotty | tests/untried/pos/irrefutable.scala | Scala | bsd-3-clause | 557 |
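// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test): a runnable demonstration
// of the behaviour the comment above describes. A refutable pattern in a for
// comprehension goes through a withFilter step, so the `x: Int` type test
// silently drops non-matching elements. Foo is redeclared here for a
// self-contained example.
// ---------------------------------------------------------------------------
object IrrefutableExample {
  case class Foo[T](x: T)

  def main(args: Array[String]): Unit = {
    val anys: List[Foo[Any]] = List(Foo(1), Foo("two"), Foo(3))
    // Same shape as f2: the type pattern acts as a filter for Foo[Any] elements.
    val ints = for (Foo(x: Int) <- anys) yield x
    println(ints) // List(1, 3)
  }
}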
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.util.exceptions
class MultipleFailures(failures: List[Throwable]) extends Exception(failures.foldLeft("multiple failures:"){
case (s,t) => s"$s\\n${t.getMessage}"
}, failures.headOption.getOrElse(new NoSuchElementException("empty failures seq"))) | nruppin/CM-Well | server/cmwell-util/src/main/scala/cmwell/util/exceptions/MultipleFailures.scala | Scala | apache-2.0 | 881 |
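// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original CM-Well sources): a minimal use of
// MultipleFailures. The wrapped exceptions are made up.
// ---------------------------------------------------------------------------
object MultipleFailuresExample {
  import cmwell.util.exceptions.MultipleFailures

  def main(args: Array[String]): Unit = {
    val combined = new MultipleFailures(
      List(new RuntimeException("first failure"), new IllegalStateException("second failure")))

    println(combined.getMessage) // "multiple failures:" followed by each wrapped message
    println(combined.getCause)   // the head of the failure list
  }
}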
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the SonarInstance entity.
*/
class SonarInstanceGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connectionHeader("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authentication = Map(
"Content-Type" -> """application/json""",
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"Authorization" -> "${access_token}"
)
val scn = scenario("Test the SonarInstance entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authenticate")
.headers(headers_http_authentication)
.body(StringBody("""{"username":"admin", "password":"admin"}""")).asJSON
.check(header.get("Authorization").saveAs("access_token"))).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10)
.repeat(2) {
exec(http("Get all sonarInstances")
.get("/gamecraftsonarmanager/api/sonar-instances")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new sonarInstance")
.post("/gamecraftsonarmanager/api/sonar-instances")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "sonarInstanceName":"SAMPLE_TEXT", "sonarInstanceDescription":"SAMPLE_TEXT", "sonarInstanceRunnerPath":"SAMPLE_TEXT", "sonarInstanceEnabled":null}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_sonarInstance_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created sonarInstance")
.get("/gamecraftsonarmanager${new_sonarInstance_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created sonarInstance")
.delete("/gamecraftsonarmanager${new_sonarInstance_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(Integer.getInteger("users", 100)) over (Integer.getInteger("ramp", 1) minutes))
).protocols(httpConf)
}
| iMartinezMateu/gamecraft | gamecraft-sonar-manager/src/test/gatling/user-files/simulations/SonarInstanceGatlingTest.scala | Scala | mit | 3,628 |
package com.twitter.finagle
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
@RunWith(classOf[JUnitRunner])
class NameTreeParsersTest extends FunSuite with AssertionsForJUnit {
test("parsePath") {
assert(NameTreeParsers.parsePath("/") == Path.empty)
assert(NameTreeParsers.parsePath(" /foo/bar ") == Path.Utf8("foo", "bar"))
assert(NameTreeParsers.parsePath("/\\x66\\x6f\\x6F") == Path.Utf8("foo"))
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("") }
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/foo/bar/") }
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/{}") }
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/\\?") }
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/\\x?") }
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/\\x0?") }
}
test("error messages") {
assert(
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/foo^bar") }
.getMessage contains "'/foo[^]bar'")
assert(
intercept[IllegalArgumentException] { NameTreeParsers.parsePath("/foo/bar/") }
.getMessage contains "'/foo/bar/[]'")
}
test("parseNameTree") {
val defaultWeight = NameTree.Weighted.defaultWeight
assert(NameTreeParsers.parseNameTree("! | ~ | $") == NameTree.Alt(NameTree.Fail, NameTree.Neg, NameTree.Empty))
assert(NameTreeParsers.parseNameTree("/foo/bar") == NameTree.Leaf(Path.Utf8("foo", "bar")))
assert(NameTreeParsers.parseNameTree(" /foo & /bar ") ==
NameTree.Union(
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("foo"))),
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("bar")))))
assert(NameTreeParsers.parseNameTree(" /foo | /bar ") ==
NameTree.Alt(NameTree.Leaf(Path.Utf8("foo")), NameTree.Leaf(Path.Utf8("bar"))))
assert(NameTreeParsers.parseNameTree("/foo & /bar | /bar & /baz") ==
NameTree.Alt(
NameTree.Union(
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("foo"))),
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("bar")))),
NameTree.Union(
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("bar"))),
NameTree.Weighted(defaultWeight, NameTree.Leaf(Path.Utf8("baz"))))))
assert(NameTreeParsers.parseNameTree("1 * /foo & 2 * /bar | .5 * /bar & .5 * /baz") ==
NameTree.Alt(
NameTree.Union(
NameTree.Weighted(1D, NameTree.Leaf(Path.Utf8("foo"))),
NameTree.Weighted(2D, NameTree.Leaf(Path.Utf8("bar")))),
NameTree.Union(
NameTree.Weighted(0.5D, NameTree.Leaf(Path.Utf8("bar"))),
NameTree.Weighted(0.5D, NameTree.Leaf(Path.Utf8("baz"))))))
intercept[IllegalArgumentException] { NameTreeParsers.parseNameTree("") }
intercept[IllegalArgumentException] { NameTreeParsers.parseNameTree("#") }
intercept[IllegalArgumentException] { NameTreeParsers.parseNameTree("/foo &") }
intercept[IllegalArgumentException] { NameTreeParsers.parseNameTree("/foo & 0.1.2 * /bar")}
intercept[IllegalArgumentException] { NameTreeParsers.parseNameTree("/foo & . * /bar")}
}
test("parseDentry") {
assert(NameTreeParsers.parseDentry("/=>!") == Dentry(Path.empty, NameTree.Fail))
assert(NameTreeParsers.parseDentry("/ => !") == Dentry(Path.empty, NameTree.Fail))
intercept[IllegalArgumentException] { NameTreeParsers.parseDentry("/&!") }
}
test("parseDtab") {
assert(NameTreeParsers.parseDtab("") == Dtab.empty)
assert(NameTreeParsers.parseDtab(" /=>! ") == Dtab(IndexedSeq(Dentry(Path.empty, NameTree.Fail))))
assert(NameTreeParsers.parseDtab("/=>!;") == Dtab(IndexedSeq(Dentry(Path.empty, NameTree.Fail))))
assert(NameTreeParsers.parseDtab("/=>!;/foo=>/bar") ==
Dtab(IndexedSeq(
Dentry(Path.empty, NameTree.Fail),
Dentry(Path.Utf8("foo"), NameTree.Leaf(Path.Utf8("bar"))))))
}
}
| liamstewart/finagle | finagle-core/src/test/scala/com/twitter/finagle/NameTreeParsersTest.scala | Scala | apache-2.0 | 4,048 |
package ucesoft.cbm.expansion
trait ExpansionPortConfigurationListener {
def expansionPortConfigurationChanged(game:Boolean,exrom:Boolean) : Unit
} | abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/expansion/ExpansionPortConfigurationListener.scala | Scala | mit | 149 |
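// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original kernal64 sources): a trivial
// implementation of the listener trait above. A real listener would react to
// the GAME/EXROM line changes (e.g. reconfigure memory banking) rather than
// print them.
// ---------------------------------------------------------------------------
object LoggingExpansionPortListener extends ucesoft.cbm.expansion.ExpansionPortConfigurationListener {
  def expansionPortConfigurationChanged(game: Boolean, exrom: Boolean): Unit =
    println(s"Expansion port lines changed: GAME=$game EXROM=$exrom")
}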
package com.komanov.junk.scl_8869
// https://youtrack.jetbrains.com/issue/SCL-8869
class WrongConvertToAnonymous extends MockitoStubsCut {
val myInterface: MyInterface = null
myInterface.convert(1) answers (o => o match {
case t: Int => t.toString
})
myInterface.convert(1) answers (_ match {
case t: Int => t.toString
})
}
trait MyInterface {
def convert(i: Int): String
}
trait MockitoStubsCut {
implicit def theStubbed[T](c: => T): Stubbed[T] = new Stubbed(c)
class Stubbed[T](c: => T) {
def answers(function: Any => T) = {
}
def answers(function: (Any, Any) => T) = {
}
}
}
| dkomanov/stuff | src/com/komanov/junk/scl_8869/WrongConvertToAnonymous.scala | Scala | mit | 628 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.{File, IOException}
import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.file.StandardOpenOption
import kafka.utils.{Logging, nonthreadsafe}
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.utils.Utils
import scala.collection.mutable.ListBuffer
private[log] case class TxnIndexSearchResult(abortedTransactions: List[AbortedTxn], isComplete: Boolean)
/**
* The transaction index maintains metadata about the aborted transactions for each segment. This includes
* the start and end offsets for the aborted transactions and the last stable offset (LSO) at the time of
* the abort. This index is used to find the aborted transactions in the range of a given fetch request at
* the READ_COMMITTED isolation level.
*
* There is at most one transaction index for each log segment. The entries correspond to the transactions
* whose commit markers were written in the corresponding log segment. Note, however, that individual transactions
* may span multiple segments. Recovering the index therefore requires scanning the earlier segments in
* order to find the start of the transactions.
*/
@nonthreadsafe
class TransactionIndex(val startOffset: Long, @volatile var file: File) extends Logging {
// note that the file is not created until we need it
@volatile private var maybeChannel: Option[FileChannel] = None
private var lastOffset: Option[Long] = None
if (file.exists)
openChannel()
def append(abortedTxn: AbortedTxn): Unit = {
lastOffset.foreach { offset =>
if (offset >= abortedTxn.lastOffset)
throw new IllegalArgumentException("The last offset of appended transactions must increase sequentially")
}
lastOffset = Some(abortedTxn.lastOffset)
Utils.writeFully(channel, abortedTxn.buffer.duplicate())
}
def flush(): Unit = maybeChannel.foreach(_.force(true))
def delete(): Boolean = {
maybeChannel.forall { channel =>
channel.force(true)
close()
file.delete()
}
}
private def channel: FileChannel = {
maybeChannel match {
case Some(channel) => channel
case None => openChannel()
}
}
private def openChannel(): FileChannel = {
val channel = FileChannel.open(file.toPath, StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.CREATE)
maybeChannel = Some(channel)
channel.position(channel.size)
channel
}
def truncate() = {
maybeChannel.foreach(_.truncate(0))
lastOffset = None
}
def close(): Unit = {
maybeChannel.foreach(_.close())
maybeChannel = None
}
def renameTo(f: File): Unit = {
try {
if (file.exists)
Utils.atomicMoveWithFallback(file.toPath, f.toPath)
} finally file = f
}
def truncateTo(offset: Long): Unit = {
val buffer = ByteBuffer.allocate(AbortedTxn.TotalSize)
var newLastOffset: Option[Long] = None
for ((abortedTxn, position) <- iterator(() => buffer)) {
if (abortedTxn.lastOffset >= offset) {
channel.truncate(position)
lastOffset = newLastOffset
return
}
newLastOffset = Some(abortedTxn.lastOffset)
}
}
private def iterator(allocate: () => ByteBuffer = () => ByteBuffer.allocate(AbortedTxn.TotalSize)): Iterator[(AbortedTxn, Int)] = {
maybeChannel match {
case None => Iterator.empty
case Some(channel) =>
var position = 0
new Iterator[(AbortedTxn, Int)] {
override def hasNext: Boolean = channel.position - position >= AbortedTxn.TotalSize
override def next(): (AbortedTxn, Int) = {
try {
val buffer = allocate()
Utils.readFully(channel, buffer, position)
buffer.flip()
val abortedTxn = new AbortedTxn(buffer)
if (abortedTxn.version > AbortedTxn.CurrentVersion)
throw new KafkaException(s"Unexpected aborted transaction version ${abortedTxn.version}, " +
s"current version is ${AbortedTxn.CurrentVersion}")
val nextEntry = (abortedTxn, position)
position += AbortedTxn.TotalSize
nextEntry
} catch {
case e: IOException =>
// We received an unexpected error reading from the index file. We propagate this as an
// UNKNOWN error to the consumer, which will cause it to retry the fetch.
throw new KafkaException(s"Failed to read from the transaction index $file", e)
}
}
}
}
}
def allAbortedTxns: List[AbortedTxn] = {
iterator().map(_._1).toList
}
/**
* Collect all aborted transactions which overlap with a given fetch range.
*
* @param fetchOffset Inclusive first offset of the fetch range
* @param upperBoundOffset Exclusive last offset in the fetch range
* @return An object containing the aborted transactions and whether the search needs to continue
* into the next log segment.
*/
def collectAbortedTxns(fetchOffset: Long, upperBoundOffset: Long): TxnIndexSearchResult = {
val abortedTransactions = ListBuffer.empty[AbortedTxn]
for ((abortedTxn, _) <- iterator()) {
if (abortedTxn.lastOffset >= fetchOffset && abortedTxn.firstOffset < upperBoundOffset)
abortedTransactions += abortedTxn
if (abortedTxn.lastStableOffset >= upperBoundOffset)
return TxnIndexSearchResult(abortedTransactions.toList, isComplete = true)
}
TxnIndexSearchResult(abortedTransactions.toList, isComplete = false)
}
def sanityCheck(): Unit = {
val buffer = ByteBuffer.allocate(AbortedTxn.TotalSize)
for ((abortedTxn, _) <- iterator(() => buffer)) {
require(abortedTxn.lastOffset >= startOffset)
}
}
}
private[log] object AbortedTxn {
val VersionOffset = 0
val VersionSize = 2
val ProducerIdOffset = VersionOffset + VersionSize
val ProducerIdSize = 8
val FirstOffsetOffset = ProducerIdOffset + ProducerIdSize
val FirstOffsetSize = 8
val LastOffsetOffset = FirstOffsetOffset + FirstOffsetSize
val LastOffsetSize = 8
val LastStableOffsetOffset = LastOffsetOffset + LastOffsetSize
val LastStableOffsetSize = 8
val TotalSize = LastStableOffsetOffset + LastStableOffsetSize
val CurrentVersion: Short = 0
}
private[log] class AbortedTxn(val buffer: ByteBuffer) {
import AbortedTxn._
def this(producerId: Long,
firstOffset: Long,
lastOffset: Long,
lastStableOffset: Long) = {
this(ByteBuffer.allocate(AbortedTxn.TotalSize))
buffer.putShort(CurrentVersion)
buffer.putLong(producerId)
buffer.putLong(firstOffset)
buffer.putLong(lastOffset)
buffer.putLong(lastStableOffset)
buffer.flip()
}
def this(completedTxn: CompletedTxn, lastStableOffset: Long) =
this(completedTxn.producerId, completedTxn.firstOffset, completedTxn.lastOffset, lastStableOffset)
def version: Short = buffer.get(VersionOffset)
def producerId: Long = buffer.getLong(ProducerIdOffset)
def firstOffset: Long = buffer.getLong(FirstOffsetOffset)
def lastOffset: Long = buffer.getLong(LastOffsetOffset)
def lastStableOffset: Long = buffer.getLong(LastStableOffsetOffset)
def asAbortedTransaction: AbortedTransaction = new AbortedTransaction(producerId, firstOffset)
override def toString: String =
s"AbortedTxn(version=$version, producerId=$producerId, firstOffset=$firstOffset, " +
s"lastOffset=$lastOffset, lastStableOffset=$lastStableOffset)"
override def equals(any: Any): Boolean = {
any match {
case that: AbortedTxn => this.buffer.equals(that.buffer)
case _ => false
}
}
override def hashCode(): Int = buffer.hashCode
}
| airbnb/kafka | core/src/main/scala/kafka/log/TransactionIndex.scala | Scala | apache-2.0 | 8,618 |
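// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original Kafka sources): exercising the
// index above. TransactionIndex and AbortedTxn are private[log], so a real
// caller would live in package kafka.log; the offsets are made up.
// ---------------------------------------------------------------------------
package kafka.log

object TransactionIndexExample {
  def main(args: Array[String]): Unit = {
    val file = java.io.File.createTempFile("txn-index-example", ".txnindex")
    val index = new TransactionIndex(0L, file)

    // Entries must be appended with strictly increasing last offsets.
    index.append(new AbortedTxn(1L, 5L, 10L, 12L))
    index.append(new AbortedTxn(2L, 20L, 30L, 32L))

    // Collect aborted transactions overlapping the fetch range [0, 25).
    val result = index.collectAbortedTxns(fetchOffset = 0L, upperBoundOffset = 25L)
    result.abortedTransactions.foreach(println)

    index.close()
    file.delete()
  }
}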
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.filterexpr
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
* Test cases for testing columns having \\N or \\null values for non numeric columns
*/
class TestAndEqualFilterEmptyOperandValue extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop table if exists carbonTable")
sql("drop table if exists hiveTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT
)
val csvFilePath = s"$resourcesPath/filter/datagrtlrt.csv"
sql(
"CREATE TABLE IF NOT EXISTS carbonTable(date Timestamp, country String, salary Int) STORED " +
"BY " +
"'carbondata'"
)
sql(
"create table if not exists hiveTable(date Timestamp, country String, salary Int)row format" +
" delimited fields " +
"terminated by ','"
)
sql(
"LOAD DATA LOCAL INPATH '" + csvFilePath + "' into table carbonTable OPTIONS " +
"('FILEHEADER'='date,country,salary')"
)
sql(
"LOAD DATA local inpath '" + csvFilePath + "' INTO table hiveTable"
)
}
test("select * from carbonTable where country='' and salary='')") {
checkAnswer(
sql("select * from carbonTable where country='' and salary=''"),
sql("select * from hiveTable where country='' and salary=''")
)
}
test("select * from carbonTable where date='' and salary='')") {
checkAnswer(
sql("select * from carbonTable where date='' and salary=''"),
sql("select * from hiveTable where date='' and salary=''")
)
}
override def afterAll {
sql("drop table if exists carbonTable")
sql("drop table if exists hiveTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
}
}
| Sephiroth-Lin/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestAndEqualFilterEmptyOperandValue.scala | Scala | apache-2.0 | 2,858 |
package com.github.yandoroshenko.workhourscounter.calendar
import java.time.LocalDate
import play.api.libs.json.{JsArray, JsNumber, Json}
import scala.util.{Failure, Success, Try}
/**
* Created by Yan Doroshenko (yandoroshenko@protonmail.com) on 15.05.2017.
*/
trait HolidayCalendar extends Calendar {
case class Holiday(date: LocalDate, name: String)
private val BaseUrl: String = "http://kayaposoft.com/enrico/json/v1.0/"
def getHolidays(from: LocalDate, to: LocalDate, countryCode: String): Either[Throwable, Set[Holiday]] = {
Try(getDays(from, to)
.map(_.withDayOfMonth(1))
.distinct
.map(d => {
val url = f"""$BaseUrl?action=getPublicHolidaysForMonth&month=${d.getMonthValue().toString()}&year=${d.getYear.toString()}&country=$countryCode"""
log.info(f"""Reading data from $url""")
val response = io.Source.fromURL(url).mkString
log.info(f"""Received response from $url""")
log.debug(response)
Json.parse(response)
.as[JsArray].value
.map(v => v \\ "date" -> v \\ "localName")
.map(v => Holiday(
LocalDate.of(
(v._1 \\ "year").as[JsNumber].value.toIntExact,
(v._1 \\ "month").as[JsNumber].value.toIntExact,
(v._1 \\ "day").as[JsNumber].value.toInt),
v._2.as[String]))
})) match {
case Success(a) =>
val r = a.reduceLeft(_ ++ _).toSet
log.info(f"""Holidays between $from and $to: ${r.mkString("[", "\\n", "]")}""")
Right(r)
case Failure(e) =>
log.error(f"""Error getting holidays: ${e.getLocalizedMessage()}""", e)
Left(e)
}
}
} | YanDoroshenko/work-hours-counter | src/main/scala/com/github/yandoroshenko/workhourscounter/calendar/HolidayCalendar.scala | Scala | gpl-3.0 | 1,673 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import joptsimple._
import kafka.utils._
import kafka.consumer._
import kafka.client.ClientUtils
import kafka.api.{FetchRequestBuilder, OffsetRequest, Request}
import kafka.cluster.BrokerEndPoint
import scala.collection.JavaConverters._
import kafka.common.{MessageFormatter, TopicAndPartition}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.utils.Utils
/**
* Command line program to dump out messages to standard out using the simple consumer
*/
object SimpleConsumerShell extends Logging {
def UseLeaderReplica = -1
def main(args: Array[String]): Unit = {
val parser = new OptionParser
val brokerListOpt = parser.accepts("broker-list", "REQUIRED: The list of hostname and port of the server to connect to.")
.withRequiredArg
.describedAs("hostname:port,...,hostname:port")
.ofType(classOf[String])
val topicOpt = parser.accepts("topic", "REQUIRED: The topic to consume from.")
.withRequiredArg
.describedAs("topic")
.ofType(classOf[String])
val partitionIdOpt = parser.accepts("partition", "The partition to consume from.")
.withRequiredArg
.describedAs("partition")
.ofType(classOf[java.lang.Integer])
.defaultsTo(0)
val replicaIdOpt = parser.accepts("replica", "The replica id to consume from, default -1 means leader broker.")
.withRequiredArg
.describedAs("replica id")
.ofType(classOf[java.lang.Integer])
.defaultsTo(UseLeaderReplica)
val offsetOpt = parser.accepts("offset", "The offset id to consume from, default to -2 which means from beginning; while value -1 means from end")
.withRequiredArg
.describedAs("consume offset")
.ofType(classOf[java.lang.Long])
.defaultsTo(OffsetRequest.EarliestTime)
val clientIdOpt = parser.accepts("clientId", "The ID of this client.")
.withRequiredArg
.describedAs("clientId")
.ofType(classOf[String])
.defaultsTo("SimpleConsumerShell")
val fetchSizeOpt = parser.accepts("fetchsize", "The fetch size of each request.")
.withRequiredArg
.describedAs("fetchsize")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1024 * 1024)
val messageFormatterOpt = parser.accepts("formatter", "The name of a class to use for formatting kafka messages for display.")
.withRequiredArg
.describedAs("class")
.ofType(classOf[String])
.defaultsTo(classOf[DefaultMessageFormatter].getName)
val messageFormatterArgOpt = parser.accepts("property")
.withRequiredArg
.describedAs("prop")
.ofType(classOf[String])
val printOffsetOpt = parser.accepts("print-offsets", "Print the offsets returned by the iterator")
val maxWaitMsOpt = parser.accepts("max-wait-ms", "The max amount of time each fetch request waits.")
.withRequiredArg
.describedAs("ms")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1000)
val maxMessagesOpt = parser.accepts("max-messages", "The number of messages to consume")
.withRequiredArg
.describedAs("max-messages")
.ofType(classOf[java.lang.Integer])
.defaultsTo(Integer.MAX_VALUE)
val skipMessageOnErrorOpt = parser.accepts("skip-message-on-error", "If there is an error when processing a message, " +
"skip it instead of halt.")
val noWaitAtEndOfLogOpt = parser.accepts("no-wait-at-logend",
"If set, when the simple consumer reaches the end of the Log, it will stop, not waiting for new produced messages")
if(args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "A low-level tool for fetching data directly from a particular replica.")
val options = parser.parse(args : _*)
CommandLineUtils.checkRequiredArgs(parser, options, brokerListOpt, topicOpt)
val topic = options.valueOf(topicOpt)
val partitionId = options.valueOf(partitionIdOpt).intValue()
val replicaId = options.valueOf(replicaIdOpt).intValue()
var startingOffset = options.valueOf(offsetOpt).longValue
val fetchSize = options.valueOf(fetchSizeOpt).intValue
val clientId = options.valueOf(clientIdOpt).toString
val maxWaitMs = options.valueOf(maxWaitMsOpt).intValue()
val maxMessages = options.valueOf(maxMessagesOpt).intValue
val skipMessageOnError = options.has(skipMessageOnErrorOpt)
val printOffsets = options.has(printOffsetOpt)
val noWaitAtEndOfLog = options.has(noWaitAtEndOfLogOpt)
val messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt))
val formatterArgs = CommandLineUtils.parseKeyValueArgs(options.valuesOf(messageFormatterArgOpt).asScala)
val fetchRequestBuilder = new FetchRequestBuilder()
.clientId(clientId)
.replicaId(Request.DebuggingConsumerId)
.maxWait(maxWaitMs)
.minBytes(ConsumerConfig.MinFetchBytes)
// getting topic metadata
info("Getting topic metatdata...")
val brokerList = options.valueOf(brokerListOpt)
ToolsUtils.validatePortOrDie(parser,brokerList)
val metadataTargetBrokers = ClientUtils.parseBrokerList(brokerList)
val topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), metadataTargetBrokers, clientId, maxWaitMs).topicsMetadata
if(topicsMetadata.size != 1 || !topicsMetadata.head.topic.equals(topic)) {
System.err.println(("Error: no valid topic metadata for topic: %s, " + "what we get from server is only: %s").format(topic, topicsMetadata))
Exit.exit(1)
}
// validating partition id
val partitionsMetadata = topicsMetadata.head.partitionsMetadata
val partitionMetadataOpt = partitionsMetadata.find(p => p.partitionId == partitionId)
if (partitionMetadataOpt.isEmpty) {
System.err.println("Error: partition %d does not exist for topic %s".format(partitionId, topic))
Exit.exit(1)
}
// validating replica id and initializing target broker
var fetchTargetBroker: BrokerEndPoint = null
var replicaOpt: Option[BrokerEndPoint] = null
if (replicaId == UseLeaderReplica) {
replicaOpt = partitionMetadataOpt.get.leader
if (replicaOpt.isEmpty) {
System.err.println("Error: user specifies to fetch from leader for partition (%s, %d) which has not been elected yet".format(topic, partitionId))
Exit.exit(1)
}
}
else {
val replicasForPartition = partitionMetadataOpt.get.replicas
replicaOpt = replicasForPartition.find(r => r.id == replicaId)
if(replicaOpt.isEmpty) {
System.err.println("Error: replica %d does not exist for partition (%s, %d)".format(replicaId, topic, partitionId))
Exit.exit(1)
}
}
fetchTargetBroker = replicaOpt.get
// initializing starting offset
if(startingOffset < OffsetRequest.EarliestTime) {
System.err.println("Invalid starting offset: %d".format(startingOffset))
Exit.exit(1)
}
if (startingOffset < 0) {
val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host,
fetchTargetBroker.port,
ConsumerConfig.SocketTimeout,
ConsumerConfig.SocketBufferSize, clientId)
try {
startingOffset = simpleConsumer.earliestOrLatestOffset(TopicAndPartition(topic, partitionId), startingOffset,
Request.DebuggingConsumerId)
} catch {
case t: Throwable =>
System.err.println("Error in getting earliest or latest offset due to: " + Utils.stackTrace(t))
Exit.exit(1)
} finally {
if (simpleConsumer != null)
simpleConsumer.close()
}
}
// initializing formatter
val formatter = messageFormatterClass.newInstance().asInstanceOf[MessageFormatter]
formatter.init(formatterArgs)
    val replicaString = if (replicaId == UseLeaderReplica) "leader" else "replica"
info("Starting simple consumer shell to partition [%s, %d], %s [%d], host and port: [%s, %d], from offset [%d]"
.format(topic, partitionId, replicaString, replicaId,
fetchTargetBroker.host,
fetchTargetBroker.port, startingOffset))
val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host,
fetchTargetBroker.port,
10000, 64*1024, clientId)
val thread = Utils.newThread("kafka-simpleconsumer-shell", new Runnable() {
def run() {
var offset = startingOffset
var numMessagesConsumed = 0
try {
while (numMessagesConsumed < maxMessages) {
val fetchRequest = fetchRequestBuilder
.addFetch(topic, partitionId, offset, fetchSize)
.build()
val fetchResponse = simpleConsumer.fetch(fetchRequest)
val messageSet = fetchResponse.messageSet(topic, partitionId)
if (messageSet.validBytes <= 0 && noWaitAtEndOfLog) {
println("Terminating. Reached the end of partition (%s, %d) at offset %d".format(topic, partitionId, offset))
return
}
debug("multi fetched " + messageSet.sizeInBytes + " bytes from offset " + offset)
for (messageAndOffset <- messageSet if numMessagesConsumed < maxMessages) {
try {
offset = messageAndOffset.nextOffset
if (printOffsets)
System.out.println("next offset = " + offset)
val message = messageAndOffset.message
val key = if (message.hasKey) Utils.readBytes(message.key) else null
val value = if (message.isNull) null else Utils.readBytes(message.payload)
val serializedKeySize = if (message.hasKey) key.size else -1
val serializedValueSize = if (message.isNull) -1 else value.size
formatter.writeTo(new ConsumerRecord(topic, partitionId, offset, message.timestamp,
message.timestampType, message.checksum, serializedKeySize, serializedValueSize, key, value), System.out)
numMessagesConsumed += 1
} catch {
case e: Throwable =>
if (skipMessageOnError)
error("Error processing message, skipping this message: ", e)
else
throw e
}
if (System.out.checkError()) {
// This means no one is listening to our output stream any more, time to shutdown
System.err.println("Unable to write to standard out, closing consumer.")
formatter.close()
simpleConsumer.close()
Exit.exit(1)
}
}
}
} catch {
case e: Throwable =>
error("Error consuming topic, partition, replica (%s, %d, %d) with offset [%d]".format(topic, partitionId, replicaId, offset), e)
} finally {
info(s"Consumed $numMessagesConsumed messages")
}
}
}, false)
thread.start()
thread.join()
System.out.flush()
formatter.close()
simpleConsumer.close()
}
}
| ijuma/kafka | core/src/main/scala/kafka/tools/SimpleConsumerShell.scala | Scala | apache-2.0 | 12,990 |
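// ---------------------------------------------------------------------------
// Illustrative note (not from the original Kafka sources): this class is driven
// from the command line rather than from code. A typical invocation, using
// Kafka's generic class runner and a hypothetical broker/topic, looks roughly
// like:
//
//   bin/kafka-run-class.sh kafka.tools.SimpleConsumerShell \
//     --broker-list localhost:9092 --topic test --partition 0 \
//     --offset -2 --max-messages 10 --print-offsets
//
// The flag names correspond to the OptionParser definitions above.
// ---------------------------------------------------------------------------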
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <http://unlicense.org/>
package com.pasviegas.shoushiling.cli.system
package object exceptions {
case object GameHasNoMatch
extends Exception("No Match was found, can you please try again?")
case object GameHasNotBeenConfigured
extends Exception("No Game has been configured, can you please try again?")
case object NoGameModeSelected
extends Exception("No Game mode selected, can you please try again?")
case object UnknownGameModeSelected
extends Exception("Wrong game mode selected, can you please try again?")
case object UnknownMoveSelected
extends Exception("Wrong move selected, can you please try again?")
case object ConfigFileNotFound
extends Exception("File not found, can you please try again?")
case object ConfigFileNotInCorrectFormat
extends Exception("File not in correct format, can you please try again?")
}
| pasviegas/shoushiling | cli/src/main/scala/com/pasviegas/shoushiling/cli/system/exceptions/package.scala | Scala | unlicense | 2,132 |
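// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original shoushiling sources): how these
// case objects can flow through an Either-based pipeline. The result value is
// made up.
// ---------------------------------------------------------------------------
object ExceptionsExample {
  import com.pasviegas.shoushiling.cli.system.exceptions._

  def main(args: Array[String]): Unit = {
    val outcome: Either[Throwable, String] = Left(UnknownMoveSelected)

    outcome match {
      case Left(UnknownMoveSelected) => println(UnknownMoveSelected.getMessage)
      case Left(other)               => println(s"failed: ${other.getMessage}")
      case Right(winner)             => println(s"winner: $winner")
    }
  }
}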
package org.apache.spark.ml.bundle.ops.feature
import ml.combust.bundle.BundleContext
import ml.combust.bundle.op.{OpModel, OpNode}
import ml.combust.bundle.dsl._
import org.apache.spark.ml.bundle.{ParamSpec, SimpleParamSpec, SimpleSparkOp, SparkBundleContext}
import org.apache.spark.ml.feature.Tokenizer
/**
* Created by hollinwilkins on 8/21/16.
*/
class TokenizerOp extends SimpleSparkOp[Tokenizer] {
override val Model: OpModel[SparkBundleContext, Tokenizer] = new OpModel[SparkBundleContext, Tokenizer] {
override val klazz: Class[Tokenizer] = classOf[Tokenizer]
override def opName: String = Bundle.BuiltinOps.feature.tokenizer
override def store(model: Model, obj: Tokenizer)
(implicit context: BundleContext[SparkBundleContext]): Model = { model }
override def load(model: Model)
(implicit context: BundleContext[SparkBundleContext]): Tokenizer = new Tokenizer(uid = "")
}
override def sparkLoad(uid: String, shape: NodeShape, model: Tokenizer): Tokenizer = {
new Tokenizer(uid = uid)
}
override def sparkInputs(obj: Tokenizer): Seq[ParamSpec] = {
Seq("input" -> obj.inputCol)
}
override def sparkOutputs(obj: Tokenizer): Seq[SimpleParamSpec] = {
Seq("output" -> obj.outputCol)
}
}
| combust/mleap | mleap-spark/src/main/scala/org/apache/spark/ml/bundle/ops/feature/TokenizerOp.scala | Scala | apache-2.0 | 1,290 |
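// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original MLeap sources): the op above
// (de)serialises Spark's Tokenizer for MLeap bundles. Configuring the Spark
// stage itself looks like this; the column names are made up and the
// bundle-writing API is not shown.
// ---------------------------------------------------------------------------
import org.apache.spark.ml.feature.Tokenizer

object TokenizerExample {
  def main(args: Array[String]): Unit = {
    val tokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words")

    // These are the params TokenizerOp maps to the bundle's "input" and "output" sockets.
    println(tokenizer.getInputCol)
    println(tokenizer.getOutputCol)
  }
}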
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object search extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template5[String,String,String,List[User],List[Workflow],play.api.templates.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(username: String, userId: String, category: String, results: List[User], resultswf: List[Workflow]):play.api.templates.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.102*/("""
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Document</title>
<link href='"""),_display_(Seq[Any](/*8.22*/routes/*8.28*/.Assets.at("css/bootstrap.min.css"))),format.raw/*8.63*/("""'
rel="stylesheet"/>
<link href='"""),_display_(Seq[Any](/*10.22*/routes/*10.28*/.Assets.at("css/header.css"))),format.raw/*10.56*/("""'
rel="stylesheet"/>
<script src='"""),_display_(Seq[Any](/*13.23*/routes/*13.29*/.Assets.at("js/jquery-1.1.js"))),format.raw/*13.59*/("""'></script>
</head>
<body>
<div id="header">
"""),_display_(Seq[Any](/*17.10*/fixed/*17.15*/.header(username, Long.parseLong(userId), Long.parseLong(userId)))),format.raw/*17.80*/("""
</div>
<div class="container row">
<div class="col-md-8 col-md-offset-1">
<form action="/searchResult">
<div class="input-group">
<div class="input-group-btn">
<button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"><span id="searchLabel">User</span> <span class="caret"></span></button>
<ul id="search-category" class="dropdown-menu">
<li><a href="javascript:void(0)" data-opt="user">User</a></li>
<li><a href="javascript:void(0)" data-opt="workflow">Workflow</a></li>
<li><a href="javascript:void(0)" data-opt="tag">Tag</a></li>
</ul>
</div> <!-- /btn-group -->
<input type="hidden" value="user" name="category" id="input-category">
<input type="text" class="form-control" placeholder="Search for..." name="keywd" aria-label="...">
<span class="input-group-btn">
<input class="btn btn-default" type="submit">Search</input>
</span>
</div> <!-- /input-group -->
</form>
"""),_display_(Seq[Any](/*38.18*/if((results != null && results.size > 0) || (resultswf != null && resultswf.size > 0))/*38.104*/ {_display_(Seq[Any](format.raw/*38.106*/("""
<h3>Search result:</h3>
<table class="table">
"""),_display_(Seq[Any](/*41.26*/if(category.equals("user"))/*41.53*/ {_display_(Seq[Any](format.raw/*41.55*/("""
<tr><th>User</th><th>Email</th><th>View</th></tr>
"""),_display_(Seq[Any](/*43.30*/for( result <- results) yield /*43.53*/ {_display_(Seq[Any](format.raw/*43.55*/("""
<tr>
<td>"""),_display_(Seq[Any](/*45.42*/result/*45.48*/.getUserName)),format.raw/*45.60*/("""</td>
<td>"""),_display_(Seq[Any](/*46.42*/result/*46.48*/.getEmail)),format.raw/*46.57*/("""</td>
<td><a href="/profile/"""),_display_(Seq[Any](/*47.60*/result/*47.66*/.getId)),format.raw/*47.72*/("""">More</a></td>
</tr>
""")))})),format.raw/*49.30*/("""
""")))})),format.raw/*50.26*/("""
"""),_display_(Seq[Any](/*51.26*/if(category.equals("tag") || category.equals("workflow"))/*51.83*/ {_display_(Seq[Any](format.raw/*51.85*/("""
<tr><th>Title</th><th>Description</th></tr>
"""),_display_(Seq[Any](/*53.30*/for( result <- resultswf) yield /*53.55*/ {_display_(Seq[Any](format.raw/*53.57*/("""
<tr>
<td><a href="/workflow/get/"""),_display_(Seq[Any](/*55.65*/result/*55.71*/.getId)),format.raw/*55.77*/("""">"""),_display_(Seq[Any](/*55.80*/result/*55.86*/.getWfTitle)),format.raw/*55.97*/("""</a></td>
<td>"""),_display_(Seq[Any](/*56.42*/result/*56.48*/.getWfDesc)),format.raw/*56.58*/("""</td>
</tr>
""")))})),format.raw/*58.30*/("""
""")))})),format.raw/*59.26*/("""
</table>
""")))}/*62.19*/else/*62.24*/{_display_(Seq[Any](format.raw/*62.25*/("""
"""),_display_(Seq[Any](/*63.22*/if(category != null)/*63.42*/ {_display_(Seq[Any](format.raw/*63.44*/("""
<h3>We haven't find any matches.</h3>
""")))})),format.raw/*65.22*/("""
""")))})),format.raw/*66.18*/("""
</div>
</div>
<script src='"""),_display_(Seq[Any](/*69.23*/routes/*69.29*/.Assets.at("/js/wf-search.js"))),format.raw/*69.59*/("""'></script>
</body>
</html>
"""))}
}
def render(username:String,userId:String,category:String,results:List[User],resultswf:List[Workflow]): play.api.templates.HtmlFormat.Appendable = apply(username,userId,category,results,resultswf)
def f:((String,String,String,List[User],List[Workflow]) => play.api.templates.HtmlFormat.Appendable) = (username,userId,category,results,resultswf) => apply(username,userId,category,results,resultswf)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Thu Apr 07 14:52:40 PDT 2016
SOURCE: /home/dimitris/CMU/SA&D/Project/ApacheCMDA-Frontend/app/views/search.scala.html
HASH: a910341ccb4178811fe15302176d56be4da34d33
MATRIX: 815->1|1010->101|1175->231|1189->237|1245->272|1331->322|1346->328|1396->356|1484->408|1499->414|1551->444|1657->514|1671->519|1758->584|3206->1996|3302->2082|3343->2084|3491->2196|3527->2223|3567->2225|3711->2333|3750->2356|3790->2358|3905->2437|3920->2443|3954->2455|4037->2502|4052->2508|4083->2517|4184->2582|4199->2588|4227->2594|4342->2677|4400->2703|4462->2729|4528->2786|4568->2788|4706->2890|4747->2915|4787->2917|4925->3019|4940->3025|4968->3031|5007->3034|5022->3040|5055->3051|5142->3102|5157->3108|5189->3118|5294->3191|5352->3217|5419->3266|5432->3271|5471->3272|5529->3294|5558->3314|5598->3316|5714->3400|5764->3418|5857->3475|5872->3481|5924->3511
LINES: 26->1|29->1|36->8|36->8|36->8|38->10|38->10|38->10|41->13|41->13|41->13|45->17|45->17|45->17|66->38|66->38|66->38|69->41|69->41|69->41|71->43|71->43|71->43|73->45|73->45|73->45|74->46|74->46|74->46|75->47|75->47|75->47|77->49|78->50|79->51|79->51|79->51|81->53|81->53|81->53|83->55|83->55|83->55|83->55|83->55|83->55|84->56|84->56|84->56|86->58|87->59|90->62|90->62|90->62|91->63|91->63|91->63|93->65|94->66|97->69|97->69|97->69
-- GENERATED --
*/
| dsarlis/SAD-Spring-2016-Project-Team4 | ApacheCMDA-Frontend/target/scala-2.10/src_managed/main/views/html/search.template.scala | Scala | mit | 7,934 |
package models.daos.slick
import com.mohiva.play.silhouette.core.LoginInfo
import com.mohiva.play.silhouette.core.providers.PasswordInfo
import com.mohiva.play.silhouette.contrib.daos.DelegableAuthInfoDAO
import play.api.db.slick._
import scala.concurrent.Future
import models.daos.slick.DBTableDefinitions._
import play.api.db.slick.Config.driver.simple._
/**
* The DAO to store the password information.
*/
class PasswordInfoDAOSlick extends DelegableAuthInfoDAO[PasswordInfo] {
import play.api.Play.current
/**
* Saves the password info.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The password info to save.
* @return The saved password info or None if the password info couldn't be saved.
*/
def save(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] = {
/*
data += (loginInfo -> authInfo)
Future.successful(authInfo)
*/
Future.successful {
DB withSession {implicit session =>
val infoId = slickLoginInfos.filter(
x => x.providerID === loginInfo.providerID && x.providerKey === loginInfo.providerKey
).first.id.get
slickPasswordInfos insert DBPasswordInfo(authInfo.hasher, authInfo.password, authInfo.salt, infoId)
authInfo
}
}
}
/**
* Finds the password info which is linked with the specified login info.
*
* @param loginInfo The linked login info.
* @return The retrieved password info or None if no password info could be retrieved for the given login info.
*/
def find(loginInfo: LoginInfo): Future[Option[PasswordInfo]] = {
Future.successful {
DB withSession { implicit session =>
slickLoginInfos.filter(info => info.providerID === loginInfo.providerID && info.providerKey === loginInfo.providerKey).firstOption match {
case Some(info) =>
val passwordInfo = slickPasswordInfos.filter(_.loginInfoId === info.id).first
Some(PasswordInfo(passwordInfo.hasher, passwordInfo.password, passwordInfo.salt))
case _ => None
}
}
}
}
}
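// Illustrative usage sketch (hypothetical identifiers and hash): a Silhouette
// credentials flow typically saves the hashed password once and later looks it
// up by the same LoginInfo through this DAO.
//
//   val dao = new PasswordInfoDAOSlick
//   val loginInfo = LoginInfo("credentials", "user@example.com")
//   for {
//     saved <- dao.save(loginInfo, PasswordInfo("bcrypt", "$2a$10$..."))
//     found <- dao.find(loginInfo)
//   } yield found // Some(PasswordInfo(...)) once the row exists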
| Wirwing/hello-conekta-play-framework | app/models/daos/slick/PasswordInfoDAOSlick.scala | Scala | mit | 2,117 |
package com.youdevise.albatross
import org.specs2.Specification
import Bounds._
import org.specs2.matcher.Matcher
class IntervalSpec extends Specification {
def is =
"An interval" ^
"is empty if it encloses no points" ! {
(open(0) to open(0)) must beEmpty
} ^
"is a singleton if it encloses only one point" ! {
(closed(0) to closed(0)) must beASingleton
} ^
"encloses any point within its bounds" ! {
(open(10) to closed(20) must enclose(15)) and
(unbounded[Int] to closed(20) must enclose(-1000)) and
(closed(10) to unbounded[Int] must enclose(1000)) and
(closed(10) to closed(10) must enclose(10))
} ^
"does not enclose any point outside its bounds" ! {
(open(10) to closed(20) must not(enclose(10))) and
(unbounded[Int] to closed(20) must not(enclose(1000))) and
(closed(10) to unbounded[Int] must not(enclose(-1000)))
} ^
"encloses any interval whose bounds are within its own" ! {
(open(-1) to open(11)) must encloseInterval (open(0) to open(10))
} ^
"does not enclose any interval either of whose bounds are outside its own" ! {
((open(-1) to open(5)) must not(encloseInterval(open(0) to open(10)))) and
((open(5) to open(11)) must not(encloseInterval(open(0) to open(10))))
} ^ end ^
bt ^
"The intersection of two intervals" ^
"Is the second interval if the first contains the second" ! {
(open(-10) to open(10)) intersect (open(-5) to open(5)) must_== (open(-5) to open(5))
} ^
"Is the first interval if the second contains the first" ! {
(open(-10) to open(10)) intersect (open(-25) to open(25)) must_== (open(-10) to open(10))
} ^
"Is the overlap between the two intervals if they are connected" ! {
(open(-10) to open(10)) intersect (open(5) to open(15)) must_== (open(5) to open(10))
} ^
"Is a singleton interval if the two intervals abut" ! {
(closed(-10) to closed(10)) intersect (closed(10) to closed(20)) must_== (closed(10) to closed(10))
} ^
"Is an open interval when one is open and the other is closed, but both have the same endpoints" ! {
val openInterval = open(-10) to open(10)
val closedInterval = closed(-10) to closed(10)
openInterval intersect closedInterval must_== openInterval
closedInterval intersect openInterval must_== openInterval
} ^
"Is empty if the two intervals do not touch" ! {
((open(0) to open(10)) intersect (open(10) to open(20))) must beEmpty
} ^ end ^
bt ^
"The union of two intervals" ^
"Is a set containing both if the intervals are not connected" ! {
((open(0) to open(10)) union (open(10) to open(20))) must_== IntervalSet(open(0) to open(10), open(10) to open(20))
} ^
"Is a set containing a single combined interval if the intervals are connected" ! {
((open(0) to closed(10)) union (closed(10) to open(20))) must_== (open(0) to open(20))
} ^ end ^
bt ^
"The complement of two intervals" ^
"Is an empty set if the second encloses the first" ! {
((open(0) to open(10)) complement (open(-1) to open(11))) must beEmpty
} ^
"Is a singleton set containing the truncated first set if the second set overlaps the first" ! {
(((open(0) to open(10)) complement (open(5) to open(15))) must_== (open(0) to closed(5))) and
(((open(0) to open(10)) complement (open(-5) to open(5))) must_== (closed(5) to open(10)))
} ^
"Is a set containing a pair of separated intervals, if the first interval encloses the second" ! {
((open(0) to open(10)) complement (closed(3) to closed(7))) must_== IntervalSet(open(0) to open(3), open(7) to open(10))
} ^ end
def beASingleton[T]: Matcher[IntervalSet[T]] = ((_: IntervalSet[T]).isASingleton, "is not a singleton")
def enclose[T](value: T): Matcher[IntervalSet[T]] = ((_: IntervalSet[T]).encloses(value), "doesn't enclose %s".format(value))
def encloseInterval[T](other: IntervalSet[T]): Matcher[IntervalSet[T]] = ((_: IntervalSet[T]).enclosesInterval(other), "doesn't enclose %s".format(other))
}
| tim-group/albatross | src/test/scala/IntervalSpec.scala | Scala | mit | 4,114 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.stream
import java.io.OutputStream
import java.nio.charset.Charset
import com.ibm.spark.kernel.protocol.v5.content.StreamContent
import com.ibm.spark.kernel.protocol.v5.{ActorLoader, KernelMessage, _}
import play.api.libs.json.Json
import scala.collection.mutable.ListBuffer
class KernelMessageStream(
actorLoader: ActorLoader,
kmBuilder: KMBuilder
) extends OutputStream {
private val EncodingType = Charset.forName("UTF-8")
private var internalBytes: ListBuffer[Byte] = ListBuffer()
/**
* Takes the current byte array contents in memory, packages them up into a
* KernelMessage, and sends the message to the KernelMessageRelay.
*/
override def flush(): Unit = {
val contents = new String(internalBytes.toArray, EncodingType)
val streamContent = StreamContent(
"stdout", contents
)
val kernelMessage = kmBuilder
.withIds(Seq(MessageType.Stream.toString))
.withHeader(MessageType.Stream)
.withContentString(streamContent).build
actorLoader.load(SystemActorType.KernelMessageRelay) ! kernelMessage
// Ensure any underlying implementation is processed
super.flush()
// Clear the internal buffer
internalBytes.clear()
}
/**
* Adds the specified byte to the end of the internal buffer. The most
* significant 24 bits are ignored. Only the least significant 8 bits
* are appended.
* @param b The byte whose least significant 8 bits are to be appended
*/
override def write(b: Int): Unit = {
internalBytes += b.toByte
// Attempt a flush if the provided byte was a newline
    if (b.toChar == '\n') flush()
}
}
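// Illustrative usage sketch (assumes an actorLoader is available from the
// caller): every newline written to the stream triggers flush(), which wraps
// the buffered bytes in a StreamContent message and sends it to the
// KernelMessageRelay.
//
//   val out = new KernelMessageStream(actorLoader, KMBuilder())
//   out.write("hello, kernel\n".getBytes("UTF-8"))  // '\n' causes an implicit flush
//   out.write("partial line".getBytes("UTF-8"))
//   out.flush()                                      // flush the remainder explicitly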
| bpburns/spark-kernel | kernel/src/main/scala/com/ibm/spark/kernel/protocol/v5/stream/KernelMessageStream.scala | Scala | apache-2.0 | 2,262 |
package sorm.reflection
import sorm._
import sext._, embrace._
// or InstanceReflection
class Reflected
( val instance : Any,
val reflection : Reflection )
{
def propertyValues
: Map[String, Any]
= reflection.properties.view.unzip._1.zipBy(propertyValue).toMap
def propertyValue
( name: String )
: Any
= reflection.propertyValue(name, instance.asInstanceOf[AnyRef])
def methodResult
( name: String,
args: List[Any] = Nil )
: Any
= throw new NotImplementedError
}
| sorm/sorm | src/main/scala/sorm/reflection/Reflected.scala | Scala | mit | 548 |
/*
* Copyright 2017 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.ml.samples.criteo
import org.apache.spark.sql.{DataFrame, SparkSession}
import scopt.OptionParser
/**
* Union of the different modes in which preprocessing can be done.
*/
sealed trait PreprocessingMode
case object Analyze extends PreprocessingMode
case object Transform extends PreprocessingMode
/**
* Converts string for mode into the appropriate `PreprocessingMode` object.
*/
object PreprocessingMode {
def apply(specifier: String): Option[PreprocessingMode] = specifier.toLowerCase match {
case "analyze" => Some(Analyze)
case "transform" => Some(Transform)
case _ => None
}
}
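// Illustrative examples of the case-insensitive conversion performed by
// PreprocessingMode.apply (inputs here are hypothetical):
//
//   PreprocessingMode("Analyze")   // Some(Analyze)
//   PreprocessingMode("transform") // Some(Transform)
//   PreprocessingMode("bogus")     // None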
case class NewClargConfig(basePath: String = "",
relativeInputPath: String = "",
relativeOutputPath: String = "",
mode: PreprocessingMode = Analyze,
numPartitions: Int = 500)
object CriteoPreprocessingApplication {
def main(args: Array[String]) {
val parser = new OptionParser[NewClargConfig]("Criteo TFRecord Preprocessor") {
head("CriteoPreprocessingApplication", "1.0.0")
help("help").text("Prints this description of the CLI to the Criteo TFRecord Preprocessor")
opt[String]('b', "base").required.action((b, c) => c.copy(basePath = b)).text(
"The base path along which the application should find inputs and store outputs. Required."
)
opt[String]('i', "in").required.action((i, c) => c.copy(relativeInputPath = i)).text(
"The pattern relative to the base path which the input files match. Required."
)
opt[String]('o', "out").required.action((o, c) => c.copy(relativeOutputPath = o)).text(
"The relative path to the directory in which the resulting transformed TFRecord files" +
" or analyze artifacts should be stored."
)
opt[Int]('n', "numPartitions").action((n, c) => c.copy(numPartitions = n)).text(
"The number of partitions in which to process the input file. Default is 500."
)
opt[String]('m', "mode").action(
(m, c) => {
val mod = PreprocessingMode(m)
c.copy(mode =
mod match {
case Some(mod) => mod
case None =>
                throw new Exception("Illegal mode passed under -m or --mode. " +
                  "Pass \"analyze\" or \"transform\".")
})
}
      ).text(
        "\"analyze\", \"transform\""
      )
}
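    // Example invocation (illustrative only; the bucket and file names are
    // hypothetical). These flags populate the NewClargConfig parsed below:
    //
    //   --base gs://my-bucket/criteo/ --in raw/train.tsv --out transformed/ \
    //   --mode transform --numPartitions 500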
parser.parse(args, NewClargConfig()) match {
case Some(config) =>
implicit val spark = SparkSession.builder().
appName("Criteo TFRecord Preprocessor").
getOrCreate()
val inputPath = config.basePath ++ config.relativeInputPath
val outputPath = config.basePath ++ config.relativeOutputPath
val artifactPath = config.basePath ++ "artifacts/"
val features = CriteoFeatures()
val artifactExporter = config.mode match {
case Analyze => new FileArtifactExporter(config.basePath ++ "artifacts/")
case _ => new EmptyArtifactExporter()
}
val indexer = new TrainingIndexer(features)
val importer = new CleanTSVImporter(inputPath,
features.inputSchema,
config.numPartitions)
config.mode match {
case Analyze =>
val analyzer = new CriteoAnalyzer(inputPath, features.inputSchema,
features, config.numPartitions, indexer, importer, artifactExporter)
analyzer()
case Transform =>
val vocabularyImporter = new ArtifactVocabularyImporter(features, artifactPath)
val exporter = new FileExporter(outputPath, "tfrecords")
val transformer = new CriteoTransformer(inputPath,
features, config.numPartitions, indexer,
artifactPath, vocabularyImporter)
val resultDf = transformer(importer.criteoImport)
exporter.criteoExport(resultDf)
}
}
}
}
| GoogleCloudDataproc/cloud-dataproc | spark-tensorflow/prepare/src/main/scala/com/google/cloud/ml/samples/criteo/CriteoPreprocessingApplication.scala | Scala | apache-2.0 | 4,640 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.akka
import java.io.IOException
import java.net._
import java.util.concurrent.{Callable, CompletableFuture, TimeUnit}
import akka.actor._
import akka.pattern.{ask => akkaAsk}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.flink.api.common.time.Time
import org.apache.flink.configuration._
import org.apache.flink.runtime.clusterframework.BootstrapTools.{FixedThreadPoolExecutorConfiguration, ForkJoinExecutorConfiguration}
import org.apache.flink.runtime.concurrent.FutureUtils
import org.apache.flink.runtime.net.SSLUtils
import org.apache.flink.util.NetUtils
import org.apache.flink.util.function.FunctionUtils
import org.jboss.netty.channel.ChannelException
import org.jboss.netty.logging.{InternalLoggerFactory, Slf4JLoggerFactory}
import org.slf4j.{Logger, LoggerFactory}
import scala.annotation.tailrec
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
/**
 * This object contains utility functions for Akka. It provides methods to start an actor system
 * with a given Akka configuration. Furthermore, the Akka configuration used for starting the
 * different actor systems resides in this object.
*/
object AkkaUtils {
val LOG: Logger = LoggerFactory.getLogger(AkkaUtils.getClass)
val INF_TIMEOUT: FiniteDuration = 21474835 seconds
val FLINK_ACTOR_SYSTEM_NAME = "flink"
def getFlinkActorSystemName = {
FLINK_ACTOR_SYSTEM_NAME
}
/**
* Creates a local actor system without remoting.
*
* @param configuration instance containing the user provided configuration values
* @return The created actor system
*/
def createLocalActorSystem(configuration: Configuration): ActorSystem = {
val akkaConfig = getAkkaConfig(configuration, None)
createActorSystem(akkaConfig)
}
/**
* Creates an actor system bound to the given hostname and port.
*
* @param configuration instance containing the user provided configuration values
* @param hostname of the network interface to bind to
* @param port of to bind to
* @return created actor system
*/
def createActorSystem(
configuration: Configuration,
hostname: String,
port: Int)
: ActorSystem = {
createActorSystem(configuration, Some((hostname, port)))
}
/**
* Creates an actor system. If a listening address is specified, then the actor system will listen
* on that address for messages from a remote actor system. If not, then a local actor system
* will be instantiated.
*
* @param configuration instance containing the user provided configuration values
* @param listeningAddress an optional tuple containing a bindAddress and a port to bind to.
* If the parameter is None, then a local actor system will be created.
* @return created actor system
*/
def createActorSystem(
configuration: Configuration,
listeningAddress: Option[(String, Int)])
: ActorSystem = {
val akkaConfig = getAkkaConfig(configuration, listeningAddress)
createActorSystem(akkaConfig)
}
/**
* Creates an actor system with the given akka config.
*
* @param akkaConfig configuration for the actor system
* @return created actor system
*/
def createActorSystem(akkaConfig: Config): ActorSystem = {
createActorSystem(FLINK_ACTOR_SYSTEM_NAME, akkaConfig)
}
/**
* Creates an actor system with the given akka config.
*
* @param akkaConfig configuration for the actor system
* @return created actor system
*/
def createActorSystem(actorSystemName: String, akkaConfig: Config): ActorSystem = {
// Initialize slf4j as logger of Akka's Netty instead of java.util.logging (FLINK-1650)
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory)
RobustActorSystem.create(actorSystemName, akkaConfig)
}
/**
* Creates an actor system with the default config and listening on a random port of the
* localhost.
*
* @return default actor system listening on a random port of the localhost
*/
def createDefaultActorSystem(): ActorSystem = {
createActorSystem(getDefaultAkkaConfig)
}
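  // Illustrative usage sketch (hypothetical hostname and port): a remote actor
  // system bound to an explicit address, next to a purely local one.
  //
  //   val flinkConf = new Configuration()
  //   val remoteSystem = AkkaUtils.createActorSystem(flinkConf, "localhost", 6123)
  //   val localSystem  = AkkaUtils.createLocalActorSystem(flinkConf)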
/**
* Returns a remote Akka config for the given configuration values.
*
* @param configuration containing the user provided configuration values
* @param hostname to bind against. If null, then the loopback interface is used
* @param port to bind against
   * @param executorConfig containing the user-specified executor configuration
* @return A remote Akka config
*/
def getAkkaConfig(configuration: Configuration,
hostname: String,
port: Int,
executorConfig: Config): Config = {
getAkkaConfig(configuration, Some((hostname, port)), executorConfig)
}
/**
* Returns a remote Akka config for the given configuration values.
*
* @param configuration containing the user provided configuration values
* @param hostname to bind against. If null, then the loopback interface is used
* @param port to bind against
* @return A remote Akka config
*/
def getAkkaConfig(configuration: Configuration,
hostname: String,
port: Int): Config = {
getAkkaConfig(configuration, Some((hostname, port)))
}
/**
* Return a local Akka config for the given configuration values.
*
* @param configuration containing the user provided configuration values
* @return A local Akka config
*/
def getAkkaConfig(configuration: Configuration): Config = {
getAkkaConfig(configuration, None)
}
/**
* Creates an akka config with the provided configuration values. If the listening address is
* specified, then the actor system will listen on the respective address.
*
* @param configuration instance containing the user provided configuration values
* @param externalAddress optional tuple of bindAddress and port to be reachable at.
* If None is given, then an Akka config for local actor system
* will be returned
* @return Akka config
*/
@throws(classOf[UnknownHostException])
def getAkkaConfig(configuration: Configuration,
externalAddress: Option[(String, Int)]): Config = {
getAkkaConfig(
configuration,
externalAddress,
getForkJoinExecutorConfig(ForkJoinExecutorConfiguration.fromConfiguration(configuration)))
}
/**
* Creates an akka config with the provided configuration values. If the listening address is
* specified, then the actor system will listen on the respective address.
*
* @param configuration instance containing the user provided configuration values
* @param externalAddress optional tuple of bindAddress and port to be reachable at.
* If None is given, then an Akka config for local actor system
* will be returned
   * @param executorConfig config defining the executor used by the default dispatcher
* @return Akka config
*/
@throws(classOf[UnknownHostException])
def getAkkaConfig(configuration: Configuration,
externalAddress: Option[(String, Int)],
executorConfig: Config): Config = {
val defaultConfig = getBasicAkkaConfig(configuration).withFallback(executorConfig)
externalAddress match {
case Some((hostname, port)) =>
val remoteConfig = getRemoteAkkaConfig(configuration,
// the wildcard IP lets us bind to all network interfaces
NetUtils.getWildcardIPAddress, port,
hostname, port)
remoteConfig.withFallback(defaultConfig)
case None =>
defaultConfig
}
}
/**
* Creates the default akka configuration which listens on a random port on the local machine.
* All configuration values are set to default values.
*
* @return Flink's Akka default config
*/
def getDefaultAkkaConfig: Config = {
getAkkaConfig(new Configuration(), Some(("", 0)))
}
/**
* Gets the basic Akka config which is shared by remote and local actor systems.
*
* @param configuration instance which contains the user specified values for the configuration
* @return Flink's basic Akka config
*/
private def getBasicAkkaConfig(configuration: Configuration): Config = {
val akkaThroughput = configuration.getInteger(AkkaOptions.DISPATCHER_THROUGHPUT)
val lifecycleEvents = configuration.getBoolean(AkkaOptions.LOG_LIFECYCLE_EVENTS)
val jvmExitOnFatalError = if (
configuration.getBoolean(AkkaOptions.JVM_EXIT_ON_FATAL_ERROR)){
"on"
} else {
"off"
}
val logLifecycleEvents = if (lifecycleEvents) "on" else "off"
val logLevel = getLogLevel
val supervisorStrategy = classOf[StoppingSupervisorWithoutLoggingActorKilledExceptionStrategy]
.getCanonicalName
val config =
s"""
|akka {
| daemonic = off
|
| loggers = ["akka.event.slf4j.Slf4jLogger"]
| logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
| log-config-on-start = off
|
| jvm-exit-on-fatal-error = $jvmExitOnFatalError
|
| serialize-messages = off
|
| loglevel = $logLevel
| stdout-loglevel = OFF
|
| log-dead-letters = $logLifecycleEvents
| log-dead-letters-during-shutdown = $logLifecycleEvents
|
| actor {
| guardian-supervisor-strategy = $supervisorStrategy
|
| warn-about-java-serializer-usage = off
|
| default-dispatcher {
| throughput = $akkaThroughput
| }
| }
|}
""".stripMargin
ConfigFactory.parseString(config)
}
def getThreadPoolExecutorConfig(configuration: FixedThreadPoolExecutorConfiguration): Config = {
val threadPriority = configuration.getThreadPriority
val minNumThreads = configuration.getMinNumThreads
val maxNumThreads = configuration.getMaxNumThreads
val configString = s"""
|akka {
| actor {
| default-dispatcher {
| type = akka.dispatch.PriorityThreadsDispatcher
| executor = "thread-pool-executor"
| thread-priority = $threadPriority
| thread-pool-executor {
| core-pool-size-min = $minNumThreads
| core-pool-size-max = $maxNumThreads
| }
| }
| }
        |}
      """.stripMargin
ConfigFactory.parseString(configString)
}
def getForkJoinExecutorConfig(configuration: ForkJoinExecutorConfiguration): Config = {
val forkJoinExecutorParallelismFactor = configuration.getParallelismFactor
val forkJoinExecutorParallelismMin = configuration.getMinParallelism
val forkJoinExecutorParallelismMax = configuration.getMaxParallelism
val configString = s"""
|akka {
| actor {
| default-dispatcher {
| executor = "fork-join-executor"
| fork-join-executor {
| parallelism-factor = $forkJoinExecutorParallelismFactor
| parallelism-min = $forkJoinExecutorParallelismMin
| parallelism-max = $forkJoinExecutorParallelismMax
| }
| }
| }
|}""".stripMargin
ConfigFactory.parseString(configString)
}
def testDispatcherConfig: Config = {
val config =
s"""
|akka {
| actor {
| default-dispatcher {
| fork-join-executor {
| parallelism-factor = 1.0
| parallelism-min = 2
| parallelism-max = 4
| }
| }
| }
|}
""".stripMargin
ConfigFactory.parseString(config)
}
private def validateHeartbeat(pauseParamName: String,
pauseValue: String,
intervalParamName: String,
intervalValue: String): Unit = {
if (Duration.apply(pauseValue).lteq(Duration.apply(intervalValue))) {
      throw new IllegalConfigurationException(
        "%s [%s] must be greater than %s [%s]",
pauseParamName,
pauseValue,
intervalParamName,
intervalValue)
}
}
/**
   * Creates an Akka config for a remote actor system listening on the given port on the network
* identified by bindAddress.
*
* @param configuration instance containing the user provided configuration values
* @param bindAddress of the network interface to bind on
* @param port to bind to or if 0 then Akka picks a free port automatically
* @param externalHostname The host name to expect for Akka messages
* @param externalPort The port to expect for Akka messages
* @return Flink's Akka configuration for remote actor systems
*/
private def getRemoteAkkaConfig(
configuration: Configuration,
bindAddress: String,
port: Int,
externalHostname: String,
externalPort: Int): Config = {
val normalizedExternalHostname = NetUtils.unresolvedHostToNormalizedString(externalHostname)
val akkaAskTimeout = Duration(configuration.getString(AkkaOptions.ASK_TIMEOUT))
val startupTimeout = configuration.getString(
AkkaOptions.STARTUP_TIMEOUT,
(akkaAskTimeout * 10).toString)
val transportHeartbeatInterval = configuration.getString(
AkkaOptions.TRANSPORT_HEARTBEAT_INTERVAL)
val transportHeartbeatPause = configuration.getString(
AkkaOptions.TRANSPORT_HEARTBEAT_PAUSE)
validateHeartbeat(
AkkaOptions.TRANSPORT_HEARTBEAT_PAUSE.key(),
transportHeartbeatPause,
AkkaOptions.TRANSPORT_HEARTBEAT_INTERVAL.key(),
transportHeartbeatInterval)
val transportThreshold = configuration.getDouble(AkkaOptions.TRANSPORT_THRESHOLD)
val watchHeartbeatInterval = configuration.getString(
AkkaOptions.WATCH_HEARTBEAT_INTERVAL)
val watchHeartbeatPause = configuration.getString(AkkaOptions.WATCH_HEARTBEAT_PAUSE)
validateHeartbeat(
AkkaOptions.WATCH_HEARTBEAT_PAUSE.key(),
watchHeartbeatPause,
AkkaOptions.WATCH_HEARTBEAT_INTERVAL.key(),
watchHeartbeatInterval)
val watchThreshold = configuration.getInteger(AkkaOptions.WATCH_THRESHOLD)
val akkaTCPTimeout = configuration.getString(AkkaOptions.TCP_TIMEOUT)
val akkaFramesize = configuration.getString(AkkaOptions.FRAMESIZE)
val lifecycleEvents = configuration.getBoolean(AkkaOptions.LOG_LIFECYCLE_EVENTS)
val logLifecycleEvents = if (lifecycleEvents) "on" else "off"
val akkaEnableSSLConfig = configuration.getBoolean(AkkaOptions.SSL_ENABLED) &&
SSLUtils.isInternalSSLEnabled(configuration)
val retryGateClosedFor = configuration.getLong(AkkaOptions.RETRY_GATE_CLOSED_FOR)
val akkaEnableSSL = if (akkaEnableSSLConfig) "on" else "off"
val akkaSSLKeyStore = configuration.getString(
SecurityOptions.SSL_INTERNAL_KEYSTORE,
configuration.getString(SecurityOptions.SSL_KEYSTORE))
val akkaSSLKeyStorePassword = configuration.getString(
SecurityOptions.SSL_INTERNAL_KEYSTORE_PASSWORD,
configuration.getString(SecurityOptions.SSL_KEYSTORE_PASSWORD))
val akkaSSLKeyPassword = configuration.getString(
SecurityOptions.SSL_INTERNAL_KEY_PASSWORD,
configuration.getString(SecurityOptions.SSL_KEY_PASSWORD))
val akkaSSLTrustStore = configuration.getString(
SecurityOptions.SSL_INTERNAL_TRUSTSTORE,
configuration.getString(SecurityOptions.SSL_TRUSTSTORE))
val akkaSSLTrustStorePassword = configuration.getString(
SecurityOptions.SSL_INTERNAL_TRUSTSTORE_PASSWORD,
configuration.getString(SecurityOptions.SSL_TRUSTSTORE_PASSWORD))
val akkaSSLProtocol = configuration.getString(SecurityOptions.SSL_PROTOCOL)
val akkaSSLAlgorithmsString = configuration.getString(SecurityOptions.SSL_ALGORITHMS)
val akkaSSLAlgorithms = akkaSSLAlgorithmsString.split(",").toList.mkString("[", ",", "]")
val clientSocketWorkerPoolPoolSizeMin =
configuration.getInteger(AkkaOptions.CLIENT_SOCKET_WORKER_POOL_SIZE_MIN)
val clientSocketWorkerPoolPoolSizeMax =
configuration.getInteger(AkkaOptions.CLIENT_SOCKET_WORKER_POOL_SIZE_MAX)
val clientSocketWorkerPoolPoolSizeFactor =
configuration.getDouble(AkkaOptions.CLIENT_SOCKET_WORKER_POOL_SIZE_FACTOR)
val serverSocketWorkerPoolPoolSizeMin =
configuration.getInteger(AkkaOptions.SERVER_SOCKET_WORKER_POOL_SIZE_MIN)
val serverSocketWorkerPoolPoolSizeMax =
configuration.getInteger(AkkaOptions.SERVER_SOCKET_WORKER_POOL_SIZE_MAX)
val serverSocketWorkerPoolPoolSizeFactor =
configuration.getDouble(AkkaOptions.SERVER_SOCKET_WORKER_POOL_SIZE_FACTOR)
val configString =
s"""
|akka {
| actor {
| provider = "akka.remote.RemoteActorRefProvider"
| }
|
| remote {
| startup-timeout = $startupTimeout
|
| transport-failure-detector{
| acceptable-heartbeat-pause = $transportHeartbeatPause
| heartbeat-interval = $transportHeartbeatInterval
| threshold = $transportThreshold
| }
|
| watch-failure-detector{
| heartbeat-interval = $watchHeartbeatInterval
| acceptable-heartbeat-pause = $watchHeartbeatPause
| threshold = $watchThreshold
| }
|
| netty {
| tcp {
| transport-class = "akka.remote.transport.netty.NettyTransport"
| port = $externalPort
| bind-port = $port
| connection-timeout = $akkaTCPTimeout
| maximum-frame-size = $akkaFramesize
| tcp-nodelay = on
|
| client-socket-worker-pool {
| pool-size-min = $clientSocketWorkerPoolPoolSizeMin
| pool-size-max = $clientSocketWorkerPoolPoolSizeMax
| pool-size-factor = $clientSocketWorkerPoolPoolSizeFactor
| }
|
| server-socket-worker-pool {
| pool-size-min = $serverSocketWorkerPoolPoolSizeMin
| pool-size-max = $serverSocketWorkerPoolPoolSizeMax
| pool-size-factor = $serverSocketWorkerPoolPoolSizeFactor
| }
| }
| }
|
| log-remote-lifecycle-events = $logLifecycleEvents
|
| retry-gate-closed-for = ${retryGateClosedFor + " ms"}
| }
|}
""".stripMargin
val effectiveHostname =
if (normalizedExternalHostname != null && normalizedExternalHostname.nonEmpty) {
normalizedExternalHostname
} else {
        // if bindAddress is null or empty, then leave bindAddress unspecified. Akka will pick
        //   InetAddress.getLocalHost.getHostAddress
        "\"\""
}
val hostnameConfigString =
s"""
|akka {
| remote {
| netty {
| tcp {
| hostname = $effectiveHostname
| bind-hostname = $bindAddress
| }
| }
| }
|}
""".stripMargin
val sslConfigString = if (akkaEnableSSLConfig) {
s"""
|akka {
| remote {
|
| enabled-transports = ["akka.remote.netty.ssl"]
|
| netty {
|
| ssl = $${akka.remote.netty.tcp}
|
| ssl {
|
| enable-ssl = $akkaEnableSSL
| security {
| key-store = "$akkaSSLKeyStore"
| key-store-password = "$akkaSSLKeyStorePassword"
| key-password = "$akkaSSLKeyPassword"
| trust-store = "$akkaSSLTrustStore"
| trust-store-password = "$akkaSSLTrustStorePassword"
| protocol = $akkaSSLProtocol
| enabled-algorithms = $akkaSSLAlgorithms
| random-number-generator = ""
| require-mutual-authentication = on
| }
| }
| }
| }
|}
""".stripMargin
}else{
""
}
ConfigFactory.parseString(configString + hostnameConfigString + sslConfigString).resolve()
}
def getLogLevel: String = {
if (LOG.isTraceEnabled) {
"TRACE"
} else {
if (LOG.isDebugEnabled) {
"DEBUG"
} else {
if (LOG.isInfoEnabled) {
"INFO"
} else {
if (LOG.isWarnEnabled) {
"WARNING"
} else {
if (LOG.isErrorEnabled) {
"ERROR"
} else {
"OFF"
}
}
}
}
}
}
/** Returns a [[Future]] to the [[ActorRef]] of the child of a given actor. The child is specified
* by providing its actor name.
*
* @param parent [[ActorRef]] to the parent of the child to be retrieved
* @param child Name of the child actor
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @return [[Future]] to the [[ActorRef]] of the child actor
*/
def getChild(
parent: ActorRef,
child: String,
system: ActorSystem,
timeout: FiniteDuration)
: Future[ActorRef] = {
system.actorSelection(parent.path / child).resolveOne()(timeout)
}
/** Returns a [[Future]] to the [[ActorRef]] of an actor. The actor is specified by its path.
*
* @param path Path to the actor to be retrieved
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @return [[Future]] to the [[ActorRef]] of the actor
*/
def getActorRefFuture(
path: String,
system: ActorSystem,
timeout: FiniteDuration)
: Future[ActorRef] = {
system.actorSelection(path).resolveOne()(timeout)
}
/** Returns an [[ActorRef]] for the actor specified by the path parameter.
*
* @param path Path to the actor to be retrieved
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @throws java.io.IOException
* @return [[ActorRef]] of the requested [[Actor]]
*/
@throws(classOf[IOException])
def getActorRef(
path: String,
system: ActorSystem,
timeout: FiniteDuration)
: ActorRef = {
try {
val future = AkkaUtils.getActorRefFuture(path, system, timeout)
Await.result(future, timeout)
}
catch {
case e @ (_ : ActorNotFound | _ : TimeoutException) =>
throw new IOException(
s"Actor at $path not reachable. " +
"Please make sure that the actor is running and its port is reachable.", e)
case e: IOException =>
throw new IOException(s"Could not connect to the actor at $path", e)
}
}
/**
* Utility function to construct a future which tries multiple times to execute itself if it
   * fails. If the maximum number of tries is exceeded, then the future fails.
*
* @param body function describing the future action
* @param tries number of maximum tries before the future fails
* @param executionContext which shall execute the future
* @tparam T return type of the future
* @return future which tries to recover by re-executing itself a given number of times
*/
def retry[T](body: => T, tries: Int)(implicit executionContext: ExecutionContext): Future[T] = {
Future{ body }.recoverWith{
case t:Throwable =>
if(tries > 0){
retry(body, tries - 1)
}else{
Future.failed(t)
}
}
}
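  // Illustrative usage sketch (fetchValue() is a hypothetical call; any
  // ExecutionContext in scope works): re-execute the body up to three more
  // times if it keeps failing before the future itself fails.
  //
  //   implicit val ec = scala.concurrent.ExecutionContext.global
  //   val result: Future[Int] = AkkaUtils.retry(fetchValue(), tries = 3)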
/**
* Utility function to construct a future which tries multiple times to execute itself if it
   * fails. If the maximum number of tries is exceeded, then the future fails.
*
* @param callable future action
* @param tries maximum number of tries before the future fails
* @param executionContext which shall execute the future
* @tparam T return type of the future
* @return future which tries to recover by re-executing itself a given number of times
*/
def retry[T](callable: Callable[T], tries: Int)(implicit executionContext: ExecutionContext):
Future[T] = {
retry(callable.call(), tries)
}
/**
* Utility function to construct a future which tries multiple times to execute itself if it
   * fails. If the maximum number of tries is exceeded, then the future fails.
*
* @param target actor which receives the message
* @param message to be sent to the target actor
* @param tries maximum number of tries before the future fails
* @param executionContext which shall execute the future
* @param timeout of the future
* @return future which tries to recover by re-executing itself a given number of times
*/
def retry(target: ActorRef, message: Any, tries: Int)(implicit executionContext:
ExecutionContext, timeout: FiniteDuration): Future[Any] = {
(target ? message)(timeout) recoverWith{
case t: Throwable =>
if(tries > 0){
retry(target, message, tries-1)
}else{
Future.failed(t)
}
}
}
def getTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(config.getString(AkkaOptions.ASK_TIMEOUT))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getTimeoutAsTime(config: Configuration): Time = {
try {
val duration = Duration(config.getString(AkkaOptions.ASK_TIMEOUT))
Time.milliseconds(duration.toMillis)
} catch {
case _: NumberFormatException =>
throw new IllegalConfigurationException(AkkaUtils.formatDurationParsingErrorMessage)
}
}
def getDefaultTimeout: Time = {
val duration = Duration(AkkaOptions.ASK_TIMEOUT.defaultValue())
Time.milliseconds(duration.toMillis)
}
def getDefaultTimeoutAsFiniteDuration: FiniteDuration = {
val timeout = getDefaultTimeout
new FiniteDuration(timeout.toMilliseconds, TimeUnit.MILLISECONDS)
}
def getLookupTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(config.getString(AkkaOptions.LOOKUP_TIMEOUT))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getDefaultLookupTimeout: FiniteDuration = {
val duration = Duration(AkkaOptions.LOOKUP_TIMEOUT.defaultValue())
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getClientTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(config.getString(AkkaOptions.CLIENT_TIMEOUT))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getDefaultClientTimeout: FiniteDuration = {
val duration = Duration(AkkaOptions.CLIENT_TIMEOUT.defaultValue())
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
/** Returns the address of the given [[ActorSystem]]. The [[Address]] object contains
* the port and the host under which the actor system is reachable
*
* @param system [[ActorSystem]] for which the [[Address]] shall be retrieved
* @return [[Address]] of the given [[ActorSystem]]
*/
def getAddress(system: ActorSystem): Address = {
RemoteAddressExtension(system).address
}
/** Returns the given [[ActorRef]]'s path string representation with host and port of the
* [[ActorSystem]] in which the actor is running.
*
* @param system [[ActorSystem]] in which the given [[ActorRef]] is running
* @param actor [[ActorRef]] of the [[Actor]] for which the URL has to be generated
* @return String containing the [[ActorSystem]] independent URL of the [[Actor]]
*/
def getAkkaURL(system: ActorSystem, actor: ActorRef): String = {
val address = getAddress(system)
actor.path.toStringWithAddress(address)
}
/** Returns the AkkaURL for a given [[ActorSystem]] and a path describing a running [[Actor]] in
* the actor system.
*
* @param system [[ActorSystem]] in which the given [[Actor]] is running
* @param path Path describing an [[Actor]] for which the URL has to be generated
* @return String containing the [[ActorSystem]] independent URL of an [[Actor]] specified by
* path.
*/
def getAkkaURL(system: ActorSystem, path: String): String = {
val address = getAddress(system)
address.toString + path
}
/** Extracts the hostname and the port of the remote actor system from the given Akka URL. The
* result is an [[InetSocketAddress]] instance containing the extracted hostname and port. If
* the Akka URL does not contain the hostname and port information, e.g. a local Akka URL is
* provided, then an [[Exception]] is thrown.
*
* @param akkaURL The URL to extract the host and port from.
* @throws java.lang.Exception Thrown, if the given string does not represent a proper url
* @return The InetSocketAddress with the extracted host and port.
*/
@throws(classOf[Exception])
def getInetSocketAddressFromAkkaURL(akkaURL: String): InetSocketAddress = {
// AkkaURLs have the form schema://systemName@host:port/.... if it's a remote Akka URL
try {
val address = getAddressFromAkkaURL(akkaURL)
(address.host, address.port) match {
case (Some(hostname), Some(portValue)) => new InetSocketAddress(hostname, portValue)
case _ => throw new MalformedURLException()
}
}
catch {
case _ : MalformedURLException =>
throw new Exception(s"Could not retrieve InetSocketAddress from Akka URL $akkaURL")
}
}
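  // Illustrative example (the address is hypothetical): a remote Akka URL of the
  // documented form yields the host and port embedded in it.
  //
  //   AkkaUtils.getInetSocketAddressFromAkkaURL(
  //     "akka.tcp://flink@jobmanager-host:6123/user/jobmanager")
  //   // => InetSocketAddress("jobmanager-host", 6123)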
/**
* Extracts the [[Address]] from the given akka URL.
*
* @param akkaURL to extract the [[Address]] from
* @throws java.net.MalformedURLException if the [[Address]] could not be parsed from
* the given akka URL
* @return Extracted [[Address]] from the given akka URL
*/
@throws(classOf[MalformedURLException])
def getAddressFromAkkaURL(akkaURL: String): Address = {
AddressFromURIString(akkaURL)
}
  def formatDurationParsingErrorMessage: String = {
    "Duration format must be \"val unit\", where 'val' is a number and 'unit' is " +
      "(d|day)|(h|hour)|(min|minute)|(s|sec|second)|(ms|milli|millisecond)|" +
      "(µs|micro|microsecond)|(ns|nano|nanosecond)"
}
/**
* Returns the local akka url for the given actor name.
*
* @param actorName Actor name identifying the actor
* @return Local Akka URL for the given actor
*/
def getLocalAkkaURL(actorName: String): String = {
"akka://flink/user/" + actorName
}
/**
* Retries a function if it fails because of a [[java.net.BindException]].
*
* @param fn The function to retry
* @param stopCond Flag to signal termination
* @param maxSleepBetweenRetries Max random sleep time between retries
* @tparam T Return type of the function to retry
* @return Return value of the function to retry
*/
@tailrec
def retryOnBindException[T](
fn: => T,
stopCond: => Boolean,
maxSleepBetweenRetries : Long = 0 )
: scala.util.Try[T] = {
def sleepBeforeRetry() : Unit = {
if (maxSleepBetweenRetries > 0) {
val sleepTime = (Math.random() * maxSleepBetweenRetries).asInstanceOf[Long]
LOG.info(s"Retrying after bind exception. Sleeping for $sleepTime ms.")
Thread.sleep(sleepTime)
}
}
scala.util.Try {
fn
} match {
case scala.util.Failure(x: BindException) =>
if (stopCond) {
scala.util.Failure(x)
} else {
sleepBeforeRetry()
retryOnBindException(fn, stopCond)
}
case scala.util.Failure(x: Exception) => x.getCause match {
case _: ChannelException =>
if (stopCond) {
scala.util.Failure(new RuntimeException(
"Unable to do further retries starting the actor system"))
} else {
sleepBeforeRetry()
retryOnBindException(fn, stopCond)
}
case _ => scala.util.Failure(x)
}
case f => f
}
}
/**
* Terminates the given [[ActorSystem]] and returns its termination future.
*
* @param actorSystem to terminate
* @return Termination future
*/
def terminateActorSystem(actorSystem: ActorSystem): CompletableFuture[Void] = {
FutureUtils.toJava(actorSystem.terminate).thenAccept(FunctionUtils.ignoreFn())
}
}
| ueshin/apache-flink | flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala | Scala | apache-2.0 | 33,672 |
package de.tototec.sbuild.addons
/**
* Addons supporting the Bundle Tool from Peter Kriens.
*/
package object bnd {
}
 | SBuild-org/sbuild | de.tototec.sbuild.addons/src/main/scala/de/tototec/sbuild/addons/bnd/package.scala | Scala | apache-2.0 | 121 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib
import java.{util => ju}
object AbstractListTest extends AbstractListTest(new AbstractListFactory)
abstract class AbstractListTest[F <: AbstractListFactory](listFactory: F)
extends AbstractCollectionTest(listFactory) with ListTest {
override def testApi(): Unit = {
super.testApi()
testListApi(listFactory)
}
}
class AbstractListFactory extends AbstractCollectionFactory with ListFactory {
override def implementationName: String =
"java.util.AbstractList"
override def empty[E]: ju.AbstractList[E] = {
// inefficient but simple for debugging implementation of AbstractList
new ju.AbstractList[E] {
private var inner = scala.collection.immutable.List.empty[E]
override def get(index: Int): E = {
checkIndexInBounds(index)
inner(index)
}
override def size(): Int =
inner.size
override def add(index: Int, element: E): Unit = {
checkIndexOnBounds(index)
val (left, right) = inner.splitAt(index)
inner = left ::: element :: right
}
override def set(index: Int, element: E): E = {
checkIndexInBounds(index)
val (left, right) = inner.splitAt(index)
inner = left ::: element :: right.tail
right.head
}
override def remove(index: Int): E = {
checkIndexInBounds(index)
val (left, right) = inner.splitAt(index)
inner = left ::: right.tail
right.head
}
override def clear(): Unit =
inner = Nil
private def checkIndexInBounds(index: Int): Unit = {
if (index < 0 || index >= size)
throw new IndexOutOfBoundsException(index.toString)
}
private def checkIndexOnBounds(index: Int): Unit = {
if (index < 0 || index > size)
throw new IndexOutOfBoundsException(index.toString)
}
}
}
}
| jmnarloch/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/AbstractListTest.scala | Scala | bsd-3-clause | 2,431 |
package org.jetbrains.plugins.scala.lang.scaladoc.psi.impl
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementImpl
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocParamRef
class ScDocParamRefImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScDocParamRef {
  override def toString: String = "ScDocParamRef"
}
 | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/scaladoc/psi/impl/ScDocParamRefImpl.scala | Scala | apache-2.0 | 378 |
/*
* Copyright (c) 2013. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.berkeley.cs.amplab.adam.algorithms.smithwaterman
object SmithWatermanConstantGapScoring {
protected def constantGapFn (wMatch: Double, wDelete: Double, wInsert: Double, wMismatch: Double)(x: Int, y: Int, i: Char, j: Char): Double = {
if (x == y) {
wMatch
} else if (x == '_') {
wDelete
} else if (y == '_') {
wInsert
} else {
wMismatch
}
}
}
abstract class SmithWatermanConstantGapScoring (xSequence: String,
ySequence: String,
wMatch: Double,
wMismatch: Double,
wInsert: Double,
wDelete: Double)
  extends SmithWatermanGapScoringFromFn(xSequence, ySequence, SmithWatermanConstantGapScoring.constantGapFn(wMatch, wDelete, wInsert, wMismatch)) {
}
| fnothaft/adam | adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/algorithms/smithwaterman/SmithWatermanConstantGapScoring.scala | Scala | apache-2.0 | 1,397 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnetexamples.visualization
import ml.dmlc.mxnet.Symbol
/**
* @author Depeng Liang
*/
object VGG {
def getSymbol(numClasses: Int = 1000): Symbol = {
    // define vgg
val data = Symbol.Variable("data")
// group 1
val conv1_1 = Symbol.Convolution("conv1_1")()(
Map("data" -> data, "num_filter" -> 64, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu1_1 = Symbol.Activation("relu1_1")()(Map("data" -> conv1_1, "act_type" -> "relu"))
val pool1 = Symbol.Pooling("pool1")()(
Map("data" -> relu1_1, "pool_type" -> "max", "kernel" -> "(2, 2)", "stride" -> "(2,2)"))
// group 2
val conv2_1 = Symbol.Convolution("conv2_1")()(
Map("data" -> pool1, "num_filter" -> 128, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu2_1 = Symbol.Activation("relu2_1")()(Map("data" -> conv2_1, "act_type" -> "relu"))
val pool2 = Symbol.Pooling("pool2")()(
Map("data" -> relu2_1, "pool_type" -> "max", "kernel" -> "(2, 2)", "stride" -> "(2,2)"))
// group 3
val conv3_1 = Symbol.Convolution("conv3_1")()(
Map("data" -> pool2, "num_filter" -> 256, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu3_1 = Symbol.Activation("relu3_1")()(Map("data" -> conv3_1, "act_type" -> "relu"))
val conv3_2 = Symbol.Convolution("conv3_2")()(
Map("data" -> relu3_1, "num_filter" -> 256, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu3_2 = Symbol.Activation("relu3_2")()(Map("data" -> conv3_2 , "act_type" -> "relu"))
val pool3 = Symbol.Pooling("pool3")()(
Map("data" -> relu3_2, "pool_type" -> "max", "kernel" -> "(2, 2)", "stride" -> "(2,2)"))
// group 4
val conv4_1 = Symbol.Convolution("conv4_1")()(
Map("data" -> pool3, "num_filter" -> 512, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu4_1 = Symbol.Activation("relu4_1")()(Map("data" -> conv4_1 , "act_type" -> "relu"))
val conv4_2 = Symbol.Convolution("conv4_2")()(
Map("data" -> relu4_1, "num_filter" -> 512, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu4_2 = Symbol.Activation("relu4_2")()(Map("data" -> conv4_2 , "act_type" -> "relu"))
val pool4 = Symbol.Pooling("pool4")()(
Map("data" -> relu4_2, "pool_type" -> "max", "kernel" -> "(2, 2)", "stride" -> "(2,2)"))
// group 5
val conv5_1 = Symbol.Convolution("conv5_1")()(
Map("data" -> pool4, "num_filter" -> 512, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu5_1 = Symbol.Activation("relu5_1")()(Map("data" -> conv5_1, "act_type" -> "relu"))
val conv5_2 = Symbol.Convolution("conv5_2")()(
Map("data" -> relu5_1, "num_filter" -> 512, "pad" -> "(1,1)", "kernel" -> "(3,3)"))
val relu5_2 = Symbol.Activation("relu5_2")()(Map("data" -> conv5_2, "act_type" -> "relu"))
val pool5 = Symbol.Pooling("pool5")()(
Map("data" -> relu5_2, "pool_type" -> "max", "kernel" -> "(2, 2)", "stride" -> "(2,2)"))
// group 6
val flatten = Symbol.Flatten("flatten")()(Map("data" -> pool5))
val fc6 = Symbol.FullyConnected("fc6")()(Map("data" -> flatten, "num_hidden" -> 4096))
val relu6 = Symbol.Activation("relu6")()(Map("data" -> fc6, "act_type" -> "relu"))
val drop6 = Symbol.Dropout("drop6")()(Map("data" -> relu6, "p" -> 0.5f))
// group 7
val fc7 = Symbol.FullyConnected("fc7")()(Map("data" -> drop6, "num_hidden" -> 4096))
val relu7 = Symbol.Activation("relu7")()(Map("data" -> fc7, "act_type" -> "relu"))
val drop7 = Symbol.Dropout("drop7")()(Map("data" -> relu7, "p" -> 0.5f))
// output
val fc8 = Symbol.FullyConnected("fc8")()(
Map("data" -> drop7, "num_hidden" -> numClasses))
val softmax = Symbol.SoftmaxOutput("softmax")()(Map("data" -> fc8))
softmax
}
}
| nicklhy/mxnet | scala-package/examples/src/main/scala/ml/dmlc/mxnetexamples/visualization/VGG.scala | Scala | apache-2.0 | 4,519 |
trait Foo {
val x = "world"
foo(5)
def bar(x: Int): Int = 20
def foo(n: Int): String = x + n
}
class Bar extends Foo {
val y = "hello" // error
foo(5)
bar(10)
override def foo(n: Int): String = {
println("in foo")
y + x
}
}
class Qux extends Foo {
val y = "hello"
foo(5)
bar(10)
}
| som-snytt/dotty | tests/init/neg/override2.scala | Scala | apache-2.0 | 327 |