repo_name (stringlengths 6-97) | path (stringlengths 3-341) | text (stringlengths 8-1.02M)
---|---|---|
ihji/dotty
|
tests/run/phantom-param-accessor.scala
|
import Boo._
object Test {
def main(args: Array[String]): Unit = {
new Foo(a).aVal
}
}
class Foo(val aVal: A)
object Boo extends Phantom {
type A = this.Nothing
def a = assume
}
|
ihji/dotty
|
tests/neg/phantom-volitile.scala
|
class Foo {
@volatile var foo1 = Boo.boo // error: var fields cannot have Phantom types
@volatile val foo2 = Boo.boo // error: Phantom fields cannot be @volatile
}
object Boo extends Phantom {
def boo = assume
}
|
ihji/dotty
|
tests/neg/enums.scala
|
package enums
enum List[+T] {
case Cons[T](x: T, xs: List[T]) // ok
case Snoc[U](xs: List[U], x: U) // error: different type parameters
}
enum class X {
case Y // error: case not allowed here
}
enum E1[T] {
case C // error: cannot determine type argument
}
enum E2[+T, +U >: T] {
case C // error: cannot determine type argument
}
enum E3[-T <: Ordered[T]] {
case C // error: cannot determine type argument
}
enum Option[+T] {
case Some[T](x: T)
case None
}
object Test {
class Unrelated
val x: Option[Int] = Option.Some(1)
x == new Unrelated // error: cannot compare
}
|
ihji/dotty
|
bot/src/dotty/tools/bot/Main.scala
|
package dotty.tools.bot
import org.http4s.server.{ Server, ServerApp }
import org.http4s.server.blaze._
import scalaz.concurrent.Task
object Main extends ServerApp with PullRequestService {
val githubUser = sys.env("GITHUB_USER")
val githubToken = sys.env("GITHUB_TOKEN")
val githubClientId = sys.env("GITHUB_CLIENT_ID")
val githubClientSecret = sys.env("GITHUB_CLIENT_SECRET")
val droneToken = sys.env("DRONE_TOKEN")
val port = sys.env("PORT").toInt
/** Services mounted to the server */
final val services = prService
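// Note (illustrative): with the "/api" mount point below, the rate-limit endpoint defined in
// PullRequestService is served at GET /api/rate.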
override def server(args: List[String]): Task[Server] = {
BlazeBuilder
.bindHttp(port, "0.0.0.0")
.mountService(services, "/api")
.start
}
}
|
ihji/dotty
|
tests/neg/phantom-instanceOf-1.scala
|
class phantomInstanceOf1 {
null.asInstanceOf[Boo.Any] // error
null.asInstanceOf[Boo.Nothing] // error
"".asInstanceOf[Boo.Any] // error
"".asInstanceOf[Boo.Nothing] // error
}
object Boo extends Phantom {
def boo[B <: Boo.Any]: B = assume
}
|
ihji/dotty
|
tests/neg/phantom-trait-3.scala
|
class Boo1 extends Phantom // error
trait Boo2 extends Phantom // error
object Boo3 extends Phantom
|
ihji/dotty
|
sbt-dotty/src/dotty/tools/sbtplugin/DottyIDEPlugin.scala
|
package dotty.tools.sbtplugin
import sbt._
import sbt.Keys._
import java.io._
import java.lang.ProcessBuilder
import scala.collection.mutable
import scala.util.Properties.{ isWin, isMac }
import dotty.tools.languageserver.config.ProjectConfig
import com.fasterxml.jackson.databind.ObjectMapper
import scala.collection.mutable.ListBuffer
import DottyPlugin.autoImport._
object DottyIDEPlugin extends AutoPlugin {
// Adapted from scala-reflect
private def distinctBy[A, B](xs: Seq[A])(f: A => B): Seq[A] = {
val buf = new mutable.ListBuffer[A]
val seen = mutable.Set[B]()
xs foreach { x =>
val y = f(x)
if (!seen(y)) {
buf += x
seen += y
}
}
buf.toList
}
private def isDottyVersion(version: String) =
version.startsWith("0.")
/** Return a new state derived from `state` such that scalaVersion returns `newScalaVersion` in all
* projects in `projRefs` (`state` is returned if no setting needed to be updated).
*/
private def updateScalaVersion(state: State, projRefs: Seq[ProjectRef], newScalaVersion: String): State = {
val extracted = Project.extract(state)
val settings = extracted.structure.data
if (projRefs.forall(projRef => scalaVersion.in(projRef).get(settings).get == newScalaVersion))
state
else {
def matchingSetting(setting: Setting[_]) =
setting.key.key == scalaVersion.key &&
setting.key.scope.project.fold(ref => projRefs.contains(ref), ifGlobal = true, ifThis = true)
val newSettings = extracted.session.mergeSettings.collect {
case setting if matchingSetting(setting) =>
scalaVersion in setting.key.scope := newScalaVersion
}
val newSession = extracted.session.appendRaw(newSettings)
BuiltinCommands.reapply(newSession, extracted.structure, state)
}
}
/** Setup to run in all dotty projects.
* Return a triplet of:
* (1) A version of dotty
* (2) A list of dotty projects
* (3) A state where `scalaVersion` is set to (1) in all projects in (2)
*/
private def dottySetup(state: State): (String, Seq[ProjectRef], State) = {
val structure = Project.structure(state)
val settings = structure.data
// FIXME: this function uses `sorted` to order versions but this is incorrect,
// we need an Ordering for version numbers, like the one in Coursier.
val (dottyVersions, dottyProjRefs) =
structure.allProjectRefs.flatMap { projRef =>
val version = scalaVersion.in(projRef).get(settings).get
if (isDottyVersion(version))
Some((version, projRef))
else
crossScalaVersions.in(projRef).get(settings).get.filter(isDottyVersion).sorted.lastOption match {
case Some(v) =>
Some((v, projRef))
case _ =>
None
}
}.unzip
if (dottyVersions.isEmpty)
throw new MessageOnlyException("No Dotty project detected")
else {
val dottyVersion = dottyVersions.sorted.last
val dottyState = updateScalaVersion(state, dottyProjRefs, dottyVersion)
(dottyVersion, dottyProjRefs, dottyState)
}
}
/** Run `task` in state `state` */
private def runTask[T](task: Task[T], state: State): T = {
val extracted = Project.extract(state)
val structure = extracted.structure
val (_, result) =
EvaluateTask.withStreams(structure, state) { streams =>
EvaluateTask.runTask(task, state, streams, structure.index.triggers,
EvaluateTask.extractedTaskConfig(extracted, structure, state))(
EvaluateTask.nodeView(state, streams, Nil)
)
}
result match {
case Value(v) =>
v
case Inc(i) =>
throw i
}
}
/** Run task `key` in all configurations in all projects in `projRefs`, using state `state` */
private def runInAllConfigurations[T](key: TaskKey[T], projRefs: Seq[ProjectRef], state: State): Seq[T] = {
val structure = Project.structure(state)
val settings = structure.data
val joinedTask = projRefs.flatMap { projRef =>
val project = Project.getProjectForReference(projRef, structure).get
project.configurations.flatMap { config =>
key.in(projRef, config).get(settings)
}
}.join
runTask(joinedTask, state)
}
/** Prepare command to be passed to ProcessBuilder */
def prepareCommand(cmd: Seq[String]): Seq[String] =
if (isWin) Seq("cmd.exe", "/C") ++ cmd
else cmd
/** Run `cmd`.
* @param wait If true, wait for `cmd` to return and throw an exception if the exit code is non-zero.
* @param directory If not null, run `cmd` in this directory.
*/
def runProcess(cmd: Seq[String], wait: Boolean = false, directory: File = null): Unit = {
val pb0 = new ProcessBuilder(prepareCommand(cmd): _*).inheritIO()
val pb = if (directory != null) pb0.directory(directory) else pb0
if (wait) {
val exitCode = pb.start().waitFor()
if (exitCode != 0) {
val cmdString = cmd.mkString(" ")
val description = if (directory != null) s""" in directory "$directory"""" else ""
throw new MessageOnlyException(s"""Running command "${cmdString}"${description} failed.""")
}
}
else
pb.start()
}
private val projectConfig = taskKey[Option[ProjectConfig]]("")
object autoImport {
val codeCommand = taskKey[Seq[String]]("Command to start VSCode")
val runCode = taskKey[Unit]("Start VSCode, usually called from launchIDE")
val launchIDE = taskKey[Unit]("Configure and run VSCode on this project")
}
import autoImport._
override def requires: Plugins = plugins.JvmPlugin
override def trigger = allRequirements
def configureIDE = Command.command("configureIDE") { origState =>
val (dottyVersion, projRefs, dottyState) = dottySetup(origState)
val configs0 = runInAllConfigurations(projectConfig, projRefs, dottyState).flatten
// Drop configurations that do not define their own sources, but just
// inherit their sources from some other configuration.
val configs = distinctBy(configs0)(_.sourceDirectories.deep)
// Write the version of the Dotty Language Server to use in a file by itself.
// This could be a field in the JSON config file, but that would require all
// IDE plugins to parse JSON.
val dlsVersion = dottyVersion
.replace("-nonbootstrapped", "") // The language server is only published bootstrapped
val dlsBinaryVersion = dlsVersion.split("\\.").take(2).mkString(".")
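// For example (illustrative version string): a dlsVersion of "0.4.0-RC1" yields a dlsBinaryVersion of "0.4".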
val pwArtifact = new PrintWriter(".dotty-ide-artifact")
try {
pwArtifact.println(s"ch.epfl.lamp:dotty-language-server_${dlsBinaryVersion}:${dlsVersion}")
} finally {
pwArtifact.close()
}
val mapper = new ObjectMapper
mapper.writerWithDefaultPrettyPrinter()
.writeValue(new File(".dotty-ide.json"), configs.toArray)
origState
}
def compileForIDE = Command.command("compileForIDE") { origState =>
val (dottyVersion, projRefs, dottyState) = dottySetup(origState)
runInAllConfigurations(compile, projRefs, dottyState)
origState
}
override def projectSettings: Seq[Setting[_]] = Seq(
// Use Def.derive so `projectConfig` is only defined in the configurations where the
// tasks/settings it depends on are defined.
Def.derive(projectConfig := {
if (sources.value.isEmpty) None
else {
// Not needed to generate the config, but this guarantees that the
// generated config is usable by an IDE without any extra compilation
// step.
val _ = compile.value
val id = s"${thisProject.value.id}/${configuration.value.name}"
val compilerVersion = scalaVersion.value
val compilerArguments = scalacOptions.value
val sourceDirectories = unmanagedSourceDirectories.value ++ managedSourceDirectories.value
val depClasspath = Attributed.data(dependencyClasspath.value)
val classDir = classDirectory.value
Some(new ProjectConfig(
id,
compilerVersion,
compilerArguments.toArray,
sourceDirectories.toArray,
depClasspath.toArray,
classDir
))
}
})
)
override def buildSettings: Seq[Setting[_]] = Seq(
commands ++= Seq(configureIDE, compileForIDE),
codeCommand := {
Seq("code", "-n")
},
runCode := {
try {
runProcess(codeCommand.value ++ Seq("--install-extension", "lampepfl.dotty"), wait = true)
runProcess(codeCommand.value ++ Seq("."), directory = baseDirectory.value)
} catch {
case ioex: IOException if ioex.getMessage.startsWith("""Cannot run program "code"""") =>
val log = streams.value.log
log.error(
"""Could not find Visual Studio Code on your system.
|Follow the instructions at http://dotty.epfl.ch/docs/usage/ide-support.html
|to install it.""".stripMargin)
throw new FeedbackProvidedException {
override def toString = "Could not find Visual Studio Code on your system."
}
}
}
) ++ addCommandAlias("launchIDE", ";configureIDE;runCode")
}
|
ihji/dotty
|
tests/run/phantom-methods-1.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
fun(phantomFun1())
}
def fun(top: BooAny): Unit = ()
def phantomFun1(): Pinky = boo[Pinky]
}
object Boo extends Phantom {
type BooAny = Boo.Any
type Pinky <: Boo.Any
def boo[B <: Boo.Any]: B = assume
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/localopt/InlineLabelsCalledOnce.scala
|
package dotty.tools.dotc
package transform.localopt
import core.Contexts.Context
import core.Symbols._
import core.Flags._
import transform.SymUtils._
import scala.collection.mutable
import config.Printers.simplify
/** Inlines LabelDefs that are used exactly once.
*
* @author DarkDimius, OlivierBlanvillain
*/
class InlineLabelsCalledOnce extends Optimisation {
import ast.tpd._
val timesUsed = mutable.HashMap[Symbol, Int]()
val defined = mutable.HashMap[Symbol, DefDef]()
def clear(): Unit = {
timesUsed.clear()
defined.clear()
}
def visitor(implicit ctx: Context): Tree => Unit = {
case d: DefDef if d.symbol.is(Label) =>
var isRecursive = false
d.rhs.foreachSubTree { x =>
if (x.symbol == d.symbol)
isRecursive = true
}
if (!isRecursive)
defined.put(d.symbol, d)
case t: Apply if t.symbol.is(Label) =>
val b4 = timesUsed.getOrElseUpdate(t.symbol, 0)
timesUsed.put(t.symbol, b4 + 1)
case _ =>
}
def transformer(implicit ctx: Context): Tree => Tree = {
case a: Apply =>
defined.get(a.symbol) match {
case Some(defDef) if usedOnce(a) && a.symbol.info.paramInfoss == List(Nil) =>
simplify.println(s"Inlining labeldef ${defDef.name}")
defDef.rhs.changeOwner(defDef.symbol, ctx.owner)
case Some(defDef) if defDef.rhs.isInstanceOf[Literal] =>
defDef.rhs
case _ => a
}
case d: DefDef if usedOnce(d) =>
simplify.println(s"Dropping labeldef (used once) ${d.name} ${timesUsed.get(d.symbol)}")
defined.put(d.symbol, d)
EmptyTree
case d: DefDef if neverUsed(d) =>
simplify.println(s"Dropping labeldef (never used) ${d.name} ${timesUsed.get(d.symbol)}")
EmptyTree
case t => t
}
def usedN(t: Tree, n: Int)(implicit ctx: Context): Boolean =
t.symbol.is(Label) &&
timesUsed.getOrElse(t.symbol, 0) == n &&
defined.contains(t.symbol)
def usedOnce(t: Tree)(implicit ctx: Context): Boolean = usedN(t, 1)
def neverUsed(t: Tree)(implicit ctx: Context): Boolean = usedN(t, 0)
}
|
ihji/dotty
|
bot/src/dotty/tools/bot/PullRequestService.scala
|
package dotty.tools
package bot
import org.http4s.{ Status => _, _ }
import org.http4s.client.blaze._
import org.http4s.client.Client
import org.http4s.headers.{ Accept, Authorization }
import cats.syntax.applicative._
import scalaz.concurrent.Task
import scala.util.control.NonFatal
import scala.Function.tupled
import io.circe._
import io.circe.generic.auto._
import io.circe.syntax._
import org.http4s.circe._
import org.http4s.dsl._
import org.http4s.util._
import model.Github._
import model.Drone
import bot.util.TaskIsApplicative._
import bot.util.HttpClientAux._
trait PullRequestService {
/** Username for authorized admin */
def githubUser: String
/** OAuth token needed for user to create statuses */
def githubToken: String
/** OAuth token for drone, needed to cancel builds */
def droneToken: String
/** OAuthed application's "client_id" */
def githubClientId: String
/** OAuthed application's "client_secret" */
def githubClientSecret: String
/** Pull Request HTTP service */
val prService = HttpService {
case GET -> Root / "rate" => {
val client = PooledHttp1Client()
for {
rates <- client.expect(get(rateLimit))(EntityDecoder.text)
resp <- Ok(rates)
_ <- client.shutdown
} yield resp
}
case request @ POST -> Root =>
val githubEvent =
request.headers
.get(CaseInsensitiveString("X-GitHub-Event"))
.map(_.value).getOrElse("")
githubEvent match {
case "pull_request" =>
request.as(jsonOf[Issue]).flatMap(checkPullRequest)
case "issue_comment" =>
request.as(jsonOf[IssueComment]).flatMap(respondToComment)
case "" =>
BadRequest("Missing header: X-Github-Event")
case event =>
BadRequest("Unsupported event: $event")
}
}
private[this] val droneContext = "continuous-integration/drone/pr"
private final case class CLASignature(
user: String,
signed: Boolean,
version: String,
currentVersion: String
)
private[this] val githubUrl = "https://api.github.com"
private[this] def withGithubSecret(url: String, extras: String*): String =
s"$url?client_id=$githubClientId&client_secret=$githubClientSecret" + extras.mkString("&", "&", "")
def rateLimit: String = withGithubSecret("https://api.github.com/rate_limit")
def claUrl(userName: String): String =
s"https://www.lightbend.com/contribute/cla/scala/check/$userName"
def commitsUrl(prNumber: Int): String =
withGithubSecret(s"$githubUrl/repos/lampepfl/dotty/pulls/$prNumber/commits", "per_page=100")
def statusUrl(sha: String): String =
withGithubSecret(s"$githubUrl/repos/lampepfl/dotty/statuses/$sha")
def issueCommentsUrl(issueNbr: Int): String =
withGithubSecret(s"$githubUrl/repos/lampepfl/dotty/issues/$issueNbr/comments")
def reviewUrl(issueNbr: Int): String =
withGithubSecret(s"$githubUrl/repos/lampepfl/dotty/pulls/$issueNbr/reviews")
def contributorsUrl: String =
withGithubSecret("https://api.github.com/repos/lampepfl/dotty/contributors")
sealed trait CommitStatus {
def commit: Commit
def isValid: Boolean
}
final case class Valid(user: Option[String], commit: Commit) extends CommitStatus { def isValid = true }
final case class Invalid(user: String, commit: Commit) extends CommitStatus { def isValid = false }
final case class CLAServiceDown(user: String, commit: Commit) extends CommitStatus { def isValid = false }
final case class MissingUser(commit: Commit) extends CommitStatus { def isValid = false }
final case class InvalidPrevious(users: List[String], commit: Commit) extends CommitStatus { def isValid = false }
/** Partitions invalid and valid commits */
def checkCLA(xs: List[Commit])(implicit client: Client): Task[List[CommitStatus]] = {
def checkUser(user: String): Task[Commit => CommitStatus] = {
val claStatus = for {
claRes <- client.expect(get(claUrl(user)))(jsonOf[CLASignature])
} yield { (commit: Commit) =>
if (claRes.signed) Valid(Some(user), commit)
else Invalid(user, commit)
}
claStatus.handleWith {
case NonFatal(e) =>
println(e)
Task.now((commit: Commit) => CLAServiceDown(user, commit))
}
}
def checkCommit(author: Author, commit: List[Commit]): Task[List[CommitStatus]] =
author.login.map(checkUser)
.getOrElse(Task.now(MissingUser))
.map(f => commit.map(f))
Task.gatherUnordered {
val groupedByAuthor: Map[Author, List[Commit]] = xs.groupBy(_.author)
groupedByAuthor.map(tupled(checkCommit)).toList
}.map(_.flatten)
}
def setStatus(cm: CommitStatus, httpClient: Client): Task[StatusResponse] = {
val desc =
if (cm.isValid) "User signed CLA"
else "User needs to sign cla: https://www.lightbend.com/contribute/cla/scala"
val stat = cm match {
case Valid(Some(user), commit) =>
Status("success", claUrl(user), desc)
case Valid(None, commit) =>
Status("success", "", "All contributors signed CLA")
case Invalid(user, commit) =>
Status("failure", claUrl(user), desc)
case MissingUser(commit) =>
Status("failure", "", "Missing valid github user for this PR")
case CLAServiceDown(user, commit) =>
Status("pending", claUrl(user), "CLA Service is down")
case InvalidPrevious(users, latestCommit) =>
Status("failure", "", users.map("@" + _).mkString(", ") + " have not signed the CLA")
}
for {
req <- post(statusUrl(cm.commit.sha)).withAuth(githubUser, githubToken)
res <- httpClient.expect(req.withBody(stat.asJson))(jsonOf[StatusResponse])
} yield res
}
def sendStatuses(xs: List[CommitStatus], httpClient: Client): Task[List[StatusResponse]] =
Task.gatherUnordered(xs.map(setStatus(_, httpClient)))
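// Matches one entry of a GitHub pagination `Link` header, e.g. (illustrative value):
//   <https://api.github.com/resource?page=2>; rel="next"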
private[this] val ExtractLink = """<([^>]+)>; rel="([^"]+)"""".r
def findNext(header: Option[Header]): Option[String] = header.flatMap { header =>
val value = header.value
value
.split(',')
.collect {
case ExtractLink(url, kind) if kind == "next" =>
url
}
.headOption
}
/** Get all contributors from GitHub */
def getContributors(implicit client: Client): Task[Set[String]] =
for {
authors <- client.expect(get(contributorsUrl))(jsonOf[List[Author]])
logins = authors.map(_.login).flatten
} yield logins.toSet
/** Ordered from earliest to latest */
def getCommits(issueNbr: Int)(implicit httpClient: Client): Task[List[Commit]] = {
def makeRequest(url: String): Task[List[Commit]] =
for {
res <- httpClient.fetch(get(url)) { res =>
val link = CaseInsensitiveString("Link")
val next = findNext(res.headers.get(link)).map(makeRequest).getOrElse(Task.now(Nil))
res.as[List[Commit]](jsonOf[List[Commit]]).flatMap(commits => next.map(commits ++ _))
}
} yield res
makeRequest(commitsUrl(issueNbr))
}
def getComments(issueNbr: Int, httpClient: Client): Task[List[Comment]] =
httpClient.expect(get(issueCommentsUrl(issueNbr)))(jsonOf[List[Comment]])
private def usersFromInvalid(xs: List[CommitStatus]) =
xs.collect { case Invalid(user, _) => user }
def hasBadCommitMessages(commits: List[Commit]): Boolean =
commits.exists { cm =>
val firstLine = cm.commit.message.takeWhile(_ != '\n').trim.toLowerCase
val firstWord = firstLine.takeWhile(x => x != ':' && x != ' ')
val containsColon = firstLine.contains(':')
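// Example (illustrative): a first line of "Added cool feature." gives firstWord "added"
// (wrong tense) and also ends with '.', both of which flag the commit below.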
val wrongTense = firstWord match {
case "added" | "fixed" | "removed" | "moved" | "changed" |
"finalized" | "re-added"
=> true
case "adds" | "fixes" | "removes" | "moves" | "changes" |
"finalizes" | "re-adds"
=> true
case _
=> false
}
wrongTense || firstLine.last == '.' || firstLine.length > 80
}
/** Returns the body of a `ReviewResponse` */
def sendInitialComment(issueNbr: Int,
invalidUsers: List[String],
commits: List[Commit],
newContributors: Boolean)(implicit client: Client): Task[String] = {
val cla = if (invalidUsers.nonEmpty) {
s"""|## CLA ##
|In order for us to be able to accept your contribution, all users
|must sign the Scala CLA.
|
|Users:
|${ invalidUsers.map("@" + _).mkString("- ", "\n- ", "") }
|
|Could you folks please sign the CLA? :pray:
|
|Please do this here: https://www.lightbend.com/contribute/cla/scala
|""".stripMargin
} else "All contributors have signed the CLA, thank you! :heart:"
val commitRules = if (hasBadCommitMessages(commits)) {
"""|## Commit Messages ##
|We want to keep history, but for that to actually be useful we have
|some rules on how to format our commit messages ([relevant xkcd](https://xkcd.com/1296/)).
|
|Please stick to these guidelines for commit messages:
|
|> 1. Separate subject from body with a blank line
|> 1. When fixing an issue, start your commit message with `Fix #<ISSUE-NBR>: `
|> 1. Limit the subject line to 72 characters
|> 1. Capitalize the subject line
|> 1. Do not end the subject line with a period
|> 1. Use the imperative mood in the subject line ("Added" instead of "Add")
|> 1. Wrap the body at 80 characters
|> 1. Use the body to explain what and why vs. how
|>
|> adapted from https://chris.beams.io/posts/git-commit""".stripMargin
} else ""
val body =
s"""|Hello, and thank you for opening this PR! :tada:
|
|$cla
|
|$commitRules
|
|Have an awesome day! :sunny:""".stripMargin
val review = Review.comment(body)
val shouldPost = newContributors || commitRules.nonEmpty || invalidUsers.nonEmpty
for {
req <- post(reviewUrl(issueNbr)).withAuth(githubUser, githubToken)
res <- {
if (shouldPost)
client.expect(req.withBody(review.asJson))(jsonOf[ReviewResponse]).map(_.body)
else
Task.now("")
}
} yield res
}
def checkFreshPR(issue: Issue): Task[Response] = {
implicit val httpClient = PooledHttp1Client()
for {
commits <- getCommits(issue.number)
statuses <- checkCLA(commits)
(validStatuses, invalidStatuses) = statuses.partition(_.isValid)
invalidUsers = usersFromInvalid(invalidStatuses)
// Mark the invalid commits:
_ <- sendStatuses(invalidStatuses, httpClient)
// Set status of last to indicate previous failures or all good:
_ <- {
if (invalidStatuses.nonEmpty)
setStatus(InvalidPrevious(invalidUsers, commits.last), httpClient)
else
setStatus(statuses.last, httpClient)
}
authors = commits.map(_.author.login).flatten.toSet
contribs <- getContributors
newContr = !authors.forall(contribs.contains)
_ <- sendInitialComment(issue.number, invalidUsers, commits, newContr)
_ <- httpClient.shutdown
resp <- Ok("Fresh PR checked")
} yield resp
}
def getStatus(commit: Commit, client: Client): Task[List[StatusResponse]] =
client.expect(get(statusUrl(commit.sha)))(jsonOf[List[StatusResponse]])
private def extractCommitSha(status: StatusResponse): Task[String] =
Task.delay(status.sha)
def startBuild(commit: Commit)(implicit client: Client): Task[Drone.Build] = {
def pendingStatus(targetUrl: String): Status =
Status("pending", targetUrl, "build restarted by bot", droneContext)
def filterStatuses(xs: List[StatusResponse]): Task[Int] =
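// The drone status target_url has the shape http://dotty-ci.epfl.ch/lampepfl/dotty/<build-nbr>
// (see the pending status set further down), so the last path segment is the build number.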
xs.filter { status =>
(status.state == "failure" || status.state == "success") &&
status.context == droneContext
}
.map(status => Task.now(status.target_url.split('/').last.toInt))
.headOption
.getOrElse(Task.fail(new NoSuchElementException("Couldn't find drone build for PR")))
for {
statuses <- getStatus(commit, client)
failed <- filterStatuses(statuses)
build <- Drone.startBuild(failed, droneToken)
setStatusReq <- post(statusUrl(commit.sha)).withAuth(githubUser, githubToken)
newStatus = pendingStatus(s"http://dotty-ci.epfl.ch/lampepfl/dotty/$failed").asJson
_ <- client.expect(setStatusReq.withBody(newStatus))(jsonOf[StatusResponse])
} yield build
}
def cancelBuilds(commits: List[Commit])(implicit client: Client): Task[Boolean] =
Task.gatherUnordered {
commits.map { commit =>
for {
statuses <- getStatus(commit, client)
cancellable = statuses.filter(status => status.state == "pending" && status.context == droneContext)
runningJobs = cancellable.map(_.target_url.split('/').last.toInt)
cancelled <- Task.gatherUnordered(runningJobs.map(Drone.stopBuild(_, droneToken)))
} yield cancelled.forall(identity)
}
}
.map(_.forall(identity))
def checkSynchronize(issue: Issue): Task[Response] = {
implicit val client = PooledHttp1Client()
def extractFailures(c: List[CommitStatus]): List[String] = c.collect {
case Invalid(user, _) =>
s"@$user hasn't signed the CLA"
case MissingUser(commit) =>
s"missing user for commit: ${commit.sha} - correct email associated with GitHub account?"
case CLAServiceDown(user, _) =>
s"couldn't fetch status for: $user"
}
for {
commits <- getCommits(issue.number)
statuses <- checkCLA(commits)
invalid = statuses.filterNot(_.isValid)
_ <- sendStatuses(invalid, client)
_ <- cancelBuilds(commits.dropRight(1))
// Set final commit status based on `invalid`:
_ <- {
if (invalid.nonEmpty)
setStatus(InvalidPrevious(usersFromInvalid(invalid), commits.last), client)
else
setStatus(statuses.last, client)
}
// Send comment regarding recheck:
comment =
if (invalid.isEmpty) "All users have signed the CLA as far as I can tell! :tada:"
else s"There are still some issues:\n\n- ${extractFailures(invalid).mkString("\n- ")}"
req <- post(issueCommentsUrl(issue.number)).withAuth(githubUser, githubToken)
_ <- client.fetch(req.withBody(comment.asJson))(Task.now)
_ <- client.shutdown
resp <- Ok("Updated PR checked")
} yield resp
}
def checkPullRequest(issue: Issue): Task[Response] =
issue.action match {
case Some("opened") => checkFreshPR(issue)
case Some("synchronize") => checkSynchronize(issue)
case Some(action) => BadRequest(s"Unhandled action: $action")
case None => BadRequest("Cannot check pull request, missing action field")
}
def restartCI(issue: Issue): Task[Response] = {
implicit val client = PooledHttp1Client()
def restartedComment: Comment = {
import scala.util.Random
val answers = Array(
"Okidokey, boss! :clap:",
"You got it, homie! :pray:",
"No problem, big shot! :punch:",
"Sure thing, I got your back! :heart:",
"No WAY! :-1: ...wait, don't fire me please! There, I did it! :tada:"
)
Comment(Author(None), answers(Random.nextInt(answers.length)))
}
for {
commits <- getCommits(issue.number)
latest = commits.last
_ <- cancelBuilds(latest :: Nil)
_ <- startBuild(latest)
req <- post(issueCommentsUrl(issue.number)).withAuth(githubUser, githubToken)
_ <- client.fetch(req.withBody(restartedComment.asJson))(Task.now)
res <- Ok("Replied to request for CI restart")
} yield res
}
def cannotUnderstand(line: String, issueComment: IssueComment): Task[Response] = {
implicit val client = PooledHttp1Client()
val comment = Comment(Author(None), {
s"""Hey, sorry - I could not understand what you meant by:
|
|> $line
|
|I'm just a dumb bot after all :cry:
|
|I mostly understand when your mention contains these words:
|
|- (re)check (the) cla
|- recheck
|- restart drone
|
|Maybe if you want to make me smarter, you could open a PR? :heart_eyes:
|""".stripMargin
})
for {
req <- post(issueCommentsUrl(issueComment.issue.number)).withAuth(githubUser, githubToken)
_ <- client.fetch(req.withBody(comment.asJson))(Task.now)
res <- Ok("Delivered could not understand comment")
} yield res
}
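/** Picks the first line of the comment body that addresses the bot,
* e.g. (illustrative) "@dotty-bot: recheck the cla", to be interpreted by `interpretMention`.
*/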
def extractMention(body: String): Option[String] =
body.lines.find(_.startsWith("@dotty-bot:"))
/** Try to make sense of what the user is requesting from the bot
*
* The bot's abilities currently only include:
*
* - Checking or re-checking the CLA
* - Restarting the CI tests
*
* @note The implementation here could be quite elegant if we used a trie
* instead
*/
def interpretMention(line: String, issueComment: IssueComment): Task[Response] = {
val loweredLine = line.toLowerCase
if (loweredLine.contains("check cla") || loweredLine.contains("check the cla"))
checkSynchronize(issueComment.issue)
else if (loweredLine.contains("recheck") || loweredLine.contains("restart drone"))
restartCI(issueComment.issue)
else
cannotUnderstand(line, issueComment)
}
def respondToComment(issueComment: IssueComment): Task[Response] =
extractMention(issueComment.comment.body)
.map(interpretMention(_, issueComment))
.getOrElse(Ok("Nothing to do here, move along!"))
}
|
ihji/dotty
|
library/src/dotty/DottyPredef.scala
|
package dotty
import scala.reflect.ClassTag
import scala.Predef.???
object DottyPredef {
/** A class for implicit values that can serve as implicit conversions
* The implicit resolution algorithm will act as if there existed
* the additional implicit definition:
*
* def $implicitConversion[T, U](x: T)(c: ImplicitConverter[T, U]): U = c(x)
*
* However, the presence of this definition would slow down implicit search since
* its outermost type matches any pair of types. Therefore, implicit search
* contains a special case in `Implicits#discardForView` which emulates the
* conversion in a more efficient way.
*
* Note that this is a SAM class - function literals are automatically converted
* to `ImplicitConverter` values.
*
* Also note that in bootstrapped dotty, `Predef.<:<` should inherit from
* `ImplicitConverter`. This would cut the number of special cases in
* `discardForView` from two to one.
*/
abstract class ImplicitConverter[-T, +U] extends Function1[T, U]
}
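// A minimal usage sketch (illustrative, not part of the original file); the names below are
// hypothetical. Because `ImplicitConverter` is a SAM class, a function literal can provide an
// implicit conversion, which the special case in `Implicits#discardForView` then applies.
object ImplicitConverterExample {
import DottyPredef.ImplicitConverter
implicit val intToStr: ImplicitConverter[Int, String] = i => i.toString
def describe(s: String): String = s
val described: String = describe(42) // 42 is adapted to "42" via intToStr
}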
|
ihji/dotty
|
tests/run/phantom-methods-5.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
pacFun1(boo[Blinky])
pacFun1(boo[Inky])
pacFun1(boo[Pinky])
}
def pacFun1(blinky: Blinky) = {
println("customFun1")
}
}
object Boo extends Phantom {
type Blinky <: Boo.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: Boo.Any]: B = assume
}
|
ihji/dotty
|
tests/neg/phantom-fun-app.scala
|
class phantomFunApp {
import Boo._ // Note: this is dangerous as it imports Boo.Any as Any
def foo1(a: Any) = ???
def foo2(b: BooAny) = ???
foo1(1)
foo1(boo[Blinky]) // error
foo1(boo[Pinky]) // error
foo2(boo[Blinky])
foo2(boo[Pinky])
foo2(1) // error
foo2("abc") // error
foo2(???) // error
}
object Boo extends Phantom {
type BooAny = this.Any
type Blinky <: BooAny
type Pinky <: Blinky
def boo[B <: BooAny]: B = assume
}
|
ihji/dotty
|
tests/neg/phantom-AndOr.scala
|
class BooFunDef1 {
import Boo._
def fun1(b: BooAny | Any) = ??? // error
def fun2(b: BooAny | Any | Any) = ??? // error // error
def fun3(b: Any | BooAny | Any) = ??? // error
def fun4(b: BooAny | BooAny | Any) = ??? // error
def fun5(b: BooAny & Any) = ??? // error
def fun6(b: Any & BooAny & Any) = ??? // error
def fun7(b: BooAny & Any & Any) = ??? // error // error
def fun8(b: Any & Any & BooAny) = ??? // error
}
object Boo extends Phantom {
type BooAny = this.Any
}
|
ihji/dotty
|
tests/neg/phantom-multiversal-type-param-bounds-2.scala
|
class phantomTypeParamBounds2 {
import Universe1._
import UniverseA._
def fun1[X <: One & A] = ??? // error
def fun2[X <: One | A] = ??? // error
def fun3[X >: OneNothing & ANothing] = ??? // error
def fun4[X >: OneNothing | ANothing] = ??? // error
def fun5[X >: One & A <: One & A] = ??? // error // error
}
object Universe1 extends Phantom {
type One <: this.Any
type OneNothing = this.Nothing
}
object UniverseA extends Phantom {
type A <: this.Any
type ANothing = this.Nothing
}
|
ihji/dotty
|
tests/run/phantom-3.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
fun3(boo[Blinky], boo[Pinky])
fun3(boo[Inky], boo[Pinky])
fun3(boo[Pinky], boo[Casper])
}
def fun3(x1: Blinky, x2: Inky): Unit = {
println("fun3")
}
}
object Boo extends Phantom {
type Blinky <: this.Any
type Inky <: Blinky
type Pinky <: Inky
type Casper = Pinky
def boo[B <: Blinky]: B = assume
}
|
ihji/dotty
|
tests/neg/customArgs/phantom-overload.scala
|
class phantomOverload {
import Boo._
import Boo2._
def foo1(): A = nothing
def foo1(): B = nothing // error
def foo1(): C = nothing2 // error
def foo1(): N = nothing // error
def foo2(x: A) = ???
def foo2(x: A) = ??? // error
def foo2(x: B) = ??? // error
def foo2(x: C) = ??? // error
def foo2(x: N) = ??? // error
}
object Boo extends Phantom {
type A <: this.Any
type B <: this.Any
type N = this.Nothing
def nothing: this.Nothing = assume
}
object Boo2 extends Phantom {
type C <: this.Any
def nothing2: this.Nothing = assume
}
|
ihji/dotty
|
tests/run/phantom-poly-2.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
polyfun2(boo[Blinky])
polyfun2(boo[Inky])
polyfun2(boo[Pinky])
}
def polyfun2[G <: Blinky](p: G): Unit = {
println("polyfun2")
}
}
object Boo extends Phantom {
type Blinky <: this.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: this.Any]: B = assume
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/localopt/Varify.scala
|
package dotty.tools.dotc
package transform.localopt
import core._
import core.Contexts.Context
import core.Symbols._
import core.Flags._
import scala.collection.mutable
/** Inline val with exactly one assignment to a var. For example:
*
* {
* val l = <expr>
* // code that may use l
* var r = l
* // code not using l
* }
*
* becomes:
*
* {
* var r = <expr>
* // code that may use l
* // code not using l
* }
*/
class Varify extends Optimisation {
import ast.tpd._
val paramsTimesUsed = mutable.HashMap[Symbol, Int]()
val possibleRenames = mutable.HashMap[Symbol, Set[Symbol]]()
def clear(): Unit = {
paramsTimesUsed.clear()
possibleRenames.clear()
}
def visitor(implicit ctx: Context): Tree => Unit = {
case t: ValDef if t.symbol.is(Param) =>
paramsTimesUsed += (t.symbol -> 0)
case t: ValDef if t.symbol.is(Mutable) =>
t.rhs.foreachSubTree { subtree =>
if (paramsTimesUsed.contains(subtree.symbol) &&
t.symbol.info.widenDealias <:< subtree.symbol.info.widenDealias) {
val newSet = possibleRenames.getOrElse(t.symbol, Set.empty) + subtree.symbol
possibleRenames.put(t.symbol, newSet)
}
}
case t: RefTree if paramsTimesUsed.contains(t.symbol) =>
val param = t.symbol
val current = paramsTimesUsed.get(param)
current foreach { c => paramsTimesUsed += (param -> (c + 1)) }
case _ =>
}
def transformer(implicit ctx: Context): Tree => Tree = {
val paramCandidates = paramsTimesUsed.filter(kv => kv._2 == 1).keySet
val renames: Map[Symbol, Symbol] = possibleRenames.iterator
.map(kv => (kv._1, kv._2.intersect(paramCandidates)))
.filter(x => x._2.nonEmpty)
.map(x => (x._1, x._2.head))
.toMap
val transformation: Tree => Tree = {
case t: RefTree if renames.contains(t.symbol) =>
ref(renames(t.symbol))
case t: ValDef if renames.contains(t.symbol) =>
val replaced = renames(t.symbol)
if (t.rhs.symbol == replaced) EmptyTree
else ref(replaced).becomes(t.rhs)
case t: ValDef if paramCandidates.contains(t.symbol) =>
t.symbol.flags = Mutable
t
case t => t
}
transformation
}
}
|
ihji/dotty
|
tests/neg/phantom-classOf-2.scala
|
class phantomClassOf {
type Blinky <: Boo.BooAny
classOf[Blinky] // error
}
object Boo extends Phantom {
type BooAny = this.Any
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/FirstTransform.scala
|
package dotty.tools.dotc
package transform
import core._
import Names._
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.core.Phases.NeedsCompanions
import dotty.tools.dotc.transform.TreeTransforms._
import ast.Trees._
import Flags._
import Types._
import Constants.Constant
import Contexts.Context
import Symbols._
import SymDenotations._
import Decorators._
import dotty.tools.dotc.core.Annotations.ConcreteAnnotation
import dotty.tools.dotc.core.Denotations.SingleDenotation
import scala.collection.mutable
import DenotTransformers._
import typer.Checking
import NameOps._
import NameKinds.{AvoidClashName, OuterSelectName}
import StdNames._
/** The first tree transform
* - ensures there are companion objects for all classes except module classes
* - eliminates some kinds of trees: Imports, NamedArgs
* - stubs out native methods
* - eliminates self tree in Template and self symbol in ClassInfo
* - collapses all type trees to trees of class TypeTree
- converts idempotent expressions with constant types to literals
* - drops branches of ifs using the rules
* if (true) A else B --> A
* if (false) A else B --> B
*/
class FirstTransform extends MiniPhaseTransform with InfoTransformer with AnnotationTransformer { thisTransformer =>
import ast.tpd._
override def phaseName = "firstTransform"
private var addCompanionPhases: List[NeedsCompanions] = _
override def changesMembers = true // the phase adds companion objects
def needsCompanion(cls: ClassSymbol)(implicit ctx: Context) =
addCompanionPhases.exists(_.isCompanionNeeded(cls))
override def prepareForUnit(tree: tpd.Tree)(implicit ctx: Context): TreeTransform = {
addCompanionPhases = ctx.phasePlan.flatMap(_ collect { case p: NeedsCompanions => p })
this
}
/** eliminate self symbol in ClassInfo */
override def transformInfo(tp: Type, sym: Symbol)(implicit ctx: Context): Type = tp match {
case tp @ ClassInfo(_, _, _, _, self: Symbol) =>
tp.derivedClassInfo(selfInfo = self.info)
case _ =>
tp
}
/*
tp match {
//create companions for value classes that are not from currently compiled source file
case tp@ClassInfo(_, cls, _, decls, _)
if (ValueClasses.isDerivedValueClass(cls)) &&
!sym.isDefinedInCurrentRun && sym.scalacLinkedClass == NoSymbol =>
val newDecls = decls.cloneScope
val (modul, mcMethod, symMethod) = newCompanion(sym.name.toTermName, sym)
modul.entered
mcMethod.entered
newDecls.enter(symMethod)
tp.derivedClassInfo(decls = newDecls)
case _ => tp
}
}
*/
override def checkPostCondition(tree: Tree)(implicit ctx: Context): Unit = {
tree match {
case Select(qual, name) if !name.is(OuterSelectName) && tree.symbol.exists =>
assert(qual.tpe derivesFrom tree.symbol.owner, i"non member selection of ${tree.symbol.showLocated} from ${qual.tpe} in $tree")
case _: TypeTree =>
case _: Import | _: NamedArg | _: TypTree =>
assert(false, i"illegal tree: $tree")
case _ =>
}
}
/** Reorder statements so that module classes always come after their companion classes, add missing companion classes */
private def reorderAndComplete(stats: List[Tree])(implicit ctx: Context): List[Tree] = {
val moduleClassDefs, singleClassDefs = mutable.Map[Name, Tree]()
def reorder(stats: List[Tree]): List[Tree] = stats match {
case (stat: TypeDef) :: stats1 if stat.symbol.isClass =>
if (stat.symbol is Flags.Module) {
moduleClassDefs += (stat.name -> stat)
singleClassDefs -= stat.name.stripModuleClassSuffix
val stats1r = reorder(stats1)
if (moduleClassDefs contains stat.name) stat :: stats1r else stats1r
} else {
def stats1r = reorder(stats1)
val normalized = moduleClassDefs remove stat.name.moduleClassName match {
case Some(mcdef) =>
mcdef :: stats1r
case None =>
singleClassDefs += (stat.name -> stat)
stats1r
}
stat :: normalized
}
case stat :: stats1 => stat :: reorder(stats1)
case Nil => Nil
}
def registerCompanion(name: TermName, forClass: Symbol): TermSymbol = {
val (modul, mcCompanion, classCompanion) = newCompanion(name, forClass)
if (ctx.owner.isClass) modul.enteredAfter(thisTransformer)
mcCompanion.enteredAfter(thisTransformer)
classCompanion.enteredAfter(thisTransformer)
modul
}
def addMissingCompanions(stats: List[Tree]): List[Tree] = stats map {
case stat: TypeDef if (singleClassDefs contains stat.name) && needsCompanion(stat.symbol.asClass) =>
val objName = stat.name.toTermName
val nameClash = stats.exists {
case other: MemberDef =>
other.name == objName && other.symbol.info.isParameterless
case _ =>
false
}
val uniqueName = if (nameClash) AvoidClashName(objName) else objName
Thicket(stat :: ModuleDef(registerCompanion(uniqueName, stat.symbol), Nil).trees)
case stat => stat
}
addMissingCompanions(reorder(stats))
}
private def newCompanion(name: TermName, forClass: Symbol)(implicit ctx: Context) = {
val modul = ctx.newCompleteModuleSymbol(forClass.owner, name, Synthetic, Synthetic,
defn.ObjectType :: Nil, Scopes.newScope, assocFile = forClass.asClass.assocFile)
val mc = modul.moduleClass
val mcComp = ctx.synthesizeCompanionMethod(nme.COMPANION_CLASS_METHOD, forClass, mc)
val classComp = ctx.synthesizeCompanionMethod(nme.COMPANION_MODULE_METHOD, mc, forClass)
(modul, mcComp, classComp)
}
/** eliminate self in Template */
override def transformTemplate(impl: Template)(implicit ctx: Context, info: TransformerInfo): Tree = {
cpy.Template(impl)(self = EmptyValDef)
}
override def transformDefDef(ddef: DefDef)(implicit ctx: Context, info: TransformerInfo) = {
if (ddef.symbol.hasAnnotation(defn.NativeAnnot)) {
ddef.symbol.resetFlag(Deferred)
DefDef(ddef.symbol.asTerm,
_ => ref(defn.Sys_errorR).withPos(ddef.pos)
.appliedTo(Literal(Constant("native method stub"))))
} else ddef
}
override def transformValDef(vdef: tpd.ValDef)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (vdef.tpt.tpe.isPhantom) {
if (vdef.symbol.is(Mutable)) ctx.error("var fields cannot have Phantom types", vdef.pos)
else if (vdef.symbol.hasAnnotation(defn.VolatileAnnot)) ctx.error("Phantom fields cannot be @volatile", vdef.pos)
}
vdef
}
override def transformStats(trees: List[Tree])(implicit ctx: Context, info: TransformerInfo): List[Tree] =
ast.Trees.flatten(reorderAndComplete(trees)(ctx.withPhase(thisTransformer.next)))
override def transformOther(tree: Tree)(implicit ctx: Context, info: TransformerInfo) = tree match {
case tree: Import => EmptyTree
case tree: NamedArg => transform(tree.arg)
case tree => if (tree.isType) TypeTree(tree.tpe).withPos(tree.pos) else tree
}
override def transformIdent(tree: Ident)(implicit ctx: Context, info: TransformerInfo) =
if (tree.isType) TypeTree(tree.tpe).withPos(tree.pos)
else constToLiteral(tree)
override def transformSelect(tree: Select)(implicit ctx: Context, info: TransformerInfo) =
if (tree.isType) TypeTree(tree.tpe).withPos(tree.pos)
else constToLiteral(tree)
override def transformTypeApply(tree: TypeApply)(implicit ctx: Context, info: TransformerInfo) =
constToLiteral(tree)
override def transformApply(tree: Apply)(implicit ctx: Context, info: TransformerInfo) =
constToLiteral(tree)
override def transformTyped(tree: Typed)(implicit ctx: Context, info: TransformerInfo) =
constToLiteral(tree)
override def transformBlock(tree: Block)(implicit ctx: Context, info: TransformerInfo) =
constToLiteral(tree)
override def transformIf(tree: If)(implicit ctx: Context, info: TransformerInfo) =
tree.cond match {
case Literal(Constant(c: Boolean)) => if (c) tree.thenp else tree.elsep
case _ => tree
}
// invariants: all modules have companion objects
// all types are TypeTrees
// all this types are explicit
}
|
ihji/dotty
|
tests/pos/overloaded.scala
|
object overloaded {
def f(x: String): String = x
def f[T >: Null](x: T): Int = 1
val x1 = f("abc")
val x2 = f(new Integer(1))
val x3 = f(null)
val x4: String => String = f
val x5: String => Any = f
val x6: Any = f _
def g(): Int = 1
def g(x: Int): Int = 2
val y1: Int => Int = g
val y2: Any = g _
println(g())
val xs = List("a", "b")
xs.mkString
def map(f: Char => Char): String = ???
def map[U](f: Char => U): Seq[U] = ???
val r1 = map(x => x.toUpper)
val t1: String = r1
val r2 = map(x => x.toInt)
val t2: Seq[Int] = r2
def flatMap(f: Char => String): String = ???
def flatMap[U](f: Char => Seq[U]): Seq[U] = ???
val r3 = flatMap(x => x.toString)
val t3: String = r3
val r4 = flatMap(x => List(x))
val t4: Seq[Char] = r4
def bar(f: (Char, Char) => Unit): Unit = ???
def bar(f: Char => Unit) = ???
bar((x, y) => ())
bar (x => ())
def combine(f: (Char, Int) => Int): Int = ???
def combine(f: (String, Int) => String): String = ???
val r5 = combine((x: Char, y) => x + y)
val t5: Int = r5
val r6 = combine((x: String, y) => x ++ y.toString)
val t6: String = r6
}
|
ihji/dotty
|
tests/idempotency/IdempotencyCheck.scala
|
import java.nio.file.{ Files => JFiles, Path => JPath, Paths => JPaths }
import java.util.stream.{ Stream => JStream }
import scala.collection.JavaConverters._
object IdempotencyCheck {
val blacklisted = Set(
// No fix needed. Bridges on collections in different order. Second one in scala2 order.
"pos/Map/scala/collection/immutable/Map",
"pos/Map/scala/collection/immutable/AbstractMap",
"pos/t1203a/NodeSeq",
"pos/i2345/Whatever"
)
def checkIdempotency(dirPrefix: String): Unit = {
var failed = 0
var total = 0
val groupedBytecodeFiles: List[(JPath, JPath, JPath, JPath)] = {
val bytecodeFiles = {
def bytecodeFiles(paths: JStream[JPath]): List[JPath] = {
def isBytecode(file: String) = file.endsWith(".class") || file.endsWith(".tasty")
paths.iterator.asScala.filter(path => isBytecode(path.toString)).toList
}
val compilerDir1 = JPaths.get(dirPrefix + 1)
val compilerDir2 = JPaths.get(dirPrefix + 2)
bytecodeFiles(JFiles.walk(compilerDir1)) ++ bytecodeFiles(JFiles.walk(compilerDir2))
}
val groups = bytecodeFiles.groupBy(f => f.toString.substring(dirPrefix.length + 1, f.toString.length - 6))
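// The group key drops `dirPrefix` plus the run number (1 or 2) and the 6-character extension
// (".class" or ".tasty"), so both compilation runs' outputs for the same class share a key.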
groups.filterNot(x => blacklisted(x._1)).valuesIterator.flatMap { g =>
def pred(f: JPath, i: Int, isTasty: Boolean) =
f.toString.contains(dirPrefix + i) && f.toString.endsWith(if (isTasty) ".tasty" else ".class")
val class1 = g.find(f => pred(f, 1, isTasty = false))
val class2 = g.find(f => pred(f, 2, isTasty = false))
val tasty1 = g.find(f => pred(f, 1, isTasty = true))
val tasty2 = g.find(f => pred(f, 2, isTasty = true))
assert(class1.isDefined, s"Could not find class in ${dirPrefix + 1} for $class2")
assert(class2.isDefined, s"Could not find class in ${dirPrefix + 2} for $class1")
if (tasty1.isEmpty || tasty2.isEmpty) Nil
else List(Tuple4(class1.get, tasty1.get, class2.get, tasty2.get))
}.toList
}
for ((class1, tasty1, class2, tasty2) <- groupedBytecodeFiles) {
total += 1
val bytes1 = JFiles.readAllBytes(class1)
val bytes2 = JFiles.readAllBytes(class2)
if (!java.util.Arrays.equals(bytes1, bytes2)) {
failed += 1
val tastyBytes1 = JFiles.readAllBytes(tasty1)
val tastyBytes2 = JFiles.readAllBytes(tasty2)
if (java.util.Arrays.equals(tastyBytes1, tastyBytes2))
println(s"Idempotency test failed between $class1 and $class1 (same tasty)")
else
println(s"Idempotency test failed between $tasty1 and $tasty2")
/* Dump bytes to console, could be useful if issue only appears in CI.
* Create the .class locally with JFiles.write(path, Array[Byte](...)) with the printed array
*/
// println(bytes1.mkString("Array[Byte](", ",", ")"))
// println(bytes2.mkString("Array[Byte](", ",", ")"))
}
}
assert(failed == 0, s"Failed $failed idempotency checks (out of $total)")
}
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/reporting/StoreReporter.scala
|
package dotty.tools
package dotc
package reporting
import core.Contexts.Context
import collection.mutable
import config.Printers.typr
import diagnostic.MessageContainer
import diagnostic.messages._
/** This class implements a Reporter that stores all messages
*
* Beware that this reporter can leak memory, and force messages in two
* scenarios:
*
* - During debugging `config.Printers.typr` is set from `noPrinter` to `new
* Printer`, which forces the message
* - The reporter is not flushed and the message containers capture a
* `Context` (about 4MB)
*/
class StoreReporter(outer: Reporter) extends Reporter {
private var infos: mutable.ListBuffer[MessageContainer] = null
def doReport(m: MessageContainer)(implicit ctx: Context): Unit = {
typr.println(s">>>> StoredError: ${m.message}") // !!! DEBUG
if (infos == null) infos = new mutable.ListBuffer
infos += m
}
override def hasPending: Boolean = infos != null && {
infos exists {
case _: Error => true
case _: Warning => true
case _ => false
}
}
override def removeBufferedMessages(implicit ctx: Context): List[MessageContainer] =
if (infos != null) try infos.toList finally infos = null
else Nil
override def errorsReported = hasErrors || (outer != null && outer.errorsReported)
}
|
ihji/dotty
|
tests/neg/phantom-evidence.scala
|
/* This is an example of how to implement =:= using erasable phantom types.
*
* Run this test with
* `run tests/neg/phantomEvidence-1.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*
* See also: ../pos/phantomEvidence-1.scala
*/
/** In this implementation variant of =:= (called =::=) we erase all instantiations and definitions of =::= */
object WithNormalState extends Phantom {
type =::=[From, To] <: this.Any
implicit inline def tpEquals[A]: A =::= A = assume
trait State
sealed trait On extends State
sealed trait Off extends State
object Instance {
def newInstance(): Instance[Off] = new Instance[Off]
}
class Instance[S <: State] private {
def getOnInstance(implicit ev: S =::= Off): Instance[On] = new Instance[On]
def getOffInstance(implicit ev: S =::= On): Instance[Off] = new Instance[Off]
}
def run() = {
val instance = Instance.newInstance()
instance.getOffInstance // error
instance.getOnInstance.getOnInstance // error
}
}
|
ihji/dotty
|
tests/neg/phantom-classOf-1.scala
|
class phantomClassOf {
classOf[BooAny] // error
classOf[BooNothing] // error
}
object Boo extends Phantom {
type BooAny = this.Any
type BooNothing = this.Nothing
}
|
ihji/dotty
|
tests/neg/phantom-type-param-bounds-1.scala
|
class phantomTypeParamBounds1 {
def fun5[X >: Boo.Nothing <: Any] = ??? // error
def fun6[X >: Nothing <: Boo.Any] = ??? // error
}
object Boo extends Phantom {
def boo[B <: this.Any]: B = assume
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/TypeTestsCasts.scala
|
package dotty.tools.dotc
package transform
import core._
import Contexts._, Symbols._, Types._, Constants._, StdNames._, Decorators._
import ast.Trees._
import Erasure.Boxing._
import TypeErasure._
import ValueClasses._
import SymUtils._
import core.Flags._
import util.Positions._
/** This transform normalizes type tests and type casts,
also replacing type tests whose argument type is a singleton type with a reference equality check.
* Any remaining type tests
* - use the object methods $isInstanceOf and $asInstanceOf
* - have a reference type as receiver
* - can be translated directly to machine instructions
*
*
Unfortunately this phase ended up not being Y-checkable unless types are erased. A cast to a ConstantType(3) or x.type
* cannot be rewritten before erasure.
*/
object TypeTestsCasts {
import ast.tpd._
def interceptTypeApply(tree: TypeApply)(implicit ctx: Context): Tree = ctx.traceIndented(s"transforming ${tree.show}", show = true) {
tree.fun match {
case fun @ Select(expr, selector) =>
val sym = tree.symbol
def isPrimitive(tp: Type) = tp.classSymbol.isPrimitiveValueClass
def derivedTree(expr1: Tree, sym: Symbol, tp: Type) =
cpy.TypeApply(tree)(expr1.select(sym).withPos(expr.pos), List(TypeTree(tp)))
def foundCls = expr.tpe.widen.classSymbol
// println(i"ta $tree, found = $foundCls")
def inMatch =
fun.symbol == defn.Any_typeTest || // new scheme
expr.symbol.is(Case) // old scheme
def transformIsInstanceOf(expr:Tree, testType: Type, flagUnrelated: Boolean): Tree = {
def testCls = testType.classSymbol
def unreachable(why: => String) =
if (flagUnrelated)
if (inMatch) ctx.error(em"this case is unreachable since $why", expr.pos)
else ctx.warning(em"this will always yield false since $why", expr.pos)
/** Are `foundCls` and `testCls` classes that allow us to check
* whether a test would always be false?
*/
def isCheckable =
foundCls.isClass && testCls.isClass &&
!(testCls.isPrimitiveValueClass && !foundCls.isPrimitiveValueClass) &&
// if `test` is primitive but `found` is not, we might have a case like
// found = java.lang.Integer, test = Int, which could be true
// (not sure why that is so, but scalac behaves the same way)
!isDerivedValueClass(foundCls) && !isDerivedValueClass(testCls)
// we don't have the logic to handle derived value classes
/** Check whether a runtime test that a value of `foundCls` can be a `testCls`
* can be true in some cases. Issue a warning or an error if that's not the case.
*/
def checkSensical: Boolean =
if (!isCheckable) true
else if (foundCls.isPrimitiveValueClass && !testCls.isPrimitiveValueClass) {
ctx.error("cannot test if value types are references", tree.pos)
false
}
else if (!foundCls.derivesFrom(testCls)) {
if (foundCls.is(Final)) {
unreachable(i"$foundCls is not a subclass of $testCls")
false
}
else if (!testCls.derivesFrom(foundCls) &&
(testCls.is(Final) ||
!testCls.is(Trait) && !foundCls.is(Trait))) {
unreachable(i"$foundCls and $testCls are unrelated")
false
}
else true
}
else true
if (expr.tpe <:< testType)
if (expr.tpe.isNotNull) {
ctx.warning(
em"this will always yield true, since `$foundCls` is a subclass of `$testCls`",
expr.pos)
constant(expr, Literal(Constant(true)))
}
else expr.testNotNull
else if (!checkSensical)
constant(expr, Literal(Constant(false)))
else if (testCls.isPrimitiveValueClass)
if (foundCls.isPrimitiveValueClass)
constant(expr, Literal(Constant(foundCls == testCls)))
else
transformIsInstanceOf(expr, defn.boxedType(testCls.typeRef), flagUnrelated)
else
derivedTree(expr, defn.Any_isInstanceOf, testType)
}
def transformAsInstanceOf(testType: Type): Tree = {
def testCls = testType.widen.classSymbol
if (expr.tpe <:< testType)
Typed(expr, tree.args.head)
else if (foundCls.isPrimitiveValueClass) {
if (testCls.isPrimitiveValueClass) primitiveConversion(expr, testCls)
else derivedTree(box(expr), defn.Any_asInstanceOf, testType)
}
else if (testCls.isPrimitiveValueClass)
unbox(expr.ensureConforms(defn.ObjectType), testType)
else if (isDerivedValueClass(testCls)) {
expr // adaptToType in Erasure will do the necessary type adaptation
}
else
derivedTree(expr, defn.Any_asInstanceOf, testType)
}
/** Transform isInstanceOf OrType
*
* expr.isInstanceOf[A | B] ~~> expr.isInstanceOf[A] | expr.isInstanceOf[B]
* expr.isInstanceOf[A & B] ~~> expr.isInstanceOf[A] & expr.isInstanceOf[B]
*
* The transform happens before erasure of `testType`, thus cannot be merged
* with `transformIsInstanceOf`, which depends on erased type of `testType`.
*/
def transformTypeTest(expr: Tree, testType: Type, flagUnrelated: Boolean): Tree = testType.dealias match {
case _: SingletonType =>
expr.isInstance(testType).withPos(tree.pos)
case OrType(tp1, tp2) =>
evalOnce(expr) { e =>
transformTypeTest(e, tp1, flagUnrelated = false)
.or(transformTypeTest(e, tp2, flagUnrelated = false))
}
case AndType(tp1, tp2) =>
evalOnce(expr) { e =>
transformTypeTest(e, tp1, flagUnrelated)
.and(transformTypeTest(e, tp2, flagUnrelated))
}
case defn.MultiArrayOf(elem, ndims) if isUnboundedGeneric(elem) =>
def isArrayTest(arg: Tree) =
ref(defn.runtimeMethodRef(nme.isArray)).appliedTo(arg, Literal(Constant(ndims)))
if (ndims == 1) isArrayTest(expr)
else evalOnce(expr) { e =>
derivedTree(e, defn.Any_isInstanceOf, e.tpe)
.and(isArrayTest(e))
}
case _ =>
transformIsInstanceOf(expr, erasure(testType), flagUnrelated)
}
if (sym.isTypeTest)
transformTypeTest(expr, tree.args.head.tpe, flagUnrelated = true)
else if (sym eq defn.Any_asInstanceOf)
transformAsInstanceOf(erasure(tree.args.head.tpe))
else tree
case _ =>
tree
}
}
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/Constructors.scala
|
package dotty.tools.dotc
package transform
import core._
import TreeTransforms._
import dotty.tools.dotc.ast.tpd._
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.core.StdNames._
import Phases._
import ast._
import Trees._
import Flags._
import SymUtils._
import Symbols._
import SymDenotations._
import Types._
import Decorators._
import DenotTransformers._
import util.Positions._
import Constants.Constant
import collection.mutable
/** This transform
* - moves initializers from body to constructor.
* - makes all supercalls explicit
* - also moves private fields that are accessed only from constructor
* into the constructor if possible.
*/
class Constructors extends MiniPhaseTransform with IdentityDenotTransformer { thisTransform =>
import tpd._
override def phaseName: String = "constructors"
override def runsAfter: Set[Class[_ <: Phase]] = Set(classOf[HoistSuperArgs])
override def runsAfterGroupsOf: Set[Class[_ <: Phase]] = Set(classOf[Memoize])
// Memoize needs to be finished because we depend on the owner chain after Memoize
// when checking whether an ident is an access in a constructor or outside it.
// This test is done in the right-hand side of a value definition. If Memoize
// was in the same group as Constructors, the test on the rhs ident would be
// performed before the rhs undergoes the owner change. This would lead
// to more symbols being retained as parameters. Test case in run/capturing.scala.
/** The private vals that are known to be retained as class fields */
private val retainedPrivateVals = mutable.Set[Symbol]()
/** The private vals whose definition comes before the current focus */
private val seenPrivateVals = mutable.Set[Symbol]()
// Collect all private parameter accessors and value definitions that need
// to be retained. There are several reasons why a parameter accessor or
// definition might need to be retained:
// 1. It is accessed after the constructor has finished
// 2. It is accessed before it is defined
// 3. It is accessed on an object other than `this`
// 4. It is a mutable parameter accessor
// 5. It has a wildcard initializer `_`
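// For example (a sketch with hypothetical classes):
//   class C(private val x: Int) { def f = x } // rule 1: `x` is read after the constructor, so the field is retained
//   class D(private val x: Int) { println(x) } // `x` is only read from the constructor, so the field can be dropped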
private def markUsedPrivateSymbols(tree: RefTree)(implicit ctx: Context): Unit = {
val sym = tree.symbol
def retain() = retainedPrivateVals.add(sym)
if (sym.exists && sym.owner.isClass && mightBeDropped(sym)) {
val owner = sym.owner.asClass
tree match {
case Ident(_) | Select(This(_), _) =>
def inConstructor = {
val method = ctx.owner.enclosingMethod
method.isPrimaryConstructor && ctx.owner.enclosingClass == owner
}
if (inConstructor &&
(sym.is(ParamAccessor) || seenPrivateVals.contains(sym))) {
// used inside constructor, accessed on this,
// could use constructor argument instead, no need to retain field
}
else retain()
case _ => retain()
}
}
}
override def transformIdent(tree: tpd.Ident)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
markUsedPrivateSymbols(tree)
tree
}
override def transformSelect(tree: tpd.Select)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
markUsedPrivateSymbols(tree)
tree
}
override def transformValDef(tree: tpd.ValDef)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (mightBeDropped(tree.symbol)) seenPrivateVals += tree.symbol
tree
}
/** All initializers for non-lazy fields should be moved into constructor.
* All non-abstract methods should be implemented (this is assured for constructors
* in this phase and for other methods in memoize).
*/
override def checkPostCondition(tree: tpd.Tree)(implicit ctx: Context): Unit = {
tree match {
case tree: ValDef if tree.symbol.exists && tree.symbol.owner.isClass && !tree.symbol.is(Lazy) && !tree.symbol.hasAnnotation(defn.ScalaStaticAnnot) =>
assert(tree.rhs.isEmpty, i"$tree: initializer should be moved to constructors")
case tree: DefDef if !tree.symbol.is(LazyOrDeferred) =>
assert(!tree.rhs.isEmpty, i"unimplemented: $tree")
case _ =>
}
}
/** @return true if after ExplicitOuter, all references from this tree go via an
* outer link, so no parameter accessors need to be rewired to parameters
*/
private def noDirectRefsFrom(tree: Tree)(implicit ctx: Context) =
tree.isDef && tree.symbol.isClass
/** Class members that can be eliminated if referenced only from their own
* constructor.
*/
private def mightBeDropped(sym: Symbol)(implicit ctx: Context) =
sym.is(Private, butNot = MethodOrLazy) && !sym.is(MutableParamAccessor)
private final val MutableParamAccessor = allOf(Mutable, ParamAccessor)
override def transformTemplate(tree: Template)(implicit ctx: Context, info: TransformerInfo): Tree = {
val cls = ctx.owner.asClass
val constr @ DefDef(nme.CONSTRUCTOR, Nil, vparams :: Nil, _, EmptyTree) = tree.constr
// Produce aligned accessors and constructor parameters. We have to adjust
// for any outer parameters, which are last in the sequence of original
// parameter accessors but come first in the constructor parameter list.
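// Sketch (hypothetical inner class): with accessors (a, b, $outer) and constructor
// parameters ($outer, a, b), moving $outer to the back below realigns the two
// sequences index by index.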
val accessors = cls.paramAccessors.filterNot(_.isSetter)
val vparamsWithOuterLast = vparams match {
case vparam :: rest if vparam.name == nme.OUTER => rest ::: vparam :: Nil
case _ => vparams
}
val paramSyms = vparamsWithOuterLast map (_.symbol)
// Adjustments performed when moving code into the constructor:
// (1) Replace references to param accessors by constructor parameters
// except possibly references to mutable variables, if `excluded = Mutable`.
// (Mutable parameters should be replaced only during the super call)
// (2) If the parameter accessor reference was to an alias getter,
// drop the () when replacing by the parameter.
object intoConstr extends TreeMap {
override def transform(tree: Tree)(implicit ctx: Context): Tree = tree match {
case Ident(_) | Select(This(_), _) =>
var sym = tree.symbol
if (sym is (ParamAccessor, butNot = Mutable)) sym = sym.subst(accessors, paramSyms)
if (sym.owner.isConstructor) ref(sym).withPos(tree.pos) else tree
case Apply(fn, Nil) =>
val fn1 = transform(fn)
if ((fn1 ne fn) && fn1.symbol.is(Param) && fn1.symbol.owner.isPrimaryConstructor)
fn1 // in this case, fn1.symbol was an alias for a parameter in a superclass
else cpy.Apply(tree)(fn1, Nil)
case _ =>
if (noDirectRefsFrom(tree)) tree else super.transform(tree)
}
def apply(tree: Tree, prevOwner: Symbol)(implicit ctx: Context): Tree = {
transform(tree).changeOwnerAfter(prevOwner, constr.symbol, thisTransform)
}
}
def isRetained(acc: Symbol) = {
!mightBeDropped(acc) || retainedPrivateVals(acc)
}
val constrStats, clsStats = new mutable.ListBuffer[Tree]
/** Map outer getters $outer and outer accessors $A$B$$$outer to the given outer parameter. */
def mapOuter(outerParam: Symbol) = new TreeMap {
override def transform(tree: Tree)(implicit ctx: Context) = tree match {
case Apply(fn, Nil)
if (fn.symbol.is(OuterAccessor)
|| fn.symbol.isGetter && fn.symbol.name == nme.OUTER
) &&
fn.symbol.info.resultType.classSymbol == outerParam.info.classSymbol =>
ref(outerParam)
case _ =>
super.transform(tree)
}
}
val dropped = mutable.Set[Symbol]()
// Split class body into statements that go into constructor and
// definitions that are kept as members of the class.
def splitStats(stats: List[Tree]): Unit = stats match {
case stat :: stats1 =>
stat match {
case stat @ ValDef(name, tpt, _) if !stat.symbol.is(Lazy) && !stat.symbol.hasAnnotation(defn.ScalaStaticAnnot) =>
val sym = stat.symbol
if (isRetained(sym)) {
if (!stat.rhs.isEmpty && !isWildcardArg(stat.rhs))
constrStats += Assign(ref(sym), intoConstr(stat.rhs, sym)).withPos(stat.pos)
clsStats += cpy.ValDef(stat)(rhs = EmptyTree)
}
else if (!stat.rhs.isEmpty) {
dropped += sym
sym.copySymDenotation(
initFlags = sym.flags &~ Private,
owner = constr.symbol).installAfter(thisTransform)
constrStats += intoConstr(stat, sym)
}
case DefDef(nme.CONSTRUCTOR, _, ((outerParam @ ValDef(nme.OUTER, _, _)) :: _) :: Nil, _, _) =>
clsStats += mapOuter(outerParam.symbol).transform(stat)
case _: DefTree =>
clsStats += stat
case _ =>
constrStats += intoConstr(stat, tree.symbol)
}
splitStats(stats1)
case Nil =>
(Nil, Nil)
}
splitStats(tree.body)
// The initializers for the retained accessors
val copyParams = accessors flatMap { acc =>
if (!isRetained(acc)) {
dropped += acc
Nil
} else {
val target = if (acc.is(Method)) acc.field else acc
if (!target.exists) Nil // this case arises when the parameter accessor is an alias
else {
val param = acc.subst(accessors, paramSyms)
val assigns = Assign(ref(target), ref(param)).withPos(tree.pos) :: Nil
if (acc.name != nme.OUTER) assigns
else {
// insert test: if ($outer eq null) throw new NullPointerException
val nullTest =
If(ref(param).select(defn.Object_eq).appliedTo(Literal(Constant(null))),
Throw(New(defn.NullPointerExceptionClass.typeRef, Nil)),
unitLiteral)
nullTest :: assigns
}
}
}
}
// Drop accessors that are not retained from class scope
if (dropped.nonEmpty) {
val clsInfo = cls.classInfo
cls.copy(
info = clsInfo.derivedClassInfo(
decls = clsInfo.decls.filteredScope(!dropped.contains(_))))
// TODO: this happens to work only because Constructors is the last phase in group
}
val (superCalls, followConstrStats) = constrStats.toList match {
case (sc: Apply) :: rest if sc.symbol.isConstructor => (sc :: Nil, rest)
case stats => (Nil, stats)
}
val mappedSuperCalls = vparams match {
case (outerParam @ ValDef(nme.OUTER, _, _)) :: _ =>
superCalls.map(mapOuter(outerParam.symbol).transform)
case _ => superCalls
}
// Lazy Vals may decide to create an eager val instead of a lazy val
// this val should be assigned before constructor body code starts running
val (lazyAssignments, stats) = followConstrStats.partition {
case Assign(l, r) if l.symbol.name.is(NameKinds.LazyLocalName) => true
case _ => false
}
cpy.Template(tree)(
constr = cpy.DefDef(constr)(
rhs = Block(copyParams ::: mappedSuperCalls ::: lazyAssignments ::: stats, unitLiteral)),
body = clsStats.toList)
}
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/core/Periods.scala
|
package dotty.tools.dotc.core
import Contexts._
import dotty.tools.dotc.util.DotClass
/** Periods are the central "clock" of the compiler.
* A period consists of a run id and a phase id.
* run ids represent compiler runs
* phase ids represent compiler phases
*/
abstract class Periods extends DotClass { self: Context =>
import Periods._
/** The current phase identifier */
def phaseId: Int = period.phaseId
/** The current run identifier */
def runId: Int = period.runId
/** Execute `op` at given period */
def atPeriod[T](pd: Period)(op: Context => T): T =
op(ctx.fresh.setPeriod(pd))
/** Execute `op` at given phase id */
def atPhase[T](pid: PhaseId)(op: Context => T): T =
op(ctx.withPhase(pid))
/** The period containing the current period where denotations do not change.
* We compute this by taking as first phase the first phase less or equal to
* the current phase that has the same "nextTransformerId". As last phase
* we take the next transformer id following the current phase.
*/
def stablePeriod = {
var first = phaseId
val nxTrans = ctx.base.nextDenotTransformerId(first)
while (first - 1 > NoPhaseId && (ctx.base.nextDenotTransformerId(first - 1) == nxTrans)) {
first -= 1
}
Period(runId, first, nxTrans)
}
}
object Periods {
/** A period is a contiguous sequence of phase ids in some run.
* It is coded as follows:
*
* sign, always 0 1 bit
* runid 17 bits
* last phase id: 7 bits
* #phases before last: 7 bits
*
* // Dmitry: sign == 0 isn't actually always true; in some cases phaseId == -1 is used for shifts, which easily creates code < 0
*/
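// Encoding sketch: Period(runId = 3, loPid = 5, hiPid = 9) is represented as
//   ((3 << PhaseWidth | 9) << PhaseWidth) | (9 - 5)
// i.e. the run id in the high bits, the last phase id (9) in the middle 7 bits,
// and the number of phases before the last (4) in the low 7 bits.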
class Period(val code: Int) extends AnyVal {
/** The run identifier of this period. */
def runId: RunId = code >>> (PhaseWidth * 2)
/** The phase identifier of this single-phase period. */
def phaseId: PhaseId = (code >>> PhaseWidth) & PhaseMask
/** The last phase of this period */
def lastPhaseId: PhaseId =
(code >>> PhaseWidth) & PhaseMask
/** The first phase of this period */
def firstPhaseId = lastPhaseId - (code & PhaseMask)
def containsPhaseId(id: PhaseId) = firstPhaseId <= id && id <= lastPhaseId
/** Does this period contain given period? */
def contains(that: Period): Boolean = {
// Let this = (r1, l1, d1), that = (r2, l2, d2)
// where r = runid, l = last phase, d = duration - 1
// Then seen as intervals:
//
// this = r1 / (l1 - d1) .. l1
// that = r2 / (l2 - d2) .. l2
//
// Let's compute:
//
// lastDiff = X * 2^7 + (l1 - l2) mod 2^7
// where X >= 0, X == 0 iff r1 == r2 & l1 - l2 >= 0
// result = lastDiff + d2 <= d1
// We have:
// lastDiff + d2 <= d1
// iff X == 0 && l1 - l2 >= 0 && l1 - l2 + d2 <= d1
// iff r1 == r2 & l1 >= l2 && l1 - d1 <= l2 - d2
// q.e.d
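// Worked sketch: this = Period(run 1, phases 3..6), that = Period(run 1, phases 4..5):
// l1 = 6, d1 = 3, l2 = 5, d2 = 1, so lastDiff = 1 and 1 + 1 <= 3, hence `contains` is true.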
val lastDiff = (code - that.code) >>> PhaseWidth
lastDiff + (that.code & PhaseMask ) <= (this.code & PhaseMask)
}
/** Does this period overlap with given period? */
def overlaps(that: Period): Boolean =
this.runId == that.runId &&
this.firstPhaseId <= that.lastPhaseId &&
that.firstPhaseId <= this.lastPhaseId
/** The intersection of two periods */
def & (that: Period): Period =
if (this overlaps that)
Period(
this.runId,
this.firstPhaseId max that.firstPhaseId,
this.lastPhaseId min that.lastPhaseId)
else
Nowhere
/** The smallest period containing two periods */
def | (that: Period): Period =
Period(this.runId,
this.firstPhaseId min that.firstPhaseId,
this.lastPhaseId max that.lastPhaseId)
override def toString = s"Period($firstPhaseId..$lastPhaseId, run = $runId)"
}
object Period {
/** The single-phase period consisting of given run id and phase id */
def apply(rid: RunId, pid: PhaseId): Period = {
new Period(((rid << PhaseWidth) | pid) << PhaseWidth)
}
/** The period consisting of given run id, and lo/hi phase ids */
def apply(rid: RunId, loPid: PhaseId, hiPid: PhaseId): Period = {
new Period(((rid << PhaseWidth) | hiPid) << PhaseWidth | (hiPid - loPid))
}
/** The interval consisting of all periods of given run id */
def allInRun(rid: RunId) = {
apply(rid, 0, PhaseMask)
}
}
final val Nowhere = new Period(0)
final val InitialPeriod = Period(InitialRunId, FirstPhaseId)
final val InvalidPeriod = Period(NoRunId, NoPhaseId)
/** An ordinal number for compiler runs. First run has number 1. */
type RunId = Int
final val NoRunId = 0
final val InitialRunId = 1
final val RunWidth = java.lang.Integer.SIZE - PhaseWidth * 2 - 1/* sign */
final val MaxPossibleRunId = (1 << RunWidth) - 1
/** An ordinal number for phases. First phase has number 1. */
type PhaseId = Int
final val NoPhaseId = 0
final val FirstPhaseId = 1
/** The number of bits needed to encode a phase identifier. */
final val PhaseWidth = 7
final val PhaseMask = (1 << PhaseWidth) - 1
final val MaxPossiblePhaseId = PhaseMask
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/ExtensionMethods.scala
|
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author <NAME>
*/
package dotty.tools.dotc
package transform
import dotty.tools.dotc.transform.TreeTransforms._
import ValueClasses._
import dotty.tools.dotc.ast.{Trees, tpd}
import scala.collection.{ mutable, immutable }
import mutable.ListBuffer
import core._
import dotty.tools.dotc.core.Phases.{NeedsCompanions, Phase}
import Types._, Contexts._, Constants._, Names._, NameOps._, Flags._, DenotTransformers._
import SymDenotations._, Symbols._, StdNames._, Annotations._, Trees._, Scopes._, Denotations._
import TypeErasure.{ valueErasure, ErasedValueType }
import TypeUtils._
import NameKinds.{ExtMethName, UniqueExtMethName}
import util.Positions._
import Decorators._
import SymUtils._
/**
* Perform Step 1 in the inline classes SIP: Creates extension methods for all
* methods in a value class, except parameter or super accessors, or constructors.
*
* Additionally, for a value class V, let U be the underlying type after erasure. We add
* to the companion module of V two cast methods:
* def u2evt$(x0: U): ErasedValueType(V, U)
* def evt2u$(x0: ErasedValueType(V, U)): U
* The casts are used in [[Erasure]] to make it typecheck; they are then removed
* in [[ElimErasedValueType]].
* This is different from the implementation of value classes in Scala 2
* (see SIP-15) which uses `asInstanceOf` which does not typecheck.
*
* Finally, if the constructor of a value class is private or protected
* it is widened to public.
*/
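// A sketch with a hypothetical value class (names illustrative only):
//   class Meter(val n: Double) extends AnyVal { def plus(other: Meter): Meter = new Meter(n + other.n) }
// this phase adds to the companion object of Meter an extension method roughly of the form
//   def plus$extension($this: Meter, other: Meter): Meter
// together with the cast methods u2evt$ and evt2u$ described above.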
class ExtensionMethods extends MiniPhaseTransform with DenotTransformer with FullParameterization { thisTransformer =>
import tpd._
import ExtensionMethods._
/** the following two members override abstract members in Transform */
override def phaseName: String = "extmethods"
override def runsAfter: Set[Class[_ <: Phase]] = Set(classOf[ElimRepeated])
override def runsAfterGroupsOf = Set(classOf[FirstTransform]) // need companion objects to exist
override def changesMembers = true // the phase adds extension methods
override def transform(ref: SingleDenotation)(implicit ctx: Context): SingleDenotation = ref match {
case moduleClassSym: ClassDenotation if moduleClassSym is ModuleClass =>
moduleClassSym.linkedClass match {
case valueClass: ClassSymbol if isDerivedValueClass(valueClass) =>
val cinfo = moduleClassSym.classInfo
val decls1 = cinfo.decls.cloneScope
val moduleSym = moduleClassSym.symbol.asClass
var newSuperClass: Type = null
ctx.atPhase(thisTransformer.next) { implicit ctx =>
// In Scala 2, extension methods are added before pickling so we should
// not generate them again.
if (!(valueClass is Scala2x)) ctx.atPhase(thisTransformer) { implicit ctx =>
for (decl <- valueClass.classInfo.decls) {
if (isMethodWithExtension(decl)) {
val meth = createExtensionMethod(decl, moduleClassSym.symbol)
decls1.enter(meth)
// Workaround #1895: force denotation of `meth` to be
// at phase where `meth` is entered into the decls of a class
meth.denot(ctx.withPhase(thisTransformer.next))
}
}
}
val underlying = valueErasure(underlyingOfValueClass(valueClass))
val evt = ErasedValueType(valueClass.typeRef, underlying)
val u2evtSym = ctx.newSymbol(moduleSym, nme.U2EVT, Synthetic | Method,
MethodType(List(nme.x_0), List(underlying), evt))
val evt2uSym = ctx.newSymbol(moduleSym, nme.EVT2U, Synthetic | Method,
MethodType(List(nme.x_0), List(evt), underlying))
val defn = ctx.definitions
val underlyingCls = underlying.classSymbol
val underlyingClsName =
if (underlyingCls.isNumericValueClass || underlyingCls == defn.BooleanClass) underlyingCls.name
else nme.Object
val syp = ctx.requiredClass(s"dotty.runtime.vc.VC${underlyingClsName}Companion").asClass
newSuperClass = tpd.ref(syp).select(nme.CONSTRUCTOR).appliedToType(valueClass.typeRef).tpe.resultType
decls1.enter(u2evtSym)
decls1.enter(evt2uSym)
}
// Add the extension methods, the cast methods u2evt$ and evt2u$, and a VC*Companion superclass
moduleClassSym.copySymDenotation(info =
cinfo.derivedClassInfo(
// FIXME: use of VC*Companion superclasses is disabled until the conflicts with SyntheticMethods are solved.
//classParents = ctx.normalizeToClassRefs(List(newSuperClass), moduleSym, decls1),
decls = decls1))
case _ =>
moduleClassSym
}
case ref: SymDenotation =>
if (isMethodWithExtension(ref) && ref.hasAnnotation(defn.TailrecAnnot)) {
val ref1 = ref.copySymDenotation()
ref1.removeAnnotation(defn.TailrecAnnot)
ref1
}
else if (ref.isConstructor && isDerivedValueClass(ref.owner) && ref.is(AccessFlags)) {
val ref1 = ref.copySymDenotation()
ref1.resetFlag(AccessFlags)
ref1
}
else ref
case _ =>
ref
}
protected def rewiredTarget(target: Symbol, derived: Symbol)(implicit ctx: Context): Symbol =
if (isMethodWithExtension(target) &&
target.owner.linkedClass == derived.owner) extensionMethod(target)
else NoSymbol
private def createExtensionMethod(imeth: Symbol, staticClass: Symbol)(implicit ctx: Context): TermSymbol = {
val extensionName = extensionNames(imeth).head.toTermName
val extensionMeth = ctx.newSymbol(staticClass, extensionName,
imeth.flags | Final &~ (Override | Protected | AbsOverride),
fullyParameterizedType(imeth.info, imeth.owner.asClass),
privateWithin = imeth.privateWithin, coord = imeth.coord)
extensionMeth.addAnnotations(imeth.annotations)(ctx.withPhase(thisTransformer))
// need to change phase to add tailrec annotation which gets removed from original method in the same phase.
extensionMeth
}
private val extensionDefs = mutable.Map[Symbol, mutable.ListBuffer[Tree]]()
// TODO: this is state and should be per-run
// todo: check that when transformation finished map is empty
override def transformTemplate(tree: tpd.Template)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (isDerivedValueClass(ctx.owner)) {
/* This is currently redundant since value classes may not
wrap over other value classes anyway.
checkNonCyclic(ctx.owner.pos, Set(), ctx.owner) */
tree
} else if (ctx.owner.isStaticOwner) {
extensionDefs remove tree.symbol.owner match {
case Some(defns) if defns.nonEmpty =>
cpy.Template(tree)(body = tree.body ++
defns.map(transformFollowing(_)))
case _ =>
tree
}
} else tree
}
override def transformDefDef(tree: tpd.DefDef)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (isMethodWithExtension(tree.symbol)) {
val origMeth = tree.symbol
val origClass = ctx.owner.asClass
val staticClass = origClass.linkedClass
assert(staticClass.exists, s"$origClass lacks companion, ${origClass.owner.definedPeriodsString} ${origClass.owner.info.decls} ${origClass.owner.info.decls}")
val extensionMeth = extensionMethod(origMeth)
ctx.log(s"Value class $origClass spawns extension method.\n Old: ${origMeth.showDcl}\n New: ${extensionMeth.showDcl}")
val store: ListBuffer[Tree] = extensionDefs.get(staticClass) match {
case Some(x) => x
case None =>
val newC = new ListBuffer[Tree]()
extensionDefs(staticClass) = newC
newC
}
store += atGroupEnd(fullyParameterizedDef(extensionMeth, tree)(_))
cpy.DefDef(tree)(rhs = atGroupEnd(forwarder(extensionMeth, tree)(_)))
} else tree
}
}
object ExtensionMethods {
/** Generate stream of possible names for the extension version of given instance method `imeth`.
* If the method is not overloaded, this stream consists of just "imeth$extension".
* If the method is overloaded, the stream has as first element "imeth$extensionX", where X is the
* index of imeth in the sequence of overloaded alternatives with the same name. This choice will
* always be picked as the name of the generated extension method.
* After this first choice, all other possible indices in the range of 0 until the number
* of overloaded alternatives are returned. The secondary choices are used to find a matching method
* in `extensionMethod` if the first name has the wrong type. We thereby gain a level of insensitivity
* of how overloaded types are ordered between phases and picklings.
*/
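// Sketch (hypothetical overloads): for the second of two overloaded methods named `bar`
// in a value class, the stream starts with the indexed name for position 1 and then
// falls back to the name for position 0, in case the overloads were ordered differently
// when the extension method was created.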
private def extensionNames(imeth: Symbol)(implicit ctx: Context): Stream[Name] = {
val decl = imeth.owner.info.decl(imeth.name)
/** No longer needed for Dotty, as we are more disciplined with scopes now.
// Bridge generation is done at phase `erasure`, but new scopes are only generated
// for the phase after that. So bridges are visible in earlier phases.
//
// `info.member(imeth.name)` filters these out, but we need to use `decl`
// to restrict ourselves to members defined in the current class, so we
// must do the filtering here.
val declTypeNoBridge = decl.filter(sym => !sym.isBridge).tpe
*/
decl match {
case decl: MultiDenotation =>
val alts = decl.alternatives
val index = alts indexOf imeth.denot
assert(index >= 0, alts + " does not contain " + imeth)
def altName(index: Int) = UniqueExtMethName(imeth.name.asTermName, index)
altName(index) #:: ((0 until alts.length).toStream filter (index != _) map altName)
case decl =>
assert(decl.exists, imeth.name + " not found in " + imeth.owner + "'s decls: " + imeth.owner.info.decls)
Stream(ExtMethName(imeth.name.asTermName))
}
}
/** Return the extension method that corresponds to given instance method `meth`. */
def extensionMethod(imeth: Symbol)(implicit ctx: Context): TermSymbol =
ctx.atPhase(ctx.extensionMethodsPhase.next) { implicit ctx =>
// FIXME use toStatic instead?
val companionInfo = imeth.owner.companionModule.info
val candidates = extensionNames(imeth) map (companionInfo.decl(_).symbol) filter (_.exists)
val matching = candidates filter (c => FullParameterization.memberSignature(c.info) == imeth.signature)
assert(matching.nonEmpty,
i"""no extension method found for:
|
| $imeth:${imeth.info.show} with signature ${imeth.signature}
|
| Candidates:
|
| ${candidates.map(c => c.name + ":" + c.info.show).mkString("\n")}
|
| Candidates (signatures normalized):
|
| ${candidates.map(c => c.name + ":" + c.info.signature + ":" + FullParameterization.memberSignature(c.info)).mkString("\n")}
|
| Eligible Names: ${extensionNames(imeth).mkString(",")}""")
matching.head.asTerm
}
}
|
ihji/dotty
|
tests/run/phantom-poly-3.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
polyfun3(boo[Blinky])
polyfun3(boo[Inky])
polyfun3(boo[Pinky])
}
def polyfun3[G <: BooAny, I <: G](q: I): Unit = {
println("polyfun3")
}
}
object Boo extends Phantom {
type BooAny = this.Any
type Blinky <: this.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: Blinky]: B = assume
}
|
ihji/dotty
|
compiler/test/dotty/tools/dotc/SimplifyTests.scala
|
package dotty.tools.dotc
import org.junit.Assert._
import org.junit.Test
import dotty.tools.backend.jvm._
import dotty.tools.dotc.config.CompilerCommand
import dotty.tools.dotc.core.Contexts.FreshContext
import scala.tools.asm.tree.MethodNode
class SimplifyPosTests extends SimplifyTests(optimise = true)
class SimplifyNegTests extends SimplifyTests(optimise = false)
abstract class SimplifyTests(val optimise: Boolean) extends DottyBytecodeTest {
override protected def initializeCtx(c: FreshContext): Unit = {
super.initializeCtx(c)
if (optimise) {
val flags = Array("-optimise") // :+ "-Xprint:simplify"
val summary = CompilerCommand.distill(flags)(c)
c.setSettings(summary.sstate)
}
}
def check(source: String, expected: String, shared: String = ""): Unit = {
import ASMConverters._
val src =
s"""
$shared
|class A {
| def main(): Unit = {
$source
| }
|}
|class B {
| def main(): Unit = {
$expected
| }
|}
""".stripMargin
checkBCode(src) { dir =>
def instructions(clazz: String): List[Instruction] = {
val clsIn = dir.lookupName(s"$clazz.class", directory = false).input
val clsNode = loadClassNode(clsIn)
instructionsFromMethod(getMethod(clsNode, "main"))
}
val A = instructions("A")
val B = instructions("B")
val diff = diffInstructions(A, B)
if (optimise)
assert(A == B, s"Bytecode doesn't match: (lhs = source, rhs = expected) \n$diff")
else
assert(A != B, s"Same Bytecodes without -optimise: you are testing the wrong thing!")
}
}
@Test def inlineVals =
check("println(1)",
"""
|val one = 1
|val anotherone = one
|println(anotherone)
""")
@Test def inlineCaseIntrinsicsDottyApply =
check(
source = "CC.apply(1, 2)",
expected = "new CC(1, 2)",
shared = "case class CC(i: Int, j: Int)")
@Test def inlineCaseIntrinsicsScalacApply =
check("::.apply(1, Nil)", "new ::(1, Nil)")
@Test def inlineCaseIntrinsicsScalacUnapply =
check(
"""
|val t = Tuple2(1, "s")
|print(Tuple2.unapply(t))
""",
"""
|print(new Some(new Tuple2(1, "s")))
""")
@Test def constantFold =
check(
"""
|val t = true // val needed, or typer takes care of this
|if (t) print(1)
|else print(2)
""",
"""
|print(1)
""")
@Test def dropNoEffects =
check(
"""
|val a = "wow"
|print(1)
""",
"""
|print(1)
""")
@Test def dropNoEffectsTuple =
check("new Tuple2(1, 3)", "")
@Test def inlineLocalObjects =
check(
"""
|val t = new Tuple2(1, 3)
|print(t._1 + t._2)
""",
"""
|val i = 3
|print(1 + i) // Prevents typer from constant folding 1 + 3 to 4
""")
@Test def inlineOptions =
check(
"""
|val sum = Some("s")
|println(sum.isDefined)
""",
"""
|println(true)
""")
// @Test def listPatmapExample =
// check(
// """
// |val l = 1 :: 2 :: Nil
// |l match {
// | case Nil => print("nil")
// | case x :: xs => print(x)
// |}
// """,
// """TODO
// """)
// @Test def fooCCExample =
// check(
// source =
// """
// |val x: Any = new Object {}
// |val (a, b) = x match {
// | case CC(s @ 1, CC(t, _)) =>
// | (s , 2)
// | case _ => (42, 43)
// |}
// |a + b
// """,
// expected =
// """TODO
// """,
// shared = "case class CC(a: Int, b: Object)")
// @Test def booleansFunctionExample =
// check(
// """
// |val a: Any = new Object {}
// |val (b1, b2) = (a.isInstanceOf[String], a.isInstanceOf[List[Int]])
// |(b1, b2) match {
// | case (true, true) => true
// | case (false, false) => true
// | case _ => false
// |}
// """,
// """
// |val a: Any = new Object {}
// |val bl = a.isInstanceOf[List[_]]
// |val bl2 = a.isInstanceOf[String]
// |if (true == bl2 && true == bl)
// | true
// |else
// | false == bl2 && false == bl
// """)
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/repl/ConsoleWriter.scala
|
package dotty.tools
package dotc
package repl
import java.io.Writer
/** A Writer that writes onto the Scala Console.
*
* @author <NAME>
* @version 1.0
*/
class ConsoleWriter extends Writer {
def close() = flush()
def flush() = Console.flush()
def write(cbuf: Array[Char], off: Int, len: Int): Unit =
if (len > 0)
write(new String(cbuf, off, len))
override def write(str: String): Unit = Console.print(str)
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/ElimErasedValueType.scala
|
package dotty.tools.dotc
package transform
import ast.{Trees, tpd}
import core._, core.Decorators._
import TreeTransforms._, Phases.Phase
import Types._, Contexts._, Constants._, Names._, NameOps._, Flags._, DenotTransformers._
import SymDenotations._, Symbols._, StdNames._, Annotations._, Trees._, Scopes._, Denotations._
import TypeErasure.ErasedValueType, ValueClasses._
/** This phase erases ErasedValueType to its underlying type.
* It also removes the synthetic cast methods u2evt$ and evt2u$ which are
* no longer needed afterwards.
* Finally, it checks that we don't introduce "double definitions" of pairs
* of methods that now have the same signature but were not considered matching
* before erasure.
*/
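// A sketch with a hypothetical value class `Meter` wrapping a Double: a signature that
// erased to ErasedValueType(Meter, Double) is rewritten here to plain Double, and the
// synthetic casts u2evt$ / evt2u$ added for Erasure are unlinked from the companion below.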
class ElimErasedValueType extends MiniPhaseTransform with InfoTransformer {
import tpd._
override def phaseName: String = "elimErasedValueType"
override def runsAfter: Set[Class[_ <: Phase]] = Set(classOf[Erasure])
def transformInfo(tp: Type, sym: Symbol)(implicit ctx: Context): Type = sym match {
case sym: ClassSymbol if sym is ModuleClass =>
sym.companionClass match {
case origClass: ClassSymbol if isDerivedValueClass(origClass) =>
val cinfo = tp.asInstanceOf[ClassInfo]
val decls1 = cinfo.decls.cloneScope
ctx.atPhase(this.next) { implicit ctx =>
// Remove synthetic cast methods introduced by ExtensionMethods,
// they are no longer needed after this phase.
decls1.unlink(cinfo.decl(nme.U2EVT).symbol)
decls1.unlink(cinfo.decl(nme.EVT2U).symbol)
}
cinfo.derivedClassInfo(decls = decls1)
case _ =>
tp
}
case _ =>
elimEVT(tp)
}
def elimEVT(tp: Type)(implicit ctx: Context): Type = tp match {
case ErasedValueType(_, underlying) =>
elimEVT(underlying)
case tp: MethodType =>
val paramTypes = tp.paramInfos.mapConserve(elimEVT)
val retType = elimEVT(tp.resultType)
tp.derivedLambdaType(tp.paramNames, paramTypes, retType)
case _ =>
tp
}
def transformTypeOfTree(tree: Tree)(implicit ctx: Context): Tree =
tree.withType(elimEVT(tree.tpe))
override def transformApply(tree: Apply)(implicit ctx: Context, info: TransformerInfo): Tree = {
val Apply(fun, args) = tree
// The casts to and from ErasedValueType are no longer needed once ErasedValueType
// has been eliminated.
val t =
if (fun.symbol.isValueClassConvertMethod)
args.head
else
tree
transformTypeOfTree(t)
}
/** Check that we don't have pairs of methods that override each other after
* this phase, yet do not have matching types before erasure.
* The before erasure test is performed after phase elimRepeated, so we
* do not need to special case pairs of `T* / Seq[T]` parameters.
*/
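// Sketch (hypothetical value class `Meter` over Double): the pair
//   def f(x: Meter): Int   and   def f(x: Double): Int
// did not match before erasure, but both erase to (Double)Int once ErasedValueType is
// gone, so checkNoConflict below reports them as a double definition.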
private def checkNoClashes(root: Symbol)(implicit ctx: Context) = {
val opc = new OverridingPairs.Cursor(root) {
override def exclude(sym: Symbol) =
!sym.is(Method) || sym.is(Bridge) || super.exclude(sym)
override def matches(sym1: Symbol, sym2: Symbol) =
sym1.signature == sym2.signature
}
def checkNoConflict(sym1: Symbol, sym2: Symbol, info: Type)(implicit ctx: Context): Unit = {
val site = root.thisType
val info1 = site.memberInfo(sym1)
val info2 = site.memberInfo(sym2)
if (!info1.matchesLoosely(info2))
ctx.error(
em"""double definition:
|$sym1: $info1 in ${sym1.owner} and
|$sym2: $info2 in ${sym2.owner}
|have same type after erasure: $info""",
root.pos)
}
val earlyCtx = ctx.withPhase(ctx.elimRepeatedPhase.next)
while (opc.hasNext) {
val sym1 = opc.overriding
val sym2 = opc.overridden
checkNoConflict(sym1, sym2, sym1.info)(earlyCtx)
opc.next()
}
}
override def transformTypeDef(tree: TypeDef)(implicit ctx: Context, info: TransformerInfo): Tree = {
checkNoClashes(tree.symbol)
tree
}
override def transformInlined(tree: Inlined)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
// FIXME: transformIf and transformBlock won't be required anymore once #444 is fixed.
override def transformIdent(tree: Ident)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
override def transformSelect(tree: Select)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
override def transformBlock(tree: Block)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
override def transformIf(tree: If)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
override def transformTypeTree(tree: TypeTree)(implicit ctx: Context, info: TransformerInfo): Tree =
transformTypeOfTree(tree)
}
|
ihji/dotty
|
tests/run/phantom-hk-2.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
type HKPhantom[X <: BooAny] = X
def main(args: Array[String]): Unit = {
fun(hkFun2(boo[Blinky]))
fun(hkFun2(boo[Inky]))
fun(hkFun2(boo[Pinky]))
}
def fun(top: BooAny): Unit = println("hk2")
def hkFun2[Y <: BooAny](p10: HKPhantom[Y]): HKPhantom[Y] = p10
}
object Boo extends Phantom {
type BooAny = Boo.Any
type Blinky <: Boo.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: Boo.Any]: B = assume
}
|
ihji/dotty
|
compiler/src/dotty/tools/dotc/transform/CheckPhantomCast.scala
|
package dotty.tools.dotc
package transform
import core._
import dotty.tools.dotc.transform.TreeTransforms.{MiniPhaseTransform, TransformerInfo}
import Types._
import Contexts.Context
import Symbols._
import Decorators._
import dotty.tools.dotc.ast.Trees._
import dotty.tools.dotc.ast.tpd
/** A no-op transform to ensure that the compiled sources have no Phantom types in casts */
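// A sketch of what gets rejected (hypothetical phantom type `Inky`): given
//   object Boo extends Phantom { type Inky <: this.Any }
// an expression such as x.asInstanceOf[Inky], or a pattern `case y: Inky =>`,
// is reported below as "Cannot cast type containing a phantom type".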
class CheckPhantomCast extends MiniPhaseTransform { thisTransformer =>
override def phaseName = "checkPhantomCast"
override def checkPostCondition(tree: tpd.Tree)(implicit ctx: Context): Unit = {
tree match {
case TypeApply(fun, targs) if fun.symbol eq defn.Any_asInstanceOf => assert(!containsPhantom(targs.head.tpe))
case Bind(_, Typed(_, tpt)) => assert(!containsPhantom(tpt.tpe))
case _ =>
}
}
override def transformTypeApply(tree: tpd.TypeApply)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (tree.fun.symbol eq defn.Any_asInstanceOf)
checkNoPhantoms(tree.args.head)
tree
}
override def transformBind(tree: tpd.Bind)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
tree.body match {
case Typed(_, tpt) => checkNoPhantoms(tpt)
case _ =>
}
tree
}
private def checkNoPhantoms(tpTree: tpd.Tree)(implicit ctx: Context): Unit = {
if (containsPhantom(tpTree.tpe))
ctx.error("Cannot cast type containing a phantom type", tpTree.pos)
}
private def containsPhantom(tp: Type)(implicit ctx: Context): Boolean = new TypeAccumulator[Boolean] {
override def apply(x: Boolean, tp: Type): Boolean = x || tp.isPhantom || foldOver(false, tp)
}.apply(x = false, tp)
}
|
ihji/dotty
|
compiler/test/dotc/tests.scala
|
package dotc
import dotty.Jars
import dotty.tools.dotc.CompilerTest
import dotty.tools.StdLibSources
import org.junit.experimental.categories.Category
import org.junit.{Before, Test}
import org.junit.Assert._
import java.io.{ File => JFile }
import dotty.tools.io.Directory
import scala.io.Source
/** WARNING
* =======
* These are legacy, do not add tests here, see `CompilationTests.scala`
*/
@Category(Array(classOf[java.lang.Exception]))
class tests extends CompilerTest {
// tests that match regex '(pos|dotc|run|java|compileStdLib)\.*' would be
// executed as benchmarks.
val defaultOutputDir = "../out/"
val noCheckOptions = List(
// "-verbose",
// "-Ylog:frontend",
// "-Xprompt",
// "-explaintypes",
// "-Yshow-suppressed-errors",
"-pagewidth", "120",
"-d", defaultOutputDir
)
val checkOptions = List(
"-Yno-deep-subtypes",
"-Yno-double-bindings",
"-Yforce-sbt-phases",
"-color:never"
)
val classPath = {
val paths = Jars.dottyTestDeps map { p =>
val file = new JFile(p)
assert(
file.exists,
s"""|File "$p" couldn't be found. Run `packageAll` from build tool before
|testing.
|
|If running without sbt, test paths need to be setup environment variables:
|
| - DOTTY_LIBRARY
| - DOTTY_COMPILER
| - DOTTY_INTERFACES
| - DOTTY_EXTRAS
|
|Where these all contain locations, except extras which is a colon
|separated list of jars.
|
|When compiling with eclipse, you need the sbt-interfaces jar, put
|it in extras."""
)
file.getAbsolutePath
} mkString (":")
List("-classpath", paths)
}
implicit val defaultOptions: List[String] = noCheckOptions ++ {
if (dotty.Properties.isRunByDrone) List("-Ycheck:tailrec,resolveSuper,mixin,restoreScopes,labelDef,simplify") // should be Ycheck:all, but #725
else List("-Ycheck:tailrec,resolveSuper,mixin,restoreScopes,labelDef,simplify")
} ++ checkOptions ++ classPath
val testPickling = List("-Xprint-types", "-Ytest-pickler", "-Ystop-after:pickler", "-Yprintpos")
val twice = List("#runs", "2")
val staleSymbolError: List[String] = List()
val allowDeepSubtypes = defaultOptions diff List("-Yno-deep-subtypes")
val allowDoubleBindings = defaultOptions diff List("-Yno-double-bindings")
val scala2mode = List("-language:Scala2")
val explicitUTF8 = List("-encoding", "UTF8")
val explicitUTF16 = List("-encoding", "UTF16")
val testsDir = "../tests/"
val posDir = testsDir + "pos/"
val posSpecialDir = testsDir + "pos-special/"
val posScala2Dir = testsDir + "pos-scala2/"
val negDir = testsDir + "neg/"
val runDir = testsDir + "run/"
val newDir = testsDir + "new/"
val javaDir = testsDir + "pos-java-interop/"
val sourceDir = "./src/"
val dottyDir = sourceDir + "dotty/"
val toolsDir = dottyDir + "tools/"
val backendDir = toolsDir + "backend/"
val dotcDir = toolsDir + "dotc/"
val coreDir = dotcDir + "core/"
val parsingDir = dotcDir + "parsing/"
val dottyReplDir = dotcDir + "repl/"
val typerDir = dotcDir + "typer/"
val libDir = "../library/src/"
def dottyBootedLib = compileDir(libDir, ".", List("-deep", "-Ycheck-reentrant", "-strict") ::: defaultOptions)(allowDeepSubtypes) // note the -deep argument
def dottyDependsOnBootedLib = compileDir(dottyDir, ".", List("-deep", "-Ycheck-reentrant") ::: defaultOptions)(allowDeepSubtypes) // note the -deep argument
@Before def cleanup(): Unit = {
// remove class files from stdlib and tests compilation
Directory(defaultOutputDir + "scala").deleteRecursively()
Directory(defaultOutputDir + "java").deleteRecursively()
}
@Test def pickle_pickleOK = compileFiles(testsDir + "pickling/", testPickling)
// This directory doesn't exist anymore
// @Test def pickle_pickling = compileDir(coreDir, "pickling", testPickling)
@Test def pickle_ast = compileDir(dotcDir, "ast", testPickling)
@Test def pickle_inf = compileFile(posDir, "pickleinf", testPickling)
//@Test def pickle_core = compileDir(dotcDir, "core", testPickling, xerrors = 2) // two spurious comparison errors in Types and TypeOps
@Test def pos_arraycopy =
compileFile(runDir, "arraycopy", List("-Ylog-classpath"))
@Test def pos_t2168_pat = compileFile(posDir, "t2168", twice)
@Test def pos_erasure = compileFile(posDir, "erasure", twice)
@Test def pos_Coder() = compileFile(posDir, "Coder", twice)
@Test def pos_blockescapes() = compileFile(posDir, "blockescapes", twice)
@Test def pos_collections() = compileFile(posDir, "collections", twice)
@Test def pos_functions1() = compileFile(posDir, "functions1", twice)
@Test def pos_implicits1() = compileFile(posDir, "implicits1", twice)
@Test def pos_inferred() = compileFile(posDir, "inferred", twice)
@Test def pos_Patterns() = compileFile(posDir, "Patterns", twice)
@Test def pos_selftypes() = compileFile(posDir, "selftypes", twice)
@Test def pos_varargs() = compileFile(posDir, "varargs", twice)
@Test def pos_vararg_patterns() = compileFile(posDir, "vararg-pattern", twice)
@Test def pos_opassign() = compileFile(posDir, "opassign", twice)
@Test def pos_typedapply() = compileFile(posDir, "typedapply", twice)
@Test def pos_nameddefaults() = compileFile(posDir, "nameddefaults", twice)
@Test def pos_desugar() = compileFile(posDir, "desugar", twice)
@Test def pos_sigs() = compileFile(posDir, "sigs", twice)
@Test def pos_typers() = compileFile(posDir, "typers", twice)
@Test def pos_typedIdents() = compileDir(posDir, "typedIdents", twice)
@Test def pos_assignments() = compileFile(posDir, "assignments", twice)
@Test def pos_packageobject() = compileFile(posDir, "packageobject", twice)
@Test def pos_overloaded() = compileFile(posDir, "overloaded", twice)
@Test def pos_overrides() = compileFile(posDir, "overrides", twice)
@Test def pos_javaOverride() = compileDir(posDir, "java-override", twice)
@Test def pos_templateParents() = compileFile(posDir, "templateParents", twice)
@Test def pos_overloadedAccess = compileFile(posDir, "overloadedAccess", twice)
@Test def pos_approximateUnion = compileFile(posDir, "approximateUnion", twice)
@Test def pos_tailcall = compileDir(posDir, "tailcall", twice)
@Test def pos_valueclasses = compileFiles(posDir + "pos_valueclasses/", twice)
@Test def pos_nullarify = compileFile(posDir, "nullarify", args = "-Ycheck:nullarify" :: Nil)
@Test def pos_subtyping = compileFile(posDir, "subtyping", twice)
@Test def pos_packageObj = compileFile(posDir, "i0239", twice)
@Test def pos_anonClassSubtyping = compileFile(posDir, "anonClassSubtyping", twice)
@Test def pos_extmethods = compileFile(posDir, "extmethods", twice)
@Test def pos_companions = compileFile(posDir, "companions", twice)
@Test def posVarargsT1625 = compileFiles(posDir + "varargsInMethodsT1625/")
@Test def pos_all = compileFiles(posDir) // twice omitted to make tests run faster
@Test def pos_scala2_all = compileFiles(posScala2Dir, scala2mode)
@Test def rewrites = compileFile(posScala2Dir, "rewrites", "-rewrite" :: scala2mode)
@Test def pos_t8146a = compileFile(posSpecialDir, "t8146a")(allowDeepSubtypes)
@Test def pos_jon = compileFile(posSpecialDir, "jon")(allowDeepSubtypes)
@Test def pos_t5545 = {
// compile by hand in two batches, since junit lacks the infrastructure to
// compile files in multiple batches according to _1, _2, ... suffixes.
compileFile(posSpecialDir, "spec-t5545/S_1")
compileFile(posSpecialDir, "spec-t5545/S_2")
}
@Test def pos_utf8 = compileFile(posSpecialDir, "utf8encoded", explicitUTF8)
@Test def pos_utf16 = compileFile(posSpecialDir, "utf16encoded", explicitUTF16)
@Test def new_all = compileFiles(newDir, twice)
@Test def neg_all = compileFiles(negDir, verbose = true, compileSubDirs = false)
@Test def neg_typedIdents() = compileDir(negDir, "typedIdents")
@Test def negVarargsT1625 = compileFiles(negDir + "varargsInMethodsT1625/")
val negCustomArgs = negDir + "customArgs/"
@Test def neg_typers() = compileFile(negCustomArgs, "typers")(allowDoubleBindings)
@Test def neg_overrideClass = compileFile(negCustomArgs, "overrideClass", scala2mode)
@Test def neg_autoTupling = compileFile(negCustomArgs, "autoTuplingTest", args = "-language:noAutoTupling" :: Nil)
@Test def neg_i1050 = compileFile(negCustomArgs, "i1050", List("-strict"))
@Test def neg_i1240 = compileFile(negCustomArgs, "i1240")(allowDoubleBindings)
@Test def neg_i2002 = compileFile(negCustomArgs, "i2002")(allowDoubleBindings)
@Test def neg_valueclasses_doubledefs = compileFile(negCustomArgs, "valueclasses-doubledefs")(allowDoubleBindings)
@Test def neg_valueclasses_doubledefs2 = compileFile(negCustomArgs, "valueclasses-doubledefs2")(allowDoubleBindings)
@Test def neg_valueclasses_pavlov = compileFile(negCustomArgs, "valueclasses-pavlov")(allowDoubleBindings)
@Test def neg_trailingUnderscore = compileFile(negCustomArgs, "trailingUnderscore", args = "-strict" :: Nil)
val negTailcallDir = negDir + "tailcall/"
@Test def neg_tailcall_t1672b = compileFile(negTailcallDir, "t1672b")
@Test def neg_tailcall_t3275 = compileFile(negTailcallDir, "t3275")
@Test def neg_tailcall_t6574 = compileFile(negTailcallDir, "t6574")
@Test def neg_tailcall = compileFile(negTailcallDir, "tailrec")
@Test def neg_tailcall2 = compileFile(negTailcallDir, "tailrec-2")
@Test def neg_tailcall3 = compileFile(negTailcallDir, "tailrec-3")
@Test def neg_nopredef = compileFile(negCustomArgs, "nopredef", List("-Yno-predef"))
@Test def neg_noimports = compileFile(negCustomArgs, "noimports", List("-Yno-imports"))
@Test def neg_noimpots2 = compileFile(negCustomArgs, "noimports2", List("-Yno-imports"))
@Test def run_all = runFiles(runDir)
private val stdlibFiles: List[String] = StdLibSources.whitelisted
@Test def compileStdLib =
compileList("compileStdLib", stdlibFiles, "-migration" :: "-Yno-inline" :: scala2mode)
@Test def compileMixed = compileLine(
"""../tests/pos/B.scala
|../scala2-library/src/library/scala/collection/immutable/Seq.scala
|../scala2-library/src/library/scala/collection/parallel/ParSeq.scala
|../scala2-library/src/library/scala/package.scala
|../scala2-library/src/library/scala/collection/GenSeqLike.scala
|../scala2-library/src/library/scala/collection/SeqLike.scala
|../scala2-library/src/library/scala/collection/generic/GenSeqFactory.scala""".stripMargin)
@Test def compileIndexedSeq = compileLine("../scala2-library/src/library/scala/collection/immutable/IndexedSeq.scala")
@Test def compileParSetLike = compileLine("../scala2-library/src/library/scala/collection/parallel/mutable/ParSetLike.scala")
@Test def compileParSetSubset = compileLine(
"""../scala2-library/src/library/scala/collection/parallel/mutable/ParSetLike.scala
|../scala2-library/src/library/scala/collection/parallel/mutable/ParSet.scala
|../scala2-library/src/library/scala/collection/mutable/SetLike.scala""".stripMargin)(scala2mode ++ defaultOptions)
@Test def dottyBooted = {
dottyBootedLib
dottyDependsOnBootedLib
}
@Test def dotc_ast = compileDir(dotcDir, "ast")
@Test def dotc_config = compileDir(dotcDir, "config")
@Test def dotc_core = compileDir(dotcDir, "core")(allowDeepSubtypes)// twice omitted to make tests run faster
@Test def dotc_core_nocheck = compileDir(dotcDir, "core")(noCheckOptions ++ classPath)
// This directory doesn't exist anymore
// @Test def dotc_core_pickling = compileDir(coreDir, "pickling")(allowDeepSubtypes)// twice omitted to make tests run faster
@Test def dotc_transform = compileDir(dotcDir, "transform")(allowDeepSubtypes)// twice omitted to make tests run faster
@Test def dotc_parsing = compileDir(dotcDir, "parsing") // twice omitted to make tests run faster
@Test def dotc_printing = compileDir(dotcDir, "printing") // twice omitted to make tests run faster
@Test def dotc_reporting = compileDir(dotcDir, "reporting") // twice omitted to make tests run faster
@Test def dotc_typer = compileDir(dotcDir, "typer")// twice omitted to make tests run faster
// error: error while loading Checking$$anon$2$,
// class file 'target/scala-2.11/dotty_2.11-0.1.1-bin-SNAPSHOT.jar(dotty/tools/dotc/typer/Checking$$anon$2.class)'
// has location not matching its contents: contains class $anon
@Test def dotc_util = compileDir(dotcDir, "util") // twice omitted to make tests run faster
@Test def tools_io = compileDir(toolsDir, "io") // inner class has symbol <none>
@Test def helloWorld = compileFile(posDir, "HelloWorld")
@Test def labels = compileFile(posDir, "Labels", twice)
//@Test def tools = compileDir(dottyDir, "tools", "-deep" :: Nil)(allowDeepSubtypes)
@Test def testNonCyclic = compileList("testNonCyclic", List(
dotcDir + "CompilationUnit.scala",
coreDir + "Types.scala",
dotcDir + "ast/Trees.scala"
), List("-Xprompt") ++ staleSymbolError ++ twice)
@Test def testIssue_34 = compileList("testIssue_34", List(
dotcDir + "config/Properties.scala",
dotcDir + "config/PathResolver.scala"
), List(/* "-Ylog:frontend", */ "-Xprompt") ++ staleSymbolError ++ twice)
@Test def java_all = compileFiles(javaDir, twice)
//@Test def dotc_compilercommand = compileFile(dotcDir + "config/", "CompilerCommand")
//TASTY tests
@Test def tasty_new_all = compileFiles(newDir, testPickling)
@Test def tasty_dotty = compileDir(sourceDir, "dotty", testPickling)
// Disabled because we get stale symbol errors on the SourceFile annotation, which is normal.
// @Test def tasty_annotation_internal = compileDir(s"${dottyDir}annotation/", "internal", testPickling)
@Test def tasty_runtime = compileDir(s"${libDir}dotty/", "runtime", testPickling)
@Test def tasty_runtime_vc = compileDir(s"${libDir}dotty/runtime/", "vc", testPickling)
@Test def tasty_tools = compileDir(dottyDir, "tools", testPickling)
//TODO: issue with ./src/dotty/tools/backend/jvm/DottyBackendInterface.scala
@Test def tasty_backend_jvm = compileList("tasty_backend_jvm", List(
"CollectEntryPoints.scala", "GenBCode.scala", "LabelDefs.scala",
"scalaPrimitives.scala"
) map (s"${backendDir}jvm/" + _), testPickling)
//@Test def tasty_backend_sjs = compileDir(s"${backendDir}", "sjs", testPickling)
@Test def tasty_dotc = compileDir(toolsDir, "dotc", testPickling)
@Test def tasty_dotc_ast = compileDir(dotcDir, "ast", testPickling)
@Test def tasty_dotc_config = compileDir(dotcDir, "config", testPickling)
//TODO: issue with ./src/dotty/tools/dotc/core/Types.scala
@Test def tasty_core = compileList("tasty_core", List(
"Annotations.scala", "Constants.scala", "Constraint.scala", "ConstraintHandling.scala",
"ConstraintRunInfo.scala", "Contexts.scala", "Decorators.scala", "Definitions.scala",
"DenotTransformers.scala", "Denotations.scala", "Flags.scala", "Hashable.scala",
"NameOps.scala", "Names.scala", "OrderingConstraint.scala", "Periods.scala",
"Phases.scala", "Scopes.scala", "Signature.scala", "StdNames.scala",
"Substituters.scala", "SymDenotations.scala", "SymbolLoaders.scala", "Symbols.scala",
"TypeApplications.scala", "TypeComparer.scala", "TypeErasure.scala", "TypeOps.scala",
"TyperState.scala", "Uniques.scala"
) map (coreDir + _), testPickling)
@Test def tasty_classfile = compileDir(coreDir, "classfile", testPickling)
@Test def tasty_tasty = compileDir(coreDir, "tasty", testPickling)
@Test def tasty_unpickleScala2 = compileDir(coreDir, "unpickleScala2", testPickling)
//TODO: issue with ./src/dotty/tools/dotc/parsing/Parsers.scala
@Test def tasty_dotc_parsing = compileList("tasty_dotc_parsing", List(
"CharArrayReader.scala", "JavaParsers.scala", "JavaScanners.scala", "JavaTokens.scala",
"MarkupParserCommon.scala", "MarkupParsers.scala", "package.scala" ,"Scanners.scala",
"ScriptParsers.scala", "SymbolicXMLBuilder.scala", "Tokens.scala", "Utility.scala"
) map (parsingDir + _), testPickling)
@Test def tasty_dotc_printing = compileDir(dotcDir, "printing", testPickling)
@Test def tasty_dotc_repl = compileDir(dotcDir, "repl", testPickling)
//@Test def tasty_dotc_reporting = compileDir(dotcDir, "reporting", testPickling)
@Test def tasty_dotc_rewrite = compileDir(dotcDir, "rewrite", testPickling)
//TODO: issues with LazyVals.scala, PatternMatcher.scala
@Test def tasty_dotc_transform = compileList("tasty_dotc_transform", List(
"AugmentScala2Traits.scala", "CapturedVars.scala", "CheckReentrant.scala", "CheckStatic.scala",
"ClassOf.scala", "CollectEntryPoints.scala", "Constructors.scala", "CrossCastAnd.scala",
"CtxLazy.scala", "ElimByName.scala", "ElimErasedValueType.scala", "ElimRepeated.scala",
"ElimStaticThis.scala", "Erasure.scala", "ExpandPrivate.scala", "ExpandSAMs.scala",
"ExplicitOuter.scala", "ExtensionMethods.scala", "FirstTransform.scala",
"Flatten.scala", "FullParameterization.scala", "FunctionalInterfaces.scala", "GetClass.scala",
"Getters.scala", "InterceptedMethods.scala", "LambdaLift.scala", "LiftTry.scala", "LinkScala2Impls.scala",
"MacroTransform.scala", "Memoize.scala", "Mixin.scala", "MixinOps.scala", "NonLocalReturns.scala",
"NormalizeFlags.scala", "OverridingPairs.scala", "ParamForwarding.scala", "Pickler.scala", "PostTyper.scala",
"ResolveSuper.scala", "RestoreScopes.scala", "SeqLiterals.scala", "Splitter.scala", "SuperAccessors.scala",
"SymUtils.scala", "SyntheticMethods.scala", "TailRec.scala", "TreeChecker.scala", "TreeExtractors.scala",
"TreeGen.scala", "TreeTransform.scala", "TypeTestsCasts.scala", "TypeUtils.scala", "ValueClasses.scala",
"VCElideAllocations.scala", "VCInlineMethods.scala"
) map (s"${dotcDir}transform/" + _), testPickling)
//TODO: issue with ./src/dotty/tools/dotc/typer/Namer.scala
@Test def tasty_typer = compileList("tasty_typer", List(
"Applications.scala", "Checking.scala", "ConstFold.scala", "ErrorReporting.scala",
"EtaExpansion.scala", "FrontEnd.scala", "Implicits.scala", "ImportInfo.scala",
"Inferencing.scala", "ProtoTypes.scala", "ReTyper.scala", "RefChecks.scala",
"TypeAssigner.scala", "Typer.scala", "VarianceChecker.scala", "Variances.scala"
) map (typerDir + _), testPickling)
@Test def tasty_dotc_util = compileDir(dotcDir, "util", testPickling)
@Test def tasty_tools_io = compileDir(toolsDir, "io", testPickling)
// Disabled, not worth porting since we're getting rid of the old JUnit tests soon.
/*@Test*/ def tasty_bootstrap = {
val logging = if (false) List("-Ylog-classpath", "-verbose") else Nil
val opt = List("-priorityclasspath", defaultOutputDir) ++ logging
// first compile dotty
compileDir(dottyDir, ".", List("-deep", "-Ycheck-reentrant", "-strict") ++ logging)(allowDeepSubtypes)
compileDir(libDir, "dotty", "-deep" :: opt)
compileDir(libDir, "scala", "-deep" :: opt)
compileDir(dottyDir, "tools", opt)
compileDir(toolsDir, "dotc", opt)
compileDir(dotcDir, "ast", opt)
compileDir(dotcDir, "config", opt)
compileDir(dotcDir, "parsing", opt)
compileDir(dotcDir, "printing", opt)
compileDir(dotcDir, "repl", opt)
compileDir(dotcDir, "reporting", opt)
compileDir(dotcDir, "rewrite", opt)
compileDir(dotcDir, "transform", opt)
compileDir(dotcDir, "typer", opt)
compileDir(dotcDir, "util", opt)
}
}
|
ihji/dotty
|
sbt-bridge/src/xsbt/ConsoleInterface.scala
|
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 <NAME>
*/
package xsbt
import xsbti.Logger
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.repl.REPL
import dotty.tools.dotc.repl.REPL.Config
class ConsoleInterface {
def commandArguments(
args: Array[String],
bootClasspathString: String,
classpathString: String,
log: Logger
): Array[String] = args
def run(args: Array[String],
bootClasspathString: String,
classpathString: String,
initialCommands: String,
cleanupCommands: String,
loader: ClassLoader,
bindNames: Array[String],
bindValues: Array[Any],
log: Logger
): Unit = {
val completeArgs =
args :+
"-bootclasspath" :+ bootClasspathString :+
"-classpath" :+ classpathString
println("Starting dotty interpreter...")
val repl = ConsoleInterface.customRepl(
initialCommands :: Nil,
cleanupCommands :: Nil,
bindNames zip bindValues,
loader
)
repl.process(completeArgs)
}
}
object ConsoleInterface {
def customConfig(
initCmds: List[String],
cleanupCmds: List[String],
boundVals: Array[(String, Any)],
loader: ClassLoader
) = new Config {
override val initialCommands: List[String] = initCmds
override val cleanupCommands: List[String] = cleanupCmds
override val boundValues: Array[(String, Any)] = boundVals
override val classLoader: Option[ClassLoader] = Option(loader)
}
def customRepl(cfg: Config): REPL = new REPL {
override lazy val config = cfg
}
def customRepl(
initCmds: List[String],
cleanupCmds: List[String],
boundVals: Array[(String, Any)],
loader: ClassLoader
): REPL = customRepl(customConfig(initCmds, cleanupCmds, boundVals, loader))
}
|
ihji/dotty
|
tests/run/mixins1/A_1.scala
|
trait A {
var x = 3
println("hi")
val y = x * x
def f: Int = x + y
def f(z: Int): Int = f + z
}
|
ihji/dotty
|
tests/run/phantom-poly-1.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
* to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
polyfun1()
polyfun1[Casper]()
}
def polyfun1[P <: Casper](): Unit = {
println("polyfun1")
}
}
object Boo extends Phantom {
type Casper <: this.Any
def boo[B <: this.Any]: B = assume
}
|
ihji/dotty
|
tests/neg/phantom-type-param-bounds-2.scala
|
class phantomTypeParamBounds2 {
def fun1[X <: Boo.Any & Any] = ??? // error
def fun2[X <: Boo.Any | Any] = ??? // error
def fun3[X >: Boo.Nothing & Nothing] = ??? // error
def fun4[X >: Boo.Nothing | Nothing] = ??? // error
def fun5[X >: Boo.Any & Any <: Boo.Any & Any] = ??? // error // error
}
object Boo extends Phantom {
def boo[B <: Boo.Any]: B = assume
}
|
ihji/dotty
|
compiler/src/dotty/tools/backend/jvm/LabelDefs.scala
|
package dotty.tools.backend.jvm
import dotty.tools.dotc.ast.Trees.Thicket
import dotty.tools.dotc.ast.{Trees, tpd}
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.core.Types
import dotty.tools.dotc.transform.TreeTransforms.{TransformerInfo, TreeTransform, MiniPhase, MiniPhaseTransform}
import dotty.tools.dotc
import dotty.tools.dotc.backend.jvm.DottyPrimitives
import dotty.tools.dotc.core.Flags.FlagSet
import dotty.tools.dotc.transform.Erasure
import dotty.tools.dotc.transform.SymUtils._
import java.io.{File => JFile}
import scala.collection.generic.Clearable
import scala.collection.mutable
import scala.collection.mutable.{ListBuffer, ArrayBuffer}
import scala.reflect.ClassTag
import dotty.tools.io.{Directory, PlainDirectory, AbstractFile}
import scala.tools.asm.{ClassVisitor, FieldVisitor, MethodVisitor}
import scala.tools.nsc.backend.jvm.{BCodeHelpers, BackendInterface}
import dotty.tools.dotc.core._
import Periods._
import SymDenotations._
import Contexts._
import Types._
import Symbols._
import Denotations._
import Phases._
import java.lang.AssertionError
import dotty.tools.dotc.util.Positions.Position
import Decorators._
import tpd._
import Flags._
import StdNames.nme
/**
 * Verifies that each Label DefDef has only a single address to jump back to and
* reorders them such that they are not nested and this address is a
* fall-through address for the JVM.
*
* ```scala
* <label> def foo(i: Int) = {
* <label> def bar = 0
* <label> def dough(i: Int) = if (i == 0) bar else foo(i-1)
* dough(i)
* }
*
* foo(100)
* ```
*
* will get rewritten to:
*
* ```scala
* <label> def foo(i: Int) = dough(i)
* <label> def dough(i: Int) = if (i == 0) bar else foo(i-1)
* <label> def bar = 2
* foo(100)
* ```
*
 * The proposed way to generate this pattern in the backend is:
*
* ```scala
* foo(100)
* <jump foo>
* <label> def foo(i: Int) = dough(i)
* // <jump a> // unreachable
* <label> def dough(i: Int) = if (i == 0) bar else foo(i-1)
* // <jump a> // unreachable
* <label> def bar = 2
* // <jump a> // unreachable
* <asm point a>
* ```
*
 * Unreachable jumps will be eliminated by local dead code analysis, and the JVM is
 * smart enough to remove next-line jumps on its own.
*
 * Note that Ychecking this phase required softening the scoping rules, as this phase
 * intentionally breaks the usual scoping rules for labels inside methods.
 * This is enabled by setting the `labelsReordered` flag in Phases.
*
* @author <NAME>
*/
class LabelDefs extends MiniPhaseTransform {
def phaseName: String = "labelDef"
val queue = new ArrayBuffer[Tree]()
val beingAppended = new mutable.HashSet[Symbol]()
var labelLevel = 0
override def transformDefDef(tree: tpd.DefDef)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
if (tree.symbol is Flags.Label) tree
else {
collectLabelDefs.clear
val newRhs = collectLabelDefs.transform(tree.rhs)
var labelDefs = collectLabelDefs.labelDefs
def putLabelDefsNearCallees = new TreeMap() {
override def transform(tree: tpd.Tree)(implicit ctx: Context): tpd.Tree = {
tree match {
case t: Apply if labelDefs.contains(t.symbol) =>
val labelDef = labelDefs(t.symbol)
labelDefs -= t.symbol
val labelDef2 = transform(labelDef)
Block(labelDef2:: Nil, t)
case _ => if (labelDefs.nonEmpty) super.transform(tree) else tree
}
}
}
val res = cpy.DefDef(tree)(rhs = putLabelDefsNearCallees.transform(newRhs))
res
}
}
object collectLabelDefs extends TreeMap() {
// labelSymbol -> Defining tree
val labelDefs = new mutable.HashMap[Symbol, Tree]()
def clear = {
labelDefs.clear()
}
override def transform(tree: tpd.Tree)(implicit ctx: Context): tpd.Tree = tree match {
case t: Template => t
case t: Block =>
val r = super.transform(t)
r match {
case t: Block if t.stats.isEmpty => t.expr
case _ => r
}
case t: DefDef =>
assert(t.symbol is Flags.Label)
val r = super.transform(tree)
labelDefs(r.symbol) = r
EmptyTree
case t: Apply if t.symbol is Flags.Label =>
val sym = t.symbol
super.transform(tree)
case _ =>
super.transform(tree)
}
}
}
|
ihji/dotty
|
tests/run/phantom-methods-10.scala
|
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomRefErasure,phantomErasure,erasure`
 * to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
fun2.pacFun4(inky)
}
def pacFun4(clyde: Inky) = {
println("pacFun4")
}
def inky: Inky = {
println("inky")
boo[Inky]
}
def fun2 = {
println("fun")
this
}
}
object Boo extends Phantom {
type Inky <: this.Any
def boo[B <: this.Any]: B = assume
}
|
ihji/dotty
|
tests/neg/phantom-var.scala
|
class Foo {
var foo = Boo.boo // error: var fields cannot have Phantom types
}
object Boo extends Phantom {
def boo = assume
}
|
ihji/dotty
|
tests/neg/i1650.scala
|
object Test {
test4(test4$default$1) // error
def test4[T[P]](x: T[T[List[T[X forSome { type X }]]]]) = ??? // error // error
def test4$default$1[T[P]]: T[Int] = ???
}
|
ihji/dotty
|
tests/run/phantom-hk-1.scala
|
<reponame>ihji/dotty
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
 * to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
hkFun1(boo[Blinky])
hkFun1(boo[Inky])
hkFun1(boo[Pinky])
}
type HKPhantom[X <: Blinky] = X
def hkFun1[Y <: Blinky](p9: HKPhantom[Y]) = {
println("hkFun1")
}
}
trait Phantoms {
}
object Boo extends Phantom {
type Blinky <: this.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: this.Any]: B = assume
}
|
ihji/dotty
|
tests/run/phantom-methods-3.scala
|
<reponame>ihji/dotty<filename>tests/run/phantom-methods-3.scala
/* Run this test with
* `run tests/run/xyz.scala -Xprint-diff-del -Xprint:arrayConstructors,phantomTermErasure,phantomTypeErasure,erasure`
 * to see the diffs after PhantomRefErasure, PhantomDeclErasure and Erasure.
*/
object Test {
import Boo._
def main(args: Array[String]): Unit = {
fun(phantomFun3(boo[Blinky]))
fun(phantomFun3(boo[Inky]))
fun(phantomFun3(boo[Pinky]))
}
def fun(top: Blinky): Unit = println("fun")
def phantomFun3[P <: Blinky](p7: P): Blinky = p7
}
object Boo extends Phantom {
type Blinky <: Boo.Any
type Inky <: Blinky
type Pinky <: Inky
def boo[B <: Boo.Any]: B = assume
}
|
arajajyothibabu/game-of-life
|
src/main/scala/GameOfLife.scala
|
<reponame>arajajyothibabu/game-of-life<filename>src/main/scala/GameOfLife.scala
/**
* Created by jyothi on 18/11/17.
*/
case class Cell(x: Int, y: Int)
class GameOfLife(inputPanel: Array[Array[Int]] = Array()) {
val alive = 1
val dead = 0
private val defaultGamePanel = (0 until 5).map(x => (0 until 5).map(y => dead).toArray).toArray
private val gamePanel = if(inputPanel.length == 0) defaultGamePanel else inputPanel
private val sizeOfPanel = gamePanel.length
def printPanel(): Unit = gamePanel.foreach(x => println(x.mkString(" ")))
def numberOfNeighbours(x: Int, y: Int): Int = {
var neighbours = 0
if(x - 1 > -1 && y - 1 > -1) neighbours += gamePanel(x - 1)(y - 1) //(x - 1, y - 1)
if(x - 1 > -1) neighbours += gamePanel(x - 1)(y) //(x - 1, y)
if(x - 1 > -1 && y + 1 < sizeOfPanel) neighbours += gamePanel(x - 1)(y + 1) //(x - 1, y + 1)
if(y - 1 > -1) neighbours += gamePanel(x)(y - 1) //(x, y - 1)
if(y + 1 < sizeOfPanel) neighbours += gamePanel(x)(y + 1) //(x, y + 1)
if(x + 1 < sizeOfPanel && y - 1 > -1) neighbours += gamePanel(x + 1)(y - 1) //(x + 1, y - 1)
if(x + 1 < sizeOfPanel) neighbours += gamePanel(x + 1)(y) //(x + 1, y)
if(x + 1 < sizeOfPanel && y + 1 < sizeOfPanel) neighbours += gamePanel(x + 1)(y + 1) //(x + 1, y + 1)
neighbours
}
private def ruleOne(x: Int, y: Int): Boolean = numberOfNeighbours(x, y) < 2
private def ruleTwo(x: Int, y: Int): Boolean = numberOfNeighbours(x, y) > 3
private def ruleThree(x: Int, y: Int): Boolean = numberOfNeighbours(x, y) == 3
def killAlive(cell: Cell): Unit = {
gamePanel(cell.x)(cell.y) = dead
}
def giveBirth(cell: Cell): Unit = {
gamePanel(cell.x)(cell.y) = alive
}
def nextGeneration(): Unit = {
var readyToDie = List[Cell]()
var readyToBorn = List[Cell]()
for(x <- gamePanel.indices; y <- gamePanel(x).indices){
if(ruleOne(x, y)){
readyToDie ::= Cell(x, y)
}
if(ruleTwo(x, y)){
readyToDie ::= Cell(x, y)
}
if(ruleThree(x, y)){
readyToBorn ::= Cell(x, y)
}
}
readyToDie.foreach(killAlive)
readyToBorn.foreach(giveBirth)
}
}
|
arajajyothibabu/game-of-life
|
src/main/scala/Main.scala
|
/**
* Created by jyothi on 18/11/17.
*/
/**
 * 1. Any live cell with fewer than 2 live neighbours dies
 * 2. Any live cell with more than 3 live neighbours dies of overpopulation
 * 3. Any dead cell with exactly 3 live neighbours comes to life
*/
object Main extends App {
val panel = (0 until 5).map(x => (0 until 5).map(y => 0).toArray).toArray
panel(2)(2) = 1
panel(1)(2) = 1
panel(3)(2) = 1
val gameOfLife = new GameOfLife(panel)
gameOfLife.printPanel() //printing input panel
gameOfLife.nextGeneration()
gameOfLife.printPanel() //printing output panel
gameOfLife.nextGeneration()
gameOfLife.printPanel() //printing output panel
gameOfLife.nextGeneration()
gameOfLife.printPanel() //printing output panel
gameOfLife.nextGeneration()
gameOfLife.printPanel() //printing output panel
}
|
arajajyothibabu/game-of-life
|
build.sbt
|
name := "GameOfLife"
version := "1.0"
scalaVersion := "2.12.1"
|
rohitkumarjoshi/gitbucket
|
src/main/scala/gitbucket/core/service/CommitsService.scala
|
<gh_stars>0
package gitbucket.core.service
import gitbucket.core.model.CommitComment
import gitbucket.core.model.Profile._
import gitbucket.core.model.Profile.profile.blockingApi._
import gitbucket.core.model.Profile.dateColumnType
trait CommitsService {
def getCommitComments(owner: String, repository: String, commitId: String, includePullRequest: Boolean)(
implicit s: Session
) =
CommitComments filter { t =>
t.byCommit(owner, repository, commitId) && (t.issueId.isEmpty || includePullRequest)
} list
def getCommitComment(owner: String, repository: String, commentId: String)(implicit s: Session) =
if (commentId forall (_.isDigit))
CommitComments filter { t =>
t.byPrimaryKey(commentId.toInt) && t.byRepository(owner, repository)
} firstOption
else
None
def createCommitComment(
owner: String,
repository: String,
commitId: String,
loginUser: String,
content: String,
fileName: Option[String],
oldLine: Option[Int],
newLine: Option[Int],
issueId: Option[Int]
)(implicit s: Session): Int =
CommitComments returning CommitComments.map(_.commentId) insert CommitComment(
userName = owner,
repositoryName = repository,
commitId = commitId,
commentedUserName = loginUser,
content = content,
fileName = fileName,
oldLine = oldLine,
newLine = newLine,
registeredDate = currentDate,
updatedDate = currentDate,
issueId = issueId
)
def updateCommitCommentPosition(commentId: Int, commitId: String, oldLine: Option[Int], newLine: Option[Int])(
implicit s: Session
): Unit =
CommitComments
.filter(_.byPrimaryKey(commentId))
.map { t =>
(t.commitId, t.oldLine, t.newLine)
}
.update(commitId, oldLine, newLine)
def updateCommitComment(commentId: Int, content: String)(implicit s: Session) = {
CommitComments
.filter(_.byPrimaryKey(commentId))
.map { t =>
(t.content, t.updatedDate)
}
.update(content, currentDate)
}
def deleteCommitComment(commentId: Int)(implicit s: Session) =
CommitComments filter (_.byPrimaryKey(commentId)) delete
}
|
rohitkumarjoshi/gitbucket
|
src/main/scala/gitbucket/core/controller/ReleasesController.scala
|
<filename>src/main/scala/gitbucket/core/controller/ReleasesController.scala<gh_stars>0
package gitbucket.core.controller
import java.io.File
import gitbucket.core.service.{AccountService, ActivityService, ReleaseService, RepositoryService}
import gitbucket.core.util.{FileUtil, ReadableUsersAuthenticator, ReferrerAuthenticator, WritableUsersAuthenticator}
import gitbucket.core.util.Directory._
import gitbucket.core.util.Implicits._
import org.scalatra.forms._
import gitbucket.core.releases.html
import org.apache.commons.io.FileUtils
import scala.collection.JavaConverters._
class ReleaseController
extends ReleaseControllerBase
with RepositoryService
with AccountService
with ReleaseService
with ActivityService
with ReadableUsersAuthenticator
with ReferrerAuthenticator
with WritableUsersAuthenticator
trait ReleaseControllerBase extends ControllerBase {
self: RepositoryService
with AccountService
with ReleaseService
with ReadableUsersAuthenticator
with ReferrerAuthenticator
with WritableUsersAuthenticator
with ActivityService =>
case class ReleaseForm(
name: String,
content: Option[String]
)
val releaseForm = mapping(
"name" -> trim(text(required)),
"content" -> trim(optional(text()))
)(ReleaseForm.apply)
get("/:owner/:repository/releases")(referrersOnly { repository =>
val releases = getReleases(repository.owner, repository.name)
val assets = getReleaseAssetsMap(repository.owner, repository.name)
html.list(
repository,
repository.tags.reverse.map { tag =>
(tag, releases.find(_.tag == tag.name).map { release =>
(release, assets(release))
})
},
hasDeveloperRole(repository.owner, repository.name, context.loginAccount)
)
})
get("/:owner/:repository/releases/:tag")(referrersOnly { repository =>
val tagName = params("tag")
getRelease(repository.owner, repository.name, tagName)
.map { release =>
html.release(
release,
getReleaseAssets(repository.owner, repository.name, tagName),
hasDeveloperRole(repository.owner, repository.name, context.loginAccount),
repository
)
}
.getOrElse(NotFound())
})
get("/:owner/:repository/releases/:tag/assets/:fileId")(referrersOnly { repository =>
val tagName = params("tag")
val fileId = params("fileId")
(for {
_ <- repository.tags.find(_.name == tagName)
_ <- getRelease(repository.owner, repository.name, tagName)
asset <- getReleaseAsset(repository.owner, repository.name, tagName, fileId)
} yield {
response.setHeader("Content-Disposition", s"attachment; filename=${asset.label}")
RawData(
FileUtil.getMimeType(asset.label),
new File(getReleaseFilesDir(repository.owner, repository.name), tagName + "/" + fileId)
)
}).getOrElse(NotFound())
})
get("/:owner/:repository/releases/:tag/create")(writableUsersOnly { repository =>
val tagName = params("tag")
repository.tags
.find(_.name == tagName)
.map { tag =>
html.form(repository, tag, None)
}
.getOrElse(NotFound())
})
post("/:owner/:repository/releases/:tag/create", releaseForm)(writableUsersOnly { (form, repository) =>
val tagName = params("tag")
val loginAccount = context.loginAccount.get
// Insert into RELEASE
createRelease(repository.owner, repository.name, form.name, form.content, tagName, loginAccount)
// Insert into RELEASE_ASSET
val files = params.collect {
case (name, value) if name.startsWith("file:") =>
val Array(_, fileId) = name.split(":")
(fileId, value)
}
files.foreach {
case (fileId, fileName) =>
val size =
new java.io.File(getReleaseFilesDir(repository.owner, repository.name), tagName + "/" + fileId).length
createReleaseAsset(repository.owner, repository.name, tagName, fileId, fileName, size, loginAccount)
}
recordReleaseActivity(repository.owner, repository.name, loginAccount.userName, form.name)
redirect(s"/${repository.owner}/${repository.name}/releases/${tagName}")
})
get("/:owner/:repository/releases/:tag/edit")(writableUsersOnly { repository =>
val tagName = params("tag")
(for {
release <- getRelease(repository.owner, repository.name, tagName)
tag <- repository.tags.find(_.name == tagName)
} yield {
html.form(repository, tag, Some(release, getReleaseAssets(repository.owner, repository.name, tagName)))
}).getOrElse(NotFound())
})
post("/:owner/:repository/releases/:tag/edit", releaseForm)(writableUsersOnly {
(form, repository) =>
val tagName = params("tag")
val loginAccount = context.loginAccount.get
getRelease(repository.owner, repository.name, tagName)
.map { release =>
// Update RELEASE
updateRelease(repository.owner, repository.name, tagName, form.name, form.content)
// Delete and Insert RELEASE_ASSET
val assets = getReleaseAssets(repository.owner, repository.name, tagName)
deleteReleaseAssets(repository.owner, repository.name, tagName)
val files = params.collect {
case (name, value) if name.startsWith("file:") =>
val Array(_, fileId) = name.split(":")
(fileId, value)
}
files.foreach {
case (fileId, fileName) =>
val size =
new java.io.File(getReleaseFilesDir(repository.owner, repository.name), tagName + "/" + fileId).length
createReleaseAsset(repository.owner, repository.name, tagName, fileId, fileName, size, loginAccount)
}
assets.foreach { asset =>
if (!files.exists { case (fileId, _) => fileId == asset.fileName }) {
val file = new java.io.File(
getReleaseFilesDir(repository.owner, repository.name),
release.tag + "/" + asset.fileName
)
FileUtils.forceDelete(file)
}
}
redirect(s"/${release.userName}/${release.repositoryName}/releases/${tagName}")
}
.getOrElse(NotFound())
})
post("/:owner/:repository/releases/:tag/delete")(writableUsersOnly { repository =>
val tagName = params("tag")
getRelease(repository.owner, repository.name, tagName).foreach { release =>
FileUtils.deleteDirectory(new File(getReleaseFilesDir(repository.owner, repository.name), release.tag))
}
deleteRelease(repository.owner, repository.name, tagName)
redirect(s"/${repository.owner}/${repository.name}/releases")
})
}
|
easel/elidable
|
build.sbt
|
<reponame>easel/elidable
scalaVersion := "2.11.8"
crossScalaVersions := Seq(scalaVersion.value)
//
//scalacOptions := Seq("-Xelide-below", "SEVERE") // does not work, assertion level 2000 is not below 1000
//scalacOptions := Seq("-Xelide-below", "WARNING") // does not work, assertion level 2000 is not below the threshold
scalacOptions := Seq("-Xelide-below", "MAXIMUM") // works, Int.MaxValue > 2000
//scalacOptions := Seq("-Xelide-below", "OFF") // works, Int.MaxValue > 2000
//scalacOptions := Seq("-Xelide-below", "2001") // works 2001 > 2000
//scalacOptions := Seq("-Xdisable-assertions") // works
|
easel/elidable
|
src/main/scala/Main.scala
|
<reponame>easel/elidable<filename>src/main/scala/Main.scala
object Main extends App {
println("starting")
assert(false, "assertions are enabled")
println("finished without errors")
}
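// Added illustration (hypothetical, not part of the original file): user code can opt
// into the same elision mechanism with @elidable. With -Xelide-below MAXIMUM (as set in
// build.sbt) calls to this method are removed at compile time, just like assert above.
object ElidableLogging {
  import scala.annotation.elidable
  import scala.annotation.elidable.INFO
  // INFO is below the MAXIMUM threshold, so info(...) call sites are elided entirely
  @elidable(INFO) def info(msg: String): Unit = println(msg)
}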
|
littlenag/zparsers
|
src/main/scala/zio/stream/parsers/package.scala
|
package zio.stream
package object parsers {
type ~[+A, +B] = (A, B)
object ~ {
def unapply[A, B](in: (A, B)): Some[(A, B)] = Some(in)
}
}
|
littlenag/zparsers
|
src/main/scala/zio/stream/parsers/KMap.scala
|
<filename>src/main/scala/zio/stream/parsers/KMap.scala<gh_stars>0
package zio.stream.parsers
private[parsers] class KMap[K[_] <: AnyRef, V[_] <: AnyRef] private (delegate: Map[AnyRef, AnyRef]) {
def apply[A](key: K[A]): V[A] = delegate(key).asInstanceOf[V[A]]
def get[A](key: K[A]): Option[V[A]] = delegate.get(key).asInstanceOf[Option[V[A]]]
def +[A](pair: (K[A], V[A])): KMap[K, V] =
new KMap[K, V](delegate + pair.asInstanceOf[(AnyRef, AnyRef)])
def contains[A](key: K[A]): Boolean = delegate contains key
}
private[parsers] object KMap {
def apply[K[_] <: AnyRef, V[_] <: AnyRef](pairs: ((K[A], V[A]) forSome { type A })*): KMap[K, V] =
new KMap[K, V](Map(pairs map { _.asInstanceOf[(AnyRef, AnyRef)] }: _*))
}
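// Added note (illustrative, not part of the original file): K and V share the same type
// parameter, so a lookup keyed by a K[A] statically returns a V[A]. Parsers.scala uses
// this for its derivation Cache, mapping (EventIn, Parser[α]) keys to () => Parser[α]
// thunks without any casts leaking into user code.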
|
littlenag/zparsers
|
src/test/scala/scalaz/stream/parsers/ParserSpecs.scala
|
<filename>src/test/scala/scalaz/stream/parsers/ParserSpecs.scala
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalaz.stream
package parsers
import org.specs2.matcher.Matcher
import org.specs2.mutable._
import scalaz._
import scalaz.std.anyVal._
import scalaz.std.string._
import scalaz.syntax.equal._
import scala.collection.SeqLike
import scala.collection.generic.CanBuildFrom
import scala.collection.immutable.StringOps
import scala.util.matching.Regex
object ParserSpecs extends Specification {
import Parser.{Error, Completed, literalRichParser}
"terminal parsers" should {
"parse the empty string" in {
val epsilon: Parser[Char, Unit] = Parser.completed(())
epsilon must parseComplete("").as(())
}
"parse a single token" in {
val a: Parser[Char, Char] = 'a'
a must parseComplete("a").as('a')
}
"produce an error when so defined" in {
val e: Parser[Char, Unit] = Parser.error("oogly boogly")
e must parseError("fubar").as("oogly boogly")
}
}
"parentheses matching" should {
lazy val grammar: Parser[Char, Int] = (
'(' ~> grammar <~ ')' ^^ (1 +)
| Parser.completed(0)
)
"parse the empty string" in {
grammar must parseComplete("").as(0)
}
"parse a single set of parentheses" in {
grammar must parseComplete("()").as(1)
}
"parse four nested sets of parentheses" in {
grammar must parseComplete("(((())))").as(4)
}
"fail to parse a single mismatched paren" in {
grammar must parseError("(").as("unexpected end of stream; expected ')'")
}
"fail to parse three mismatched parens with one match" in {
grammar must parseError("(((()").as("unexpected end of stream; expected ')'")
}
"fail to parse a mismatched closing paren" in {
grammar must parseError(")").as("expected '(', got ')'")
}
}
"an expression evaluator" should {
sealed trait ExprToken
object ExprToken {
final case class Num(n: Int) extends ExprToken
case object Plus extends ExprToken
case object Minus extends ExprToken
case object Times extends ExprToken
case object Div extends ExprToken
case object LParen extends ExprToken
case object RParen extends ExprToken
}
implicit def exprTokenEq[T <: ExprToken]: Equal[T] = Equal.equalA // because I'm lazy
implicit def exprTokenShow[T <: ExprToken]: Show[T] = Show.showA // ditto!
import ExprToken._
val rules: Map[Regex, List[String] => ExprToken] = Map(
"""\s*(\d+)""".r -> { case ns :: Nil => Num(ns.toInt) },
"""\s*\+""".r -> { _ => Plus },
"""\s*-""".r -> { _ => Minus },
"""\s*\*""".r -> { _ => Times },
"""\s*/""".r -> { _ => Div },
"""\s*\(""".r -> { _ => LParen },
"""\s*\)""".r -> { _ => RParen })
def exprTokenize(str: String): Seq[ExprToken] =
regexTokenize(str, rules) collect { case \/-(et) => et }
// %%
lazy val expr: Parser[ExprToken, Int] = (
expr ~ Plus ~ term ^^ { (e1, _, e2) => e1 + e2 }
| expr ~ Minus ~ term ^^ { (e1, _, e2) => e1 - e2 }
| term
)
lazy val term: Parser[ExprToken, Int] = (
term ~ Times ~ value ^^ { (e1, _, e2) => e1 * e2 }
| term ~ Div ~ value ^^ { (e1, _, e2) => e1 / e2 }
| value
)
// type inference and invariance sort of failed me here...
lazy val value: Parser[ExprToken, Int] = (
(LParen: Parser[ExprToken, ExprToken]) ~> expr <~ RParen
| (Parser pattern { case Num(n) => n })
)
// %%
"tokenize a number" in {
exprTokenize("42") mustEqual Seq(Num(42))
}
"parse a number" in {
expr must parseComplete(exprTokenize("42")).as(42)
expr must parseComplete(exprTokenize("12")).as(12)
}
"parse a simple addition expression" in {
expr must parseComplete(exprTokenize("1 + 2")).as(3)
}
"parse a complex composition of all four operators" in {
expr must parseComplete(exprTokenize("228 * 4 + 12")).as(924)
expr must parseComplete(exprTokenize("123 + 228 * 4 + 12")).as(1047)
expr must parseComplete(exprTokenize("123 - 2 + 228 * 4 + 12")).as(1045)
expr must parseComplete(exprTokenize("123 - 2 + 228 * 4 + 12 / 4 + 79")).as(1115)
expr must parseComplete(exprTokenize("123 - 2 + 228 * 4 + 12 / 4 + 79 * 5")).as(1431)
}
// TODO more expr tests
}
// TODO maybe move this to a Util object? seems useful
def parse[T, R](parser: Parser[T, R])(str: Seq[T]): Error[T, R] \/ Completed[T, R] = {
def inner(str: Seq[T])(parser: Parser[T, R]): State[Parser.Cache[T], Error[T, R] \/ Completed[T, R]] = {
if (str.isEmpty) {
State state parser.complete()
} else {
parser match {
case Completed(_) => State state -\/(Error("unexpected end of stream"))
case e @ Error(_) => State state -\/(e)
case parser: Parser.Incomplete[T, R] =>
parser derive str.head flatMap inner(str.tail)
}
}
}
inner(str)(parser) eval Parser.Cache[T]
}
// TODO this also seems useful...
def tokenize[Str[_] <: SeqLike[_, _], TokenIn, TokenOut, That <: TraversableOnce[TokenIn \/ TokenOut]](str: Str[TokenIn])(f: Str[TokenIn] => (TokenIn \/ TokenOut, Str[TokenIn]))(implicit cbf: CanBuildFrom[Str[TokenIn], TokenIn \/ TokenOut, That]): That = {
if (str.isEmpty) {
cbf().result
} else {
val (token, tail) = f(str)
val builder = cbf()
builder += token
builder ++= tokenize(tail)(f) // TODO it's never worse, tail-recurse!
builder.result
}
}
// TODO oh look, more useful stuff!
def regexTokenize[T](str: String, rules: Map[Regex, List[String] => T]): Seq[Char \/ T] = {
def iseqAsCharSeq(seq: IndexedSeq[Char]): CharSequence = new CharSequence {
def charAt(i: Int) = seq(i)
def length = seq.length
def subSequence(start: Int, end: Int) = iseqAsCharSeq(seq.slice(start, end))
override def toString = seq.mkString
}
tokenize(str: IndexedSeq[Char]) { seq =>
val str = iseqAsCharSeq(seq)
// find the "first" regex that matches and apply its transform
val tokenM: Option[(T, IndexedSeq[Char])] = rules collectFirst {
case (regex, f) if (regex findPrefixMatchOf str).isDefined => {
val m = (regex findPrefixMatchOf str).get
(f(m.subgroups), m.after.toString: IndexedSeq[Char])
}
}
tokenM map {
case (token, tail) => (\/-(token), tail)
} getOrElse ((-\/(seq.head), seq.tail))
}
}
//
// custom matchers
//
def parseComplete[T](str: Seq[T]) = new {
def as[R: Equal](result: R): Matcher[Parser[T, R]] = {
def body(parser: Parser[T, R]) = {
parse(parser)(str) match {
case \/-(Completed(r)) => r === result
case -\/(_) => false
}
}
def error(parser: Parser[T, R]) = parse(parser)(str) match {
case -\/(Error(str)) => s"produces error: $str"
case \/-(Completed(r)) => s"produces result $r rather than expected $result"
}
(body _,
Function.const("parses successfully") _,
error _)
}
}
def parseError[T](str: Seq[T]) = new {
def as[R](msg: String): Matcher[Parser[T, R]] = {
def body(parser: Parser[T, R]) = {
parse(parser)(str) match {
case \/-(Completed(r)) => false
case -\/(Error(msg2)) => msg === msg2
}
}
def error(parser: Parser[T, R]) = parse(parser)(str) match {
case -\/(Error(msg2)) => s"produced error '$msg2' and not '$msg'"
case \/-(_) => "completed and did not error"
}
(body _,
Function.const(s"produces error $msg") _,
error _)
}
}
}
|
littlenag/zparsers
|
src/main/scala/zio/stream/parsers/Parsers.scala
|
package zio.stream.parsers
import cats._
import cats.data._
import cats.syntax.eq._
import cats.syntax.monad._
import cats.syntax.show._
import cats.syntax.either._
object Syntax {
type \/[A,B] = Either[A,B]
def -\/[A, B](a: A): Either[A, B] = Left(a)
def \/-[A, B](b: B): Either[A, B] = Right(b)
}
import Syntax._
trait Parsers {
// Type of events are we are parsing on
type EventIn
// Types of events we'll emit on partial matches
type EventOut
/**
* Applicative (not monadic!) parser interface defined by two functions (simplified types):
*
* - `complete: Error \/ Completed`
* - `derive: State[Cache, Parser]`
*
* The `derive` function is only defined on parsers of the subtype, `Incomplete`. The `complete`
* function is defined on all parsers, where the following axioms hold:
*
* - `complete Completed = \/-(Completed)`
* - `complete Error = -\/(Error)`
* - `complete Incomplete = ???`
*
* Which is to say that an "Incomplete" parser may be completable, but is also guaranteed to have
 * potential subsequent derivations. A "Completed" or "Error" parser does not have any further
 * derivations, but its completeness is guaranteed. An example of an incomplete parser that has
 * subsequent possible derivations but is still completable is the following:
*
* lazy val parens = (
* '(' ~ parens ~ ')'
* | completed
* )
*
* The `parens` parser may be completed immediately, since it contains a production for the empty
* string. However, it may also be derived, and the only valid derivation for it is over the '('
* token. The resulting parser from that derivation *cannot* be completed, since it would require
* a matching paren in order to represent a valid input.
*
* A parser which starts as Incomplete and then becomes either Completed or Error might be something
* like the following:
*
* lazy val foo = literal('a')
*
* The `foo` parser is Incomplete and not completable (it contains no production for the empty string).
* However, it may be derived over the token 'a' to produce a Completed parser (which will actually
* be of runtime type Completed). If it is derived over any other token, it will produce an Error
* parser.
*
* Thus, unlike many parser combinators encodings, this one encodes the result algebra directly in
* the parser itself. This has several advantages from a usability standpoint. It does, however,
* make the encoding somewhat convoluted in a few places from an implementation standpoint. Hopefully
* those convolutions do not leak into user space...
*/
sealed trait Parser[T] {
/**
* Attempts to complete the parser, under the assumption that the stream has terminated. If the
* parser contains a production for the empty string, it will complete and produce its result.
* Otherwise, if no ε-production exists, an error will be produced.
*
* This function allows evaluators to request early termination on a possibly-unbounded incremental
* parse. For example, one might define a JSON grammar which parses an unbounded number of JSON
* values, returning them as a list. Such a grammar could complete early so long as the prefix
* string of tokens defines a complete and self-contained JSON value. This is a desirable property
* for stream parsers, as it allows the evaluation to be driven (and halted) externally.
*
* Any parsers specified in the `seen` set will be treated as already traversed, indicating a cycle
* in the graph. Thus, if the traversal recursively reaches these parsers, that node will complete
* to an error. For a good time with the whole family, you can invoke `prsr.complete(Set(prsr))`,
* which will produce an `Error("divergent")` for all non-trivial parsers (namely, parsers that
* are not `Complete` or `Error` already).
*/
def complete[R](seen: Set[Parser[_]] = Set()): Either[Error[R], Completed[T]]
/**
* Parsers are functors, how 'bout that? Note the lack of flatMap, though. No context-sensitive
* parsers allowed.
*/
def map[U](f: T => U): Parser[U]
}
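// Added sketch (illustrative, not part of the original file): the completion axioms from
// the comment above, spelled out with the trivial constructors defined later in this trait:
//   completed(42).complete() == Right(Completed(42))
//   error[Int]("boom").complete() == Left(Error("boom"))
// An Incomplete parser (e.g. literal) only completes successfully after being derived
// over a matching token.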
// yep, indexing on value identity LIKE A BOSS
type Cache = KMap[Lambda[α => (EventIn, Parser[α])], Lambda[α => () => Parser[α]]]
//type Cache = KMap[({type λ[α] = (EventIn, Parser[α])})#λ, ({type λ[α] = () => Parser[α]})#λ]
// creates an empty cache
def Cache = KMap[({type λ[α] = (EventIn, Parser[α])})#λ, ({type λ[α] = () => Parser[α]})#λ]()
/**
* Parser for the empty string, producing a given result.
*/
def completed[Result](r: Result): Parser[Result] = Completed(r)
/**
* Parser that is already in the error state. Generally speaking, this is probably
* only useful for internal plumbing.
*/
def error[Result](msg: String): Parser[Result] = Error(msg)
/**
* Parser for a single literal token, producing that token as a result. Parametricity!
*/
implicit def literal(token: EventIn)(implicit ev1: Eq[EventIn], ev2: Show[EventIn]): Parser[EventIn] = new Incomplete[EventIn] {
override val toString: String = s"lit(${token.show})"
def innerComplete[R](seen: Set[Parser[_]]) = Left(Error(s"unexpected end of stream; expected '${token.show}'"))
def innerDerive(candidate: EventIn): State[Cache, Parser[EventIn]] = {
val result: Parser[EventIn] =
if (candidate === token)
completed(token)
else
error(s"expected '${token.show}', got '${candidate.show}'")
State pure result
}
}
def pattern[T](pf: PartialFunction[EventIn, T])(implicit ev2: Show[EventIn]): Parser[T] = new Incomplete[T] {
override val toString: String = s"pattern(...)"
def innerComplete[R](seen: Set[Parser[_]]) = Left(Error(s"unexpected end of stream"))
def innerDerive(candidate: EventIn) = {
val result: Parser[T] = if (pf isDefinedAt candidate)
completed(pf(candidate))
else
error(s"'${candidate.show}' did not match the expected pattern")
State pure result
}
}
//
// syntax
//
// implicit chaining for literal syntax
implicit def literalRichParser(token: EventIn)(implicit ev1: Eq[EventIn], ev2: Show[EventIn]): RichParser[EventIn] =
new RichParser(literal(token))
// it's somewhat important that these functions be lazy
implicit class RichParser[Result](left: => Parser[Result]) {
def as[Result2](f: => Result2): Parser[Result2] = left map (_ => f)
// alias for map
def ^^[Result2](f: Result => Result2): Parser[Result2] = left map f
def ~>[Result2](right: => Parser[Result2]): Parser[Result2] =
left ~ right ^^ { (_, r) => r }
def <~[Result2](right: => Parser[Result2]): Parser[Result] =
left ~ right ^^ { (l, _) => l }
// alias for andThen
def ~[Result2](right: => Parser[Result2]) = andThen(right)
def andThen[Result2](right: => Parser[Result2]): Parser[Result ~ Result2] = {
new SeqParser(left, right)
}
// alias for orElse
def |(right: => Parser[Result]) = {
orElse(right)
}
def orElse(right: => Parser[Result]): Parser[Result] =
new UnionParser(left, right)
}
implicit class Caret2[A, B](self: Parser[A ~ B]) {
def ^^[Z](f: (A, B) => Z): Parser[Z] = self map {
case a ~ b => f(a, b)
}
}
implicit class Caret3L[A, B, C](self: Parser[(A ~ B) ~ C]) {
def ^^[Z](f: (A, B, C) => Z): Parser[Z] = self map {
case (a ~ b) ~ c => f(a, b, c)
}
}
implicit class Caret3R[A, B, C](self: Parser[A ~ (B ~ C)]) {
def ^^[Z](f: (A, B, C) => Z): Parser[Z] = self map {
case a ~ (b ~ c) => f(a, b, c)
}
}
implicit class Caret4LL[A, B, C, D](self: Parser[((A ~ B) ~ C) ~ D]) {
def ^^[Z](f: (A, B, C, D) => Z): Parser[Z] = self map {
case ((a ~ b) ~ c) ~ d => f(a, b, c, d)
}
}
implicit class Caret4LR[A, B, C, D](self: Parser[(A ~ (B ~ C)) ~ D]) {
def ^^[Z](f: (A, B, C, D) => Z): Parser[Z] = self map {
case (a ~ (b ~ c)) ~ d => f(a, b, c, d)
}
}
implicit class Caret4RL[A, B, C, D](self: Parser[A ~ ((B ~ C) ~ D)]) {
def ^^[Z](f: (A, B, C, D) => Z): Parser[Z] = self map {
case a ~ ((b ~ c) ~ d) => f(a, b, c, d)
}
}
implicit class Caret4RR[A, B, C, D](self: Parser[A ~ (B ~ (C ~ D))]) {
def ^^[Z](f: (A, B, C, D) => Z): Parser[Z] = self map {
case a ~ (b ~ (c ~ d)) => f(a, b, c, d)
}
}
//
// algebra
//
// note that this is *not* a NEL; we're going to forbid global ambiguity for now
final case class Completed[T](result: T) extends Parser[T] {
def complete[R](seen: Set[Parser[_]]) = \/-(this)
def map[U](f: T => U): Completed[U] = Completed(f(result))
}
// yep! it's a string. deal with it
final case class Error[T](msg: String) extends Parser[T] {
def complete[R](seen: Set[Parser[_]]) = Left(Error(msg))
def map[U](f: T => U): Error[U] = Error(msg)
}
object Error {
implicit def monoid[T]: Monoid[Error[T]] = new Monoid[Error[T]] {
def empty = Error("")
def combine(e1: Error[T], e2: Error[T]): Error[T] =
Error(s"${e1.msg} and(0) ${e2.msg}")
}
}
// An incomplete parse needs to be able to return a list of events to emit
// again with the co-routine like parsing structure
// could be made more performant by using special control flow structures that a quoted DSL could parse out
// co-routine makes passing state easier, since otherwise would have to thread through the parser combinator constructors
sealed trait Incomplete[Result] extends Parser[Result] {
outer =>
def map[Result2](f: Result => Result2): Parser[Result2] = new Incomplete[Result2] {
override def innerComplete[R](seen: Set[Parser[_]]) =
outer.complete[R](seen).bimap(identity, _ map f)
override def innerDerive(candidate: EventIn): State[Cache, Parser[Result2]] = {
val x = outer innerDerive candidate
x map { p => p.map(f) }
}
override lazy val toString: String = s"Incomplete.map"
}
override def toString: String = "Incomplete"
final def complete[R](seen: Set[Parser[_]]): Either[Error[R], Completed[Result]] = {
// as a side note, this comparison being on pointer identity is the reason this algorithm is O(k^n)
// if we could magically compare parsers on equivalence of the language they generate, the algorithm
// would be O(n^2), even if I reenabled global ambiguity support. SO CLOSE!
if (seen contains this)
Left(Error("divergent"))
else
innerComplete[R](seen + this)
}
protected def innerComplete[R](seen: Set[Parser[_]]): Either[Error[R], Completed[Result]]
/**
* Progresses the parse over a single token and returns the continuation (as a parser). Note that
* the cache carried in the state monad is very important and must be preserved for the duration
* of an uncompletable parse. Once a parser resulting from this derivation is completable, that
* completion may be invoked and the state dropped. Dropping state in the middle of an incomplete
* parse will yield unsound results and possibly divergent parse trails!
*
* As the parametricity implies, this derivation function does not advance the parse over anything
* more than a single token, even if that single token taken in context with the state of the
* parse coming in cannot yield a valid output. For example, imagine a parser for matching
* parentheses. One could advance the parser over a token representing '('. This could not
* possibly yield a completable parser, since it is impossible for a correctly formed parentheses
* grammar to find a match for a newly-opened parenthetical. However, the derivation function
* will still return immediately after consuming the '(' token. The resulting parser can be used
* to advance over subsequent tokens, but cannot be completed then-and-there (attempting to do
* so would result in an Error).
*/
final def derive(t: EventIn): State[Cache, Parser[Result]] = {
for {
cache <- State.get[Cache]
derived <- cache get (t -> this) map { thunk =>
val t = thunk()
println(s"--in cache: $t")
State.pure[Cache, Parser[Result]](t)
} getOrElse {
for {
_ <- State.pure(println(s"--not in cache"))
derived <- innerDerive(t)
_ <- State.pure(println(s"--after inner derive: $derived"))
cache2 <- State.get[Cache]
_ <- State set (cache2 + ((t, this) -> { () => derived }))
} yield derived
}
} yield derived
}
protected def innerDerive(candidate: EventIn): State[Cache, Parser[Result]]
}
//
// typeclasses
//
implicit val parserInstance: Applicative[Parser[*]] = new Applicative[Parser[*]] {
def pure[A](a: A): Parser[A] = completed(a)
def ap[A, B](f: Parser[A => B])(fa: Parser[A]): Parser[B] =
fa ~ f ^^ { (a, f) => f(a) }
}
class SeqParser[LR, RR](_left: => Parser[LR], _right: => Parser[RR]) extends Incomplete[LR ~ RR] {
private lazy val left = _left
private lazy val right = _right
override lazy val toString: String = s"(${left}) ~ (${right})"
def innerComplete[R](seen: Set[Parser[_]]): Error[R] \/ Completed[LR ~ RR] = for {
clr <- left.complete[R](seen)
crr <- right.complete[R](seen)
} yield Completed((clr.result, crr.result))
def innerDerive(t: EventIn): State[Cache, Parser[LR ~ RR]] = {
println(s"---seq inner derive. l=$left, r=$right")
(left, right) match {
// deriving after completing is an error
case (Completed(_), Completed(_)) | (Completed(_), Error(_)) => State.pure(Error("unexpected end of stream"))
case (Error(msg), _) => State.pure(Error(msg))
case (_, Error(msg)) => State.pure(Error(msg))
case (Completed(lr), right: Incomplete[RR]) => for {
rp <- right derive t
} yield rp map {
(lr, _)
} // fails fast?
case (left: Incomplete[LR], right: Incomplete[RR]) => {
left.complete(Set()).toOption.map {
case Completed(lr) => {
for {
lp <- left derive t
rp <- right derive t
} yield {
//val x = lp ~ right | (rp map { (lr, _) })
(lp, rp) match {
case (Error(msg1), Error(msg2)) => Error[LR ~ RR](s"$msg1 and(2) $msg2")
case (Error(_), _) => rp map {
(lr, _)
}
case (_, Error(_)) => lp ~ right
case (_, _) => lp ~ right | (rp map {
(lr, _)
})
}
}
}
} getOrElse {
for {
lp <- left derive t
} yield {
//lp ~ right
lp match {
case Error(msg) => Error(msg)
case _ => lp ~ right
}
}
}
}
}
}
}
class UnionParser[Result](_left: => Parser[Result], _right: => Parser[Result]) extends Incomplete[Result] {
private lazy val left = _left
private lazy val right = _right
override lazy val toString: String = s"(${left}) | (${right})"
def innerComplete[R](seen: Set[Parser[_]]): Either[Error[R], Completed[Result]] = {
(left.complete[R](seen), right.complete[R](seen)) match {
case (Right(Completed(_)), Right(Completed(_))) => Either.left(Error("global ambiguity detected"))
case (lr@Right(Completed(_)), Left(Error(_))) => lr
case (Left(Error(_)), rr@Right(Completed(_))) => rr
case (Left(Error(msg)), Left(Error(msg2))) => {
if (msg == msg2)
Left(Error(msg))
else
Left(Error(s"$msg and(1) $msg2"))
}
}
}
def innerDerive(t: EventIn): State[Cache, Parser[Result]] = (left, right) match {
case (Error(leftMsg), Error(rightMsg)) => State.pure(Error(s"$leftMsg -OR- $rightMsg"))
case (Error(_), Completed(_)) => State.pure(Error("unexpected end of stream"))
case (Completed(_), Error(_)) => State.pure(Error("unexpected end of stream"))
case (Completed(_), Completed(_)) => State.pure(Error("unexpected end of stream"))
case (Error(_) | Completed(_), right: Incomplete[Result]) => right derive t
case (left: Incomplete[Result], Error(_) | Completed(_)) => left derive t
case (left: Incomplete[Result], right: Incomplete[Result]) => State { cache =>
//lazy val xx = left derive t run cache2
lazy val (cache3, lp) = left derive t run cache2 value
lazy val (cache4, rp) = right derive t run cache3 value
lazy val back: Parser[Result] = lp | rp
lazy val cache2: Cache = cache + ((t, this) -> { () => back })
// Short circuit if both sides fail
(lp, rp) match {
case (Error(l), Error(r)) => (cache4, Error(s"$l -AND- $r"))
case (_, _) => (cache4, lp | rp)
}
}
}
}
}
|
littlenag/zparsers
|
build.sbt
|
<reponame>littlenag/zparsers
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//import com.typesafe.sbt.SbtGit._
organization := "io.github.littlenag"
name := "zparsers" //"sparse" zroutines?
scalaVersion := "2.12.10"
addCompilerPlugin("org.typelevel" %% "kind-projector" % "0.11.0" cross CrossVersion.full)
resolvers += "Scalaz Bintray Repo" at "https://dl.bintray.com/scalaz/releases"
//val zioVersion = "1.0.0-RC20+43-6bbfac81-SNAPSHOT"
val zioVersion = "1.0.0-RC21-2"
libraryDependencies ++= Seq(
"com.lihaoyi" %% "sourcecode" % "0.1.9",
"org.scalaz" %% "scalaz-core" % "7.2.30",
"org.scalaz.stream" %% "scalaz-stream" % "0.8.6a",
// Coroutines!
"io.github.littlenag" %% "coroutines-impl" % "0.9-SNAPSHOT",
"dev.zio" %% "zio" % zioVersion,
"dev.zio" %% "zio-streams" % zioVersion,
"org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.2",
"co.fs2" %% "fs2-core" % "2.2.1",
"com.lihaoyi" %% "fastparse" % "2.2.2",
"com.typesafe.akka" %% "akka-stream" % "2.6.5",
"io.monix" %% "monix" % "3.2.1",
"io.monix" %% "monix-eval" % "3.2.1",
"io.monix" %% "monix-tail" % "3.2.1",
"org.specs2" %% "specs2-core" % "4.9.3" % Test,
"org.scalatest" %% "scalatest" % "3.1.1" % Test
)
scalacOptions in Test ++= Seq("-Yrangepos")
logBuffered in Test := false
licenses += ("Apache-2.0", url("http://www.apache.org/licenses/"))
publishMavenStyle := true
//versionWithGit
// I prefer not to hinder my creativity by predicting the future
//git.baseVersion := "master"
//bintraySettings
|
littlenag/zparsers
|
src/main/scala/scalaz/stream/parsers/KMap.scala
|
<reponame>littlenag/zparsers<filename>src/main/scala/scalaz/stream/parsers/KMap.scala
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalaz.stream
package parsers
private[parsers] class KMap[K[_] <: AnyRef, V[_] <: AnyRef] private (delegate: Map[AnyRef, AnyRef]) {
def apply[A](key: K[A]): V[A] = delegate(key).asInstanceOf[V[A]]
def get[A](key: K[A]): Option[V[A]] = delegate.get(key).asInstanceOf[Option[V[A]]]
def +[A](pair: (K[A], V[A])): KMap[K, V] =
new KMap[K, V](delegate + pair.asInstanceOf[(AnyRef, AnyRef)])
def contains[A](key: K[A]): Boolean = delegate contains key
}
private[parsers] object KMap {
def apply[K[_] <: AnyRef, V[_] <: AnyRef](pairs: ((K[A], V[A]) forSome { type A })*): KMap[K, V] =
new KMap[K, V](Map(pairs map { _.asInstanceOf[(AnyRef, AnyRef)] }: _*))
}
|
littlenag/zparsers
|
src/main/scala/playground/AkkaStreamPlayground.scala
|
<filename>src/main/scala/playground/AkkaStreamPlayground.scala
package playground
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
class AkkaMap[A, B](f: A => B) extends GraphStage[FlowShape[A, B]] {
val in = Inlet[A]("Map.in")
val out = Outlet[B]("Map.out")
override val shape = FlowShape.of(in, out)
override def createLogic(attr: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = {
push(out, f(grab(in)))
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = {
pull(in)
}
})
}
}
|
littlenag/zparsers
|
src/test/scala/zio/stream/parsers/ZioStreamSpecs.scala
|
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package zio.stream.parsers
//import Matcher._
import cats._
import cats.implicits._
import zio._
import zio.stream._
import org.scalatest._
import org.scalatest.matchers.must.Matchers._
object CharParsers extends Parsers with StreamMatchers {
override type EventIn = Char
//implicit override val showEventIn: Show[EventIn] = catsStdShowForChar
val letterA: Parser[Char] = 'A'
val letterB: Parser[Char] = 'B'
val AorB: Parser[Char] = letterA | letterB
val AB: Parser[String] = letterA ~ letterB ^^ { (_,_) => "AB" }
val AA: Parser[Int] = (letterA ~ letterA) ^^ ((_, _) => 1)
lazy val parens: Parser[Int] = (
('(' ~> parens) <~ ')' ^^ (1 +)
| completed(0)
)
lazy val parens0: Parser[Int] = (
(('(' ~> parens) <~ ')')
| completed(0)
)
def run[E, A](zio: => ZIO[ZEnv, E, A]): A = Runtime.default.unsafeRun(zio)
def parseToEvents[R](parser: Parser[R])(events: Seq[EventIn]): Seq[ParseResult[R]] = {
run((ZStream(events:_*) >>> matchCutToEvents(parser)).runCollect)
}
def parseSuccess[R](parser: Parser[R])(events: Seq[EventIn]): Chunk[R] = {
run((ZStream(events:_*) >>> matchCut(parser)).runCollect)
}
}
object TickParsers extends Parsers {
case class Tick(v: Int, t: Int)
override type EventIn = Tick
implicit val showEventIn: Show[Tick] = Show.fromToString
// the lack of flatMap means that we can't actually detect three increasing values in an intuitive way
lazy val increasing: Parser[Int] = (
pattern[Int] {
case a => a.v
} ~
pattern[Int] {
case a => a.v
} ~
pattern[Int] {
case a => a.v
} ^^ ((a,b,c) => if (c > b && b > a) 1 else 0 )
)
}
class ZioStreamSpecs extends wordspec.AnyWordSpec {
import CharParsers._
"character stream parse simple" should {
"parse a" in {
parseSuccess(AA)("AA") mustEqual Seq(1)
}
}
"character stream parsing" should {
"parse single a" in {
parseToEvents(AA)("AA") mustEqual Seq(ParseIncomplete, ParseSuccess(1), ParseEnded)
}
"parse one A expecting two" in {
parseToEvents(AA)("A") mustEqual Seq(ParseIncomplete, ParseEnded)
}
"parse parens" in {
parseToEvents(parens)("()") mustEqual Seq(ParseIncomplete, ParseSuccess(1), ParseEnded)
parseToEvents(parens)("(((())))").takeRight(3) mustEqual Seq(ParseIncomplete, ParseSuccess(4), ParseEnded)
}
"parse unexpected characters correctly" in {
parseToEvents(parens0)("(b)") mustEqual Seq(ParseIncomplete, ParseFailure("expected '(', got 'b' and(2) expected ')', got 'b'"), ParseFailure("expected '(', got ')'"), ParseEnded)
}
"parse B expecting A" in {
parseToEvents(letterA)("B") mustEqual Seq(ParseFailure("expected 'A', got 'B'"), ParseEnded)
}
"parse single A or B" in {
parseToEvents(AorB)("A") mustEqual Seq(ParseSuccess('A'), ParseEnded)
parseToEvents(AorB)("B") mustEqual Seq(ParseSuccess('B'), ParseEnded)
}
"parse A then B" in {
parseToEvents(AB)("AB") mustEqual Seq(ParseIncomplete, ParseSuccess("AB"), ParseEnded)
}
}
}
|
littlenag/zparsers
|
src/main/scala/scalaz/stream/parsers/package.scala
|
<reponame>littlenag/zparsers
/*
* Copyright 2015 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalaz.stream
import scalaz._
import scalaz.syntax.show._
package object parsers {
import Process._
type ~[+A, +B] = (A, B)
object ~ {
def unapply[A, B](in: (A, B)): Some[(A, B)] = Some(in)
}
/**
* Greedily parses and emits as rapidly as possible. Note that this may not be
* exactly what you want! The exact semantics here are to use the parser to
* consume tokens as long as the parser *requires* more tokens in order to emit
* a valid output, or until the parser derives an error on all branches. Thus,
* as soon as the parser can possibly emit a valid value, it will do so. At
* that point, the parser state is flushed and restarted with the next token.
* This is sort of the moral equivalent of the `findPrefixOf` function on
* Regex, but applied repeatedly to the input stream.
*
* I can't quite contrive a scenario where these semantics result in undesirable
* behavior, but I'm sure they exist. If nothing else, it seems clear that there
* are a lot of arbitrary defaults baked into these semantics, and it would be
* nice to have a bit more control.
*
 * Sidebar: this function *does* attempt to prevent completely vacuous parse
* results. Providing a parser which accepts the empty string will not result
* in an infinite stream of successes without consuming anything.
*/
def parse[Token: Show, Result](parser: Parser[Token, Result]): Process1[Token, String \/ Result] = {
import Parser._
def inner(parser: Parser[Token, Result]): State[Cache[Token], Process1[Token, String \/ Result]] = State.get[Cache[Token]] map { cache =>
val toEmit = parser.complete[Result]().bimap({ _.msg }, { _.result })
val x = receive1Or[Token, String \/ Result](emit(toEmit)) { token =>
parser match {
case _ @ Completed(_) => emit(-\/(s"input parser does not expect further input; got ${token.shows}"))
case _ @ Error(str) => emit(-\/(str))
case parser: Incomplete[Token, Result] => {
val innerState: State[Cache[Token], Process1[Token, String \/ Result]] = for {
derived <- parser derive token
back <- derived.complete() match {
case -\/(Error(_)) => inner(derived)
case \/-(Completed(r)) => State.state[Cache[Token], Process1[Token, String \/ Result]](emit(\/-(r)))
}
} yield back
/*
* This is hugely tricky. We're throwing away our derived state here. This is FINE
* though, because we're not going to preserve the state into the next parse trail!
* Basically, this line is taking advantage of the fact that we begin each parse with
* an empty cache and we *don't* delegate to `inner` multiple times in the above
* for-comprehension. If we were to repeatedly delegate to `inner`, the following line
* would be unsound.
*/
innerState eval cache
}
}
}
x
}
// parse as much as we can, restarting with each completed parse
(inner(parser) eval Cache[Token]) ++ parse(parser)
}
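// Added usage sketch (hypothetical, not part of the original file): `parse` returns an
// ordinary Process1, so it can be applied to any token stream with `pipe`, e.g.
//   Process.emitAll(tokens) pipe parse(grammar)
// emitting a String \/ Result for every complete prefix the grammar consumes and
// restarting the parser after each emission.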
}
|
littlenag/zparsers
|
src/main/scala/playground/ParsersPlayground.scala
|
<reponame>littlenag/zparsers
package playground
object ParsersPlayground {
/**
*
* a matcher is already a driver program for the coroutine that a parser generates
 * parsers generate coroutines that compose; that's the only magic
*
* when a matcher evaluates the parser on the next input, could probably just compile
* the parser to a state monad?
*
* parser needs an algebra for communicating with the matcher
*
* matchers and handlers as names?
*/
/**
distinguish bounded (time or length) from unbounded from "instantaneous" operations
Seq could require two bounded ops
Union parser "|" would have a bound on the entire collection of ops
Access an AND parser inside a mustContain window
MustMatch window
Matches - means an exact match of the complete buffer
Contains - means some exists one or more time in the buffer
Starts with
Ends with
sequence matcher?
event matcher?
items, elements, messages, events, commands, datagrams
https://softwaremill.com/windowing-data-in-akka-streams/
After deciding on using event- or processing-time for events, we then have to decide how to
divide the continuous time into discrete chunks. Here usually there are two options. The
first is tumbling windows, parametrised by length. Time is divided into non-overlapping parts
and each data element belongs to a single window. E.g. if the length is 30 minutes, the windows
would be [12:00, 12:30), [12:30, 13:00), etc. The second option is sliding windows,
parametrised by length and step. These windows overlap, and each data element can belong to
multiple windows. For example if the length is 30 minutes, and step 5 minutes, the windows would
be [12:00, 12:30), [12:05, 12:35), etc.
===> problem since you would want to apply patterns to windows
push (zio streams) vs
pull (scalaz streams) vs
dynamic push/pull (akka streams)
https://softwaremill.com/comparing-akka-stream-scalaz-stream/
*/
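// Added sketch (illustrative only, not part of the original notes): tumbling vs sliding
// windows over raw event timestamps using plain collections. The helper names are
// hypothetical; they only make the [12:00, 12:30) / [12:05, 12:35) example above concrete.
private def tumblingWindows(times: Seq[Long], length: Long): Map[Long, Seq[Long]] =
  times.groupBy(t => (t / length) * length) // each event falls into exactly one window
private def slidingWindows(times: Seq[Long], length: Long, step: Long): Seq[(Long, Seq[Long])] = {
  val starts: Seq[Long] = if (times.isEmpty) Seq.empty[Long] else times.min to times.max by step
  starts.map(s => s -> times.filter(t => t >= s && t < s + length)) // an event may fall into several windows
}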
// stream of mouse click events
// double click
// - two click events in a short time
case class MouseClick(button: Int, time: Long /* nanos */ )
case object DoubleClick
/*
// Clicks in, DoubleClicks out
object MouseEvents
extends EventPatternComponent[MouseClick, DoubleClick.type] {
// From EventPatternComponent
val in = stream[MouseClick]
val defaultSeqTimeout = 100 seconds
val doubleClickCutoff = 100.ms
// match buffer start - requires evidence of a buffer
Parser.bufferStart
// match buffer end - requires evidence of a buffer
Parser.bufferEnd
// Parsers provide the patterns to match on
// applying a parser to a stream should result in
// parsers transform streams of type A into streams of type B
// compiling a parser should result in a transducer
val button1 = filter(_.button == 1)
// A followed by another A within 50 ms, < 50.ms within, > 50.ms not within 50.ms?
val doubleClickSeq =
(button1 ~ (doubleClickCutoff, button1)).as(DoubleClick)
// how does the window slide forward? on event? via timer?
// is there a movement policy? as is this would yield successive DoubleClicks for clicks spaced closely together
val doubleClickInTime = Parser
.slidingWindow(doubleClickCutoff) // SlidingWindowParser extends Parser
.contains(button1 ~ button1)
.as(DoubleClick)
val doubleClickInLengthT1 = Parser // SizedWindowParser extends Parser
.slidingWindow(2)
.matches {
button1 ~ (doubleClickCutoff, button1)
}
.as(DoubleClick)
val doubleClickInLengthT2 = Parser
.slidingWindow(2)
.^^ {
case (e1, e2)
if (e1.time + 50.ms > e2.time) && e1.button == 1 && e2.button == 1 =>
DoubleClick
}
// builder pattern for stateful parsers (react builder inspired), this is very monix-like
val statefulParser = Parser
.stateful(0)
.onMessage {
case (events, curState) => (emit(events), curState + events.length) /* work like Push.emit Push.next */
}
.onEnd {
case curState => (next, 0) /* work like Push.emit Push.next */
}
.build
// spill combines quill's quoted DSL with graph dsl from akka streams
// introduce streams graph dsl
// should be similar to the akka graph dsl, define a class than can materialize an operator in some API
// class should be able to connect inputs and outputs, and introduce new Sources for forking output
// Source and Sinks would not necessarily be a concern outside of timers and output ports
val button1 /*: Parser[MouseClick] */ = quote {
filter(_.button == 1)
}
// A followed by another A within 50 ms, < 50.ms within, > 50.ms not within 50.ms?
val doubleClickSeq =
(button1 ~ (doubleClickCutoff, button1)).as(DoubleClick)
val doubleClicksStream /* : Stream[DoubleClick] */ = quote {
stream[MouseClick] matching doubleClickSeq
}
// An incomplete parse needs to be able to return a list of events to emit
// again with the co-routine like parsing structure
// could be made more performant by using special control flow structures that a quoted DSL could parse out
// co-routine makes passing state easier, since otherwise would have to thread through the parser combinator constructors
// using a co-routine feels very much like a transducer, or riemann transducer
// parser as transducer
val doubleClicksStream2 /* : Stream[DoubleClick] */ = quote {
// match on the last 10 events (sliding window)
stream[MouseClick] >>> Matcher.matchWindowed(10.sliding, doubleClickSeq)
// match on tumbling windows of 10 events
stream[MouseClick] >>> Matcher.matchWindowed(10.tumbling, doubleClickSeq)
// discard any events consumed by the parser
stream[MouseClick] >>> Matcher.matchCut(doubleClickSeq)
// retry the parser on the next event in the stream, discarding only the first event
stream[MouseClick] >>> Matcher.matchEvery(doubleClickSeq)
// Matcher: similar to state monad
// state is here to record information about emitted or dropped events
// parser => state, eventBuffer, streamContinuation => state, eventBuffer, streamContinuation
// parser => (state (may contains eventBuffer, will contain current parser state), nextEvent) => (state, List[Event] (to emit downstream))
// this would be pretty reasonable to implement in zio, ZMatcher, ZParser
// ZMatcher is a special kind of Transducer
// knows how to apply a ZParser, which is a special kind of parser combinator
}
*/
// Matchers should probably stay as they are, could take Parser defined as coroutines
// maybe special case Matcher.fromCoroutine(..) in case it makes lots of sense
/*
what would a coroutine-based parser look like? pretty simple actually
val id = coroutine { (x: Int) => x }
val vowelcounts = coroutine { (s: String) =>
yieldval(s.count(_ == 'a'))
yieldval(s.count(_ == 'e'))
yieldval(s.count(_ == 'i'))
yieldval(s.count(_ == 'o'))
yieldval(s.count(_ == 'u'))
}
val button1 = filter(_.button == 1)
// A followed by another A within 50 ms, < 50.ms within, > 50.ms not within 50.ms?
val doubleClickSeq =
(button1 ~ (doubleClickCutoff, button1)).as(DoubleClick)
// A followed by another A within 50 ms
val doubleClickCR = coroutine { (ctx: StreamContext) =>
val b1 = next()
if (b1.button != 1) fail()
val b2 = nextWithin(50.ms)
if (b2.button != 1) fail()
yieldval(DoubleClick)
}
// http://storm-enroute.com/coroutines/docs/faq/
// http://reactors.io/
scala coroutines reactors
// https://wiki.tcl-lang.org/page/parsing+with+coroutine
// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.74.6048&rep=rep1&type=pdf
// From EventPatternComponent
override def out /* : Stream[DoubleClick] */ = quote {
in.filter(_.button == 1) matching doubleClickSeq
}
override def out /* : Stream[DoubleClick] */ = matching doubleClickSeq
// SingleClick | DoubleClick | TripleClick then choose TripleClick
}
val ztransducer: ZTransducer[Any, _, MouseClick, DoubleClick.type] =
MouseEvents.doubleClicksStream.compile[ZioTransducer]
// instantiating the graph would be a stream with a single in and single out
val ztransducer: ZTransducer[Any, _, MouseClick, DoubleClick.type] =
MouseEvents.compile[ZioTransducer]
*/
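// A concrete, minimal sketch (independent of the quoted DSL and coroutine ideas above):
// detect double clicks from an in-memory list of MouseClicks by pairing consecutive
// button-1 clicks within a cutoff. detectDoubleClicks and cutoffNanos are hypothetical
// names; clicks are assumed to be ordered by time.
def detectDoubleClicks(clicks: List[MouseClick], cutoffNanos: Long): List[DoubleClick.type] =
  clicks.filter(_.button == 1)
    .sliding(2)
    .collect { case List(a, b) if b.time - a.time <= cutoffNanos => DoubleClick }
    .toList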
// parser combinators are
// Input -> (Result, Input)
// riemann follows a similar style to react, both forward event to a children param
// react is more FRP on the Props, riemann is streaming,
// react implements the state monad transform
// state -> (state, result)
// Props -> State -> (State, Component)
// and implicitly passes events to child components
//https://www.sderosiaux.com/articles/2018/06/15/a-simple-way-to-write-parsers-using-the-state-monad/
// akka streams only allow one in and one out for Flows
// Zio Transducer and Conduit are one in and one out
//
// there are three notions that need to be encoded
// - streams - an infinite stream of data
// - windows - a finite subset of a stream
// - matcher -
// stream of windows?
// applying a stream to a window expr -> stream of windows
// applying a stream to a match expr -> ?
// compile parser to
// - zio streams -> ZTransducer? ZConduit?
// - akka streams -> Flow
// - fs2 -> Pipe
// - monix -> Observable?
// samza
// flink
// heron
// esper
// beepbeep3
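// A rough sketch (not the actual Matcher implementation) of the state-monad-like step
// described in the notes above: a matcher step takes the current state and the next
// event and returns the updated state plus the events to emit downstream.
// MatcherStep and countClicks are hypothetical names used only for illustration.
type MatcherStep[S, E, O] = (S, E) => (S, List[O])
// example: count button-1 clicks and emit the running total after each one
val countClicks: MatcherStep[Int, MouseClick, Int] =
  (count, click) =>
    if (click.button == 1) (count + 1, List(count + 1))
    else (count, Nil)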
}
|
littlenag/zparsers
|
src/main/scala/playground/ZioStreamPlayground.scala
|
package playground
/**
* @author <NAME> (<EMAIL>)
*/
object ZioStreamPlayground {
import zio.stream._
val intStream: Stream[Nothing, Int] = Stream.fromIterable(0 to 100)
val stringStream: Stream[Nothing, String] = intStream.map(_.toString)
val s = ZSink.sum[Int]
val x = intStream.run(s)
//intStream.broadcast()
//intStream.aggregateAsyncWithin()
//intStream.via()
}
|
littlenag/zparsers
|
src/main/scala/playground/FastParsePlayground.scala
|
package playground
/**
* @author <NAME> (<EMAIL>)
*/
object FastParsePlayground {
import fastparse._
import NoWhitespace._
def binary(implicit ev: P[_]) = P( ("0" | "1" ).rep.! )
def binaryNum[_: P] = P( binary.map(Integer.parseInt(_, 2)) )
val Parsed.Success("1100", _) = parse("1100", x => binary(x))
val Parsed.Success(12, _) = parse("1100", binaryNum(_))
List(1,2,3).filter(_ > 2)
}
|
littlenag/zparsers
|
src/main/scala/zio/stream/parsers/StreamMatchers.scala
|
package zio.stream.parsers
import cats._
//import cats.data._
//import cats.syntax.show._
//import cats.syntax.either._
import zio._
import zio.stream._
import zio.stream.ZTransducer.Push
trait StreamMatchers { self: Parsers =>
sealed trait ParseResult[+Result] extends Product with Serializable
final case class ParseSuccess[+Result](value: Result) extends ParseResult[Result]
final case class ParseFailure[+Result](msg: String) extends ParseResult[Result]
final case object ParseIncomplete extends ParseResult[Nothing]
final case object ParseEnded extends ParseResult[Nothing]
//implicit def showEventIn: Show[EventIn]
//implicit def showEventOut: Show[EventOut]
/*
stream[MouseClick] >>> Matcher.matchWindowed(10.sliding, doubleClickSeq)
// match on tumbling windows of 10 events
stream[MouseClick] >>> Matcher.matchWindowed(10.tumbling, doubleClickSeq)
// discard any events consumed by the parser
stream[MouseClick] >>> Matcher.matchCut(doubleClickSeq)
// retry the parser on the next event in the stream, discarding only the first event
stream[MouseClick] >>> Matcher.matchEvery(doubleClickSeq)
matchTumbling
*/
def filterSuccesses[Result] = ZTransducer
.identity[ParseResult[Result]]
.filter {
case ParseSuccess(_) => true
case _ => false
}.map {
case ParseSuccess(value) => value
}
def matchCut[R](parser: Parser[R]): ZTransducer[Any, Nothing, EventIn, R] = {
matchCutToEvents(parser) >>> filterSuccesses[R]
}
/**
* Matches as data arrives and emits on every event. Note that this may not be
* exactly what you want! The exact semantics here are to use the parser to
* consume tokens as long as the parser *requires* more tokens in order to emit
* a valid output, or until the parser derives an error on all branches. Thus,
* as soon as the parser can possibly emit a valid value, it will do so. At
* that point, the parser state is flushed and restarted with the next token.
* This is sort of the moral equivalent of the `findAllMatches` function on
* Regex, but applied repeatedly to the input stream.
*
* I can't quite contrive a scenario where these semantics result in undesirable
* behavior, but I'm sure they exist. If nothing else, it seems clear that there
* are a lot of arbitrary defaults baked into these semantics, and it would be
* nice to have a bit more control.
*
* Sidebar: this function *does* attempt to prevent completely vacuous parse
* results. Providing a parser which accepts the empty string will not result
* in an infinite stream of successes without consuming anything.
*/
def matchCutToEvents[R](parser: Parser[R]): ZTransducer[Any, Nothing, EventIn, ParseResult[R]] = {
ZTransducer {
//import Parser._
// TODO handle other initial parser states
val initialParser = parser.asInstanceOf[Incomplete[R]]
val cleanState = (Cache, parser, false) // have NOT flushed current state?
val flushedState = (Cache, parser, true) // have YES flushed current state?
for {
curState <- ZRef.makeManaged[(Cache, Parser[R], Boolean)](cleanState)
push = { (input: Option[Chunk[EventIn]]) =>
input match {
// None is received when the upstream element has terminated
case None =>
curState
.modify {
case e @ (_, _, true) => Push.next -> flushedState
case e @ (_, _@Completed(result), false) => Push.emit(ParseSuccess(result)) -> flushedState
case e @ (_, _@Error(msg), false) => Push.emit(ParseFailure(msg)) -> flushedState
case e @ (_, _:Incomplete[R], false) => Push.emit(ParseEnded) -> flushedState
}
.flatten
case Some(is) =>
curState
.modify {
// parser should only ever be in an incomplete state
case e @ (cache, parser:Incomplete[R], _) =>
val builder = Seq.newBuilder[ParseResult[R]]
builder.sizeHint(is.length)
val initialState = (cache, parser, builder)
// For each event in the chunk, push it to the parser, process the result
val (finalCache, finalParser, toEmit) = is.foldLeft(initialState) { case ((cache, parser, pr), event) =>
val (newCache, derived) = parser.derive(event).run(cache).value
println(s">>input: '$event', parser: $parser ~> parser: $derived")
val ret = (derived.complete(), derived) match {
case (Left(Error(msg1)), Completed(value)) =>
println(s"unexpected completion: $msg1 with value $value")
(Cache, initialParser, pr += ParseFailure(msg1) )
case (Left(Error(msg1)), _@Error(msg2)) =>
println(s"completion error: $msg1")
println(s"parser-error: $msg2")
(Cache, initialParser, pr += ParseFailure(msg2) )
case (Left(Error(msg)), nextParser:Incomplete[R]) =>
println(s"incomplete parse. error: $msg")
(newCache, nextParser, pr += ParseIncomplete )
case (Right(Completed(r)), _) =>
(Cache, initialParser, pr += ParseSuccess(r) )
}
println("<<")
ret
}
Push.emit(Chunk.fromArray(toEmit.result().toArray)) -> (finalCache, finalParser, false)
case e @ (_, _, _) =>
println("!!!!!!!!!!!!!!!!!!! unexpected !!!!!!!!!!!!!!!!!!!!!!!!")
Push.next /*ZIO.dieMessage(s"Parser continued with terminated parser. $e")*/ -> cleanState
}.flatten
}
}
} yield push
}
}
}
|
marcozov/extreme_startup_servers
|
scala/scalatra/src/main/scala/ExtremeStartup.scala
|
import org.scalatra.ScalatraServlet
class ExtremeStartup extends ScalatraServlet {
get("/") {
"The server is running"
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/SparkShellHelpers.scala
|
import com.lucidworks.searchhub.analytics.AnalyzerUtils._
import com.lucidworks.searchhub.analytics._
import com.lucidworks.spark.util.SolrSupport
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.param.IntParam
import org.apache.spark.mllib.linalg.{Vector => SparkVector}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.functions._
/***
* This file is not meant to be imported or loaded in any way - instead, use the lines here inside of
* the spark shell ($FUSION_HOME/bin/spark-shell).
* You'll already have a "val sqlContext: SQLContext" in scope, so you can work your way down
* this file, bit by bit. It's encapsulated in an object so that it compiles, and if code changes that makes this
* file not compile, it shows that this example code needs updating.
*/
object SparkShellHelpers {
val sqlContext: SQLContext = ???
//Setup our Solr connection
val opts = Map("zkhost" -> "localhost:9983/lwfusion/3.0.0/solr", "collection" -> "lucidfind", "query" -> "*:*")
//Try this if you want to get rid of the JIRA bot noise in the index, this tends to lead to better
//clusters, since the Jenkins/JIRA file handling is pretty primitive so far and thus skews the clusters
/*
val opts = Map("zkhost" -> "localhost:9983", "collection" -> "lucidfind", "query" -> "isBot:false",
"fields" -> "id,body,title,subject,publishedOnDate,project,content")
*/
val tmpDF = sqlContext.read.format("solr").options(opts).load
//Change this depending on how many mail messages you have loaded. The current settings
//were based on about 200K messages loaded and wanting the results to finish in a minute or two
val mailDF = tmpDF.sample(false, 0.2)
mailDF.cache()
//materialize the data so that we don't have to hit Solr every time
mailDF.count()
val textColumnName = "body"
val tokenizer = analyzerFn(noHTMLstdAnalyzerSchema)
val vectorizer = TfIdfVectorizer.build(mailDF, tokenizer, textColumnName)
val vectorizedMail = TfIdfVectorizer.vectorize(mailDF, vectorizer, textColumnName)
vectorizedMail.cache()
//Build a k-means model of size 20
val kmeansModel = ManyNewsgroups.buildKmeansModel(vectorizedMail, k = 20, maxIter = 10, textColumnName)
//Join the centroid ids back to the mail, so we can update them in Solr if we want
val mailWithCentroids = kmeansModel.transform(vectorizedMail)
mailWithCentroids.groupBy("kmeans_cluster_i").count().show()
//kmeansModel.clusterCenters.foreach(v => println(f"${math.sqrt(ManyNewsgroups.normSquaredL2(v))}%.2f"))
//Save to Solr if you want
mailWithCentroids.write.format("solr").options(Map("zkhost" -> "localhost:9983/lwfusion/3.0.0/solr", "collection" -> "lucidfind", "batch_size" -> "1000")).mode(org.apache.spark.sql.SaveMode.Overwrite).save
// If you want to commit, run these
SolrSupport.getCachedCloudClient("localhost:9983/lwfusion/3.0.0/solr").commit("lucidfind")
//Do some cluster analysis to get a feel for what the clusters look like.
val lenFn = (v: SparkVector) => v.numNonzeros
val lenUdf = udf(lenFn)
val withVectLength = mailWithCentroids.withColumn("vect_len", lenUdf(mailWithCentroids("body_vect")))
withVectLength.groupBy("kmeans_cluster_i").avg("vect_len").show()
//Build a topic model using Latent Dirichlet Allocation
val ldaModel = ManyNewsgroups.buildLDAModel(vectorizedMail, k = 5, textColumnName)
//Get the actual topics
val topicTerms = ManyNewsgroups.tokensForTopics(ldaModel, vectorizer)
topicTerms.foreach { case(topicId, list) => println(s"$topicId : ${list.map(_._1).take(10).mkString(",")}") }
//Enrich our terms by building a Word2Vec model
val w2vModel = ManyNewsgroups.buildWord2VecModel(vectorizedMail, tokenizer, textColumnName)
w2vModel.findSynonyms("query", 5).take(5)
val Array(trainingData, testData) = vectorizedMail.randomSplit(Array(0.8, 0.2), 123L)
val labelColumnName = "project"
//Do some classification on the projects. This is like 20 Newsgroups (http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.data.html) on steroids
val paramGrids: Map[RandomForestClassifier => IntParam, Array[Int]] =
Map(
((rf: RandomForestClassifier) => rf.maxDepth) -> Array(5, 10, 20),
((rf: RandomForestClassifier) => rf.maxBins) -> Array(8, 16, 32),
((rf: RandomForestClassifier) => rf.numTrees) -> Array(50, 100)
)
val (randomForestModel, randomForestMetrics) =
ManyNewsgroups.trainRandomForestClassifier(trainingData, testData, labelColumnName, textColumnName, paramGrids)
//For each document, find 5 top terms(by tfidf) and 2 of their synonyms(by w2v model), store them in 'topSyns'
//output schema looks like following
/*
root
|-- id: string (nullable = false)
|-- body: string (nullable = true)
|-- title: string (nullable = true)
|-- subject: string (nullable = true)
|-- publishedOnDate: timestamp (nullable = false)
|-- project: string (nullable = true)
|-- content: string (nullable = true)
|-- body_vect: vector (nullable = true)
|-- topSyns: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- _1: string (nullable = true)
| | |-- _2: array (nullable = true)
| | | |-- element: string (containsNull = true)
*/
def findSyn = (a: String) => w2vModel.findSynonyms(a,2).map(_._1)
def getTopSyns = (t: SparkVector) => t.toArray.zipWithIndex.sortBy(-_._1).take(5).map(_._2).flatMap(vectorizer.dictionary.map(_.swap).get).map(a => (a, findSyn(a)))
def getTopSynsUdf = udf(getTopSyns)
vectorizedMail.withColumn("topSyns", getTopSynsUdf(vectorizedMail("body_vect"))).take(5)
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/MailMessage.scala
|
package com.lucidworks.searchhub.analytics
import java.text.{SimpleDateFormat, DateFormat}
import scala.collection.JavaConverters._
import com.lucidworks.apollo.common.pipeline.PipelineDocument
import com.lucidworks.searchhub.connectors.BaseDocument
import com.lucidworks.searchhub.connectors.mail.handler.MimeMailParser
import org.apache.spark.sql.Row
/**
* Spark-Solr Schema for a DataFrame of the searchhub index
*/
case class MailMessage(id: String, project: String, list: String, listType: String,
from: String, fromEmail: String, inReplyTo: String,
threadId: String, numReplies: Int,
subject: String, simpleSubject: String, body: String, displayBody: String,
publishedOnDate: java.sql.Timestamp)
object MailMessage {
def fromRawString(id: String, contents: String): Option[MailMessage] = {
val mailParser = new MimeMailParser//TODO: fix this to take in configurable patterns like the real stage does
val doc = new PipelineDocument(id)
doc.setField("_raw_content_", contents.getBytes)
val parsedDocOpt = try {
Option(mailParser.parse(doc))
} catch {
case e: Exception =>
println("unable to parse " + id)
None
}
parsedDocOpt.map { parsedDoc =>
val solrDateFmt: DateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'")
def concatenated(field: String): String = parsedDoc.getFieldValues(field).asScala.mkString(" ")
MailMessage(
id = id,
project = concatenated(MimeMailParser.FIELD_PROJECT),
list = concatenated(MimeMailParser.FIELD_LIST),
listType = concatenated(MimeMailParser.FIELD_LIST_TYPE),
from = concatenated(MimeMailParser.FIELD_FROM),
fromEmail = concatenated(MimeMailParser.FIELD_FROM_EMAIL),
inReplyTo = concatenated(MimeMailParser.FIELD_IN_REPLY_TO),
threadId = concatenated(MimeMailParser.FIELD_THREAD_ID),
numReplies = 0,
subject = concatenated(MimeMailParser.FIELD_SUBJECT),
simpleSubject = concatenated(MimeMailParser.FIELD_SUBJECT_SIMPLE),
body = concatenated(BaseDocument.FIELD_CONTENT),
displayBody = concatenated(BaseDocument.FIELD_CONTENT_DISPLAY),
publishedOnDate = new java.sql.Timestamp(
solrDateFmt.parse(parsedDoc.getFirstFieldValue(MimeMailParser.FIELD_SENT_DATE).toString).getTime)
)
}
}
def fromRow(row: Row): MailMessage = {
def concatenated(field: String): String = row.getAs[String](field)
MailMessage(
id = row.getAs[String]("id"),
project = concatenated(MimeMailParser.FIELD_PROJECT),
list = concatenated(MimeMailParser.FIELD_LIST),
listType = concatenated(MimeMailParser.FIELD_LIST_TYPE),
from = concatenated(MimeMailParser.FIELD_FROM),
fromEmail = concatenated(MimeMailParser.FIELD_FROM_EMAIL),
inReplyTo = concatenated(MimeMailParser.FIELD_IN_REPLY_TO),
threadId = concatenated(MimeMailParser.FIELD_THREAD_ID),
numReplies = row.getAs[Int](MimeMailParser.FIELD_REPLIES),
subject = concatenated(MimeMailParser.FIELD_SUBJECT),
simpleSubject = concatenated(MimeMailParser.FIELD_SUBJECT_SIMPLE),
body = concatenated(BaseDocument.FIELD_CONTENT),
displayBody = concatenated(BaseDocument.FIELD_CONTENT_DISPLAY),
publishedOnDate = row.getAs[java.sql.Timestamp](MimeMailParser.FIELD_SENT_DATE)
)
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/AnalyzerUtils.scala
|
package com.lucidworks.searchhub.analytics
import com.lucidworks.spark.analysis.LuceneTextAnalyzer
/**
* Example schema for various Lucene analyzer chains
*/
object AnalyzerUtils {
val whitespaceSchema =
"""{ "analyzers": [
| { "name": "ws",
| "tokenizer": { "type": "whitespace"} }],
| "fields": [{ "regex": ".+", "analyzer": "ws" } ]}""".stripMargin
val stdAnalyzerSchema =
"""{ "analyzers": [
| { "name": "StdTokLowerStop",
| "tokenizer": { "type": "standard" },
| "filters": [
| { "type": "lowercase" },
| { "type": "stop" }] }],
| "fields": [{ "regex": ".+", "analyzer": "StdTokLowerStop" } ]}
""".stripMargin
val noHTMLstdAnalyzerSchema =
"""{ "analyzers": [
| { "name": "StdTokLowerStop",
| "charFilters": [ { "type": "htmlstrip" } ],
| "tokenizer": { "type": "standard" },
| "filters": [
| { "type": "lowercase" },
| { "type": "stop" }] }],
| "fields": [{ "regex": ".+", "analyzer": "StdTokLowerStop" } ]}
""".stripMargin
val germanStemSchema =
"""{ "analyzers": [
| { "name": "gstem",
| "tokenizer": { "type": "standard" },
| "filters": [
| {"type": "lowercase"},
| {"type": "stop"},
| {"type": "germanstem"}
| ]}],
| "fields": [{ "regex": ".+", "analyzer": "gstem" } ]}""".stripMargin
val porterStemSchema =
"""{ "analyzers": [
| { "name": "std",
| "tokenizer": { "type": "standard" },
| "filters": [
| {"type": "lowercase"},
| {"type": "stop"},
| {"type": "porterstem"}
| ]}],
| "fields": [{ "regex": ".+", "analyzer": "std" } ]}""".stripMargin
def analyzerFn(schema: String): String => List[String] = {
val analyzer = new LuceneTextAnalyzer(schema)
(s: String) => analyzer.analyze("N/A", s).toList
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/ManyNewsgroups.scala
|
package com.lucidworks.searchhub.analytics
import org.apache.spark.ml.param.IntParam
import org.apache.spark.ml.{PipelineModel, Model, Pipeline}
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.clustering.{LDAModel, LDA, KMeans}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
import org.apache.spark.mllib.feature.Word2Vec
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{Vector => SparkVector, Vectors, DenseVector, SparseVector}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
object ManyNewsgroups {
def subtract(v1: SparkVector, v2: SparkVector): SparkVector = {
(v1, v2) match {
case (d1: DenseVector, d2: DenseVector) => {
val (a1, a2) = (d1.toArray, d2.toArray)
Vectors.dense(a1.indices.toArray.map(i => a1(i) - a2(i)))
}
case (sp1: SparseVector, sp2: SparseVector) => subtract(sp1.toDense, sp2.toDense) // TODO / FIXME!!!
case _ => throw new IllegalArgumentException("all vectors are either dense or sparse!")
}
}
def normSquaredL2(v: SparkVector) = v.toArray.foldLeft(0.0) { case(a: Double,b: Double) => a + b*b }
def distSquaredL2(v1: SparkVector, v2: SparkVector) = normSquaredL2(subtract(v1, v2))
def distL2Matrix(a: Array[SparkVector]) = {
for {
(v1, i) <- a.zipWithIndex
(v2, j) <- a.zipWithIndex
} yield {
(i, j, distSquaredL2(v1, v2))
}
}
def buildKmeansModel(df: DataFrame, k: Int = 5, maxIter: Int = 10, fieldName: String) = {
val kMeans = new KMeans()
.setK(k)
.setMaxIter(maxIter)
.setFeaturesCol(fieldName + "_vect")
.setPredictionCol("kmeans_cluster_i")
kMeans.fit(df)
}
def buildLDAModel(df: DataFrame, k: Int = 5, fieldName: String) = {
val lda = new LDA().setK(k).setFeaturesCol(fieldName + "_vect")
lda.fit(df)
}
/**
* Helper method to let you inspect your LDA model
* @param lda LDAModel fit to your corpus
* @param vectorizer the weighted dictionary tokenizer + vectorizer struct
* @return a map of topic id -> list of (token, weight) pairs sorted by probability
*/
def tokensForTopics(lda: LDAModel, vectorizer: TfIdfVectorizer): Map[Int, List[(String, Double)]] = {
val reverseDictionary = vectorizer.dictionary.toList.map(_.swap).toMap
val topicDist = lda.topicsMatrix
val tokenWeights = for { k <- 0 until lda.getK; i <- 0 until vectorizer.dictionary.size } yield {
k -> (reverseDictionary(i), topicDist(i, k))
}
tokenWeights.groupBy(_._1).mapValues(_.map(_._2).toList.sortBy(-_._2))
}
def buildWord2VecModel(df: DataFrame, tokenizer: String => List[String], fieldName: String) = {
val tokensRdd = df.rdd.map(row => tokenizer(row.getAs[String](fieldName)).toSeq)
val word2Vec = new Word2Vec()
word2Vec.fit(tokensRdd)
}
def trainRandomForestClassifier(trainingData: DataFrame,
testData: DataFrame,
labelColumnName: String,
featuresColumnName: String,
paramGrids: Map[RandomForestClassifier => IntParam, Array[Int]] = Map.empty) = {
val labelIndexCol = labelColumnName + "_idx"
val featureVectorCol = featuresColumnName + "_vect"
val labelIndexer = new StringIndexer()
.setInputCol(labelColumnName)
.setOutputCol(labelIndexCol)
.fit(trainingData)
val randomForest: RandomForestClassifier = new RandomForestClassifier()
.setLabelCol(labelIndexCol)
.setFeaturesCol(featureVectorCol)
val predictionCol = "prediction"
val labelConverter = new IndexToString()
.setInputCol(predictionCol)
.setOutputCol("predicted_" + labelColumnName)
.setLabels(labelIndexer.labels)
val rfPipe = new Pipeline().setStages(Array(labelIndexer, randomForest, labelConverter))
val paramGridMap = paramGrids.map { case (paramFn, grid) => (paramFn(randomForest), grid) }
val paramGrid = new ParamGridBuilder()
.addGrid(randomForest.maxDepth, paramGridMap.getOrElse(randomForest.maxDepth, Array(5, 20, 40)))
.addGrid(randomForest.maxBins, paramGridMap.getOrElse(randomForest.maxBins, Array(8, 32, 64)))
.addGrid(randomForest.numTrees, paramGridMap.getOrElse(randomForest.numTrees, Array(25, 100, 400)))
.build()
val evaluator = new MulticlassClassificationEvaluator()
.setLabelCol(labelIndexCol)
.setPredictionCol(predictionCol)
.setMetricName("precision") // "f1", "precision", "recall", "weightedPrecision", "weightedRecall"
val cv = // 5-fold cross-validation
new CrossValidator().setEstimator(rfPipe).setEvaluator(evaluator).setEstimatorParamMaps(paramGrid).setNumFolds(5)
val model = cv.fit(trainingData)
val bestModel = model.bestModel.asInstanceOf[Model[PipelineModel]]
// measure metrics on unseen-by-cross-validation held-out test data.
val classifiedData = bestModel.transform(testData)
import trainingData.sqlContext.implicits._
val predictionsAndLabels: RDD[(Double, Double)] =
classifiedData.select(predictionCol, labelIndexCol).map { r: org.apache.spark.sql.Row => (r.getDouble(0), r.getDouble(1)) }.rdd
val metrics = new MulticlassMetrics(predictionsAndLabels)
(bestModel, metrics)
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/recommender/SimpleTwoHopRecommender.scala
|
package com.lucidworks.searchhub.recommender
import org.apache.spark.sql.{SQLContext, Row, DataFrame}
import org.slf4j.LoggerFactory
case class UserId(id: String)
case class ItemId(id: String)
case class Pref(userId: UserId, itemId: ItemId, weight: Double)
case class ItemSim(itemId1: ItemId, itemId2: ItemId, weight: Double)
case class UnStructSim(itemId1: String, itemId2: String, weight_d: Double)
object SimpleTwoHopRecommender extends Serializable {
val log = LoggerFactory.getLogger("SimpleTwoHopRecommender")
def itemRecs(userItemMatrix: DataFrame, userIdCol: String, itemIdCol: String, weightCol: String,
recsPerItem: Int = 10, outerProductLimit: Int = 100): DataFrame = {
val toPref = (row: Row) =>
Pref(UserId(row.getAs[String](userIdCol)), ItemId(row.getAs[String](itemIdCol)), row.getAs[Double](weightCol))
val prefMatrix = userItemMatrix.rdd.map(toPref)
if (log.isDebugEnabled) {
log.debug(s"using ${prefMatrix.count()} preferences to compute item similarity recs")
}
val matrixProduct = prefMatrix.groupBy(_.userId).flatMap { case (userId, prefs) =>
val topPrefs = prefs.toList.sortBy(-_.weight).take(outerProductLimit)
for {
pref1 <- topPrefs
pref2 <- topPrefs
if pref1.itemId != pref2.itemId
} yield {
ItemSim(pref1.itemId, pref2.itemId, pref1.weight * pref2.weight)
}
}
if (log.isDebugEnabled) {
log.debug(s"total num outer product of prefs: ${matrixProduct.count()}")
}
val matrixSumReduced = matrixProduct.groupBy(sim => (sim.itemId1, sim.itemId2)).map { case (_, sims: Iterable[ItemSim]) =>
sims.reduce { (s1: ItemSim, s2: ItemSim) => s1.copy(weight = s1.weight + s2.weight) }
}
if (log.isDebugEnabled) {
log.debug(s"reduced outer product size: ${matrixSumReduced.count()}")
}
val recs = matrixSumReduced.groupBy(_.itemId1).mapValues(_.toList.sortBy(-_.weight).take(recsPerItem)).flatMap(_._2)
val unStructRecs = recs.map(s => UnStructSim(s.itemId1.id, s.itemId2.id, s.weight))
if (log.isDebugEnabled) {
log.debug(s"total numRecs: ${unStructRecs.count()}")
}
import userItemMatrix.sqlContext.implicits._
unStructRecs.toDF.withColumnRenamed("itemId1", "rec_for_" + itemIdCol).withColumnRenamed("itemId2", itemIdCol)
}
def runRecs(sqlContext: SQLContext, recsPerItem: Int = 10, outerProductLimit: Int = 100) = {
val opts = Map("zkhost" -> "localhost:9983", "collection" -> "lucidfind_signals_aggr", "query" -> "*:*")
val tmpDF = sqlContext.read.format("solr").options(opts).load
// TODO: fix to use threadId, once that's working
val recs = SimpleTwoHopRecommender.itemRecs(tmpDF,
"from_email_s", "subject_simple_s", "weight_d", recsPerItem, outerProductLimit)
//TODO: include / not include "review request" messages?
val finalRecs = recs//.filter(not($"rec_for_subject_simple_s".contains("review request"))).filter(not($"subject_simple_s".contains("review request")))
val totalRecsCount = finalRecs.count()
finalRecs.write.format("solr").options(Map("zkhost" -> "localhost:9983", "collection" -> "lucidfind_thread_recs")).mode(org.apache.spark.sql.SaveMode.Overwrite).save
com.lucidworks.spark.util.SolrSupport.getCachedCloudClient("localhost:9983").commit("lucidfind_thread_recs")
totalRecsCount
}
}
// SimpleTwoHopRecommender.runRecs(sqlContext)
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/SearchHubLoader.scala
|
package com.lucidworks.searchhub.analytics
import java.io.File
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import scala.io.Source
import scala.util.Random
object SearchHubLoader {
val projects = Set("lucene", "hadoop", "hive", "hbase", "nutch", "spark", "mahout", "pig", "kafka", "zookeeper",
"uima", "oozie", "tika", "accumulo", "manifoldcf", "mesos")
case class Config(sqlContext: SQLContext, options: Map[String, String] = opts())
def opts(zkHost: String = "localhost:9983", collection: String = "lucidfind", query: String ="lucidfind") =
Map("zkhost" -> zkHost, "collection" -> collection, "query" -> query, "fields" -> "isBot,mimeType_s,from_email,subject,suggest,project,_lw_data_source_collection_s,title,body,body_display,threadId,parent_s,from,_lw_data_source_type_s,_lw_data_source_s,author,author_facet,message_id,list,_lw_batch_id_s,in_reply_to,list_type,subject_simple,_lw_data_source_pipeline_s,hash_id,id,lastModified_dt,number,dateCreated,fetchedDate_dt,publishedOnDate,depth,fileSize,length_l,lastModified,_version_")
def load(sqlContext: SQLContext, opts: Map[String, String]) = loadFromSolr(Config(sqlContext, opts))
def loadFromSolr(config: Config): DataFrame = config.sqlContext.read.format("solr").options(config.options).load
def loadMessages(config: Config): RDD[MailMessage] = {
import config.sqlContext.implicits._
config.options.get("localMirrorBaseDir") match {
case (Some(baseDir)) => loadFromLocalDir(baseDir,
config.sqlContext.sparkContext,
config.options.getOrElse("sampleSize", "1000").toInt,
config.options.getOrElse("seed", "1234").toLong)
case None => loadFromSolr(config).map(MailMessage.fromRow).rdd
}
}
def loadFromLocalDir(baseDir: String, sparkContext: SparkContext, sampleSize: Int, seed: Long) = {
println("loading mail files from: " + baseDir)
val rnd = new Random(seed)
println("using seed: " + seed)
val subDirs = baseDir.split(",").toList
val subRdds = for { dir <- subDirs } yield {
val topLevel = new File(dir).listFiles()
.map(f => (projectFromList(listFromPath(f.getAbsolutePath)), f.getAbsolutePath))
.filter(p => projects.contains(p._1))
.groupBy(_._1)
.mapValues(_.map(_._2))
val messageRdds = for {(project, topLevelSubDirs) <- topLevel} yield {
println(s"loading messages from: ${topLevelSubDirs.mkString("[", ", ", "]")}")
val files = topLevelSubDirs.flatMap(deepFilePaths)
println(s"found ${files.length}, but taking sample of $sampleSize of them")
val sampleFiles = files.map((_, rnd.nextDouble())).sortBy(_._2).map(_._1).take(sampleSize)
sparkContext.parallelize(sampleFiles.flatMap(f =>
MailMessage.fromRawString(f, Source.fromFile(f).getLines().mkString("\n"))).toList)
}
messageRdds
}
println("returning union of grouped message RDDs")
val rdds = subRdds.toList.flatten
sparkContext.union(rdds)
}
def recursiveListFiles(f: File): Array[File] = {
Option(f.listFiles)
.map(t => t ++ t.filter(_.isDirectory).flatMap(recursiveListFiles))
.getOrElse(Array.empty[File])
}
def listFromPath(path: String) = {
val parts = path.split("/")
parts(parts.indexOf("mod_mbox") + 1)
}
def projectFromList(list: String) = {
list.split("-").head match {
case ("incubator") => list.split("-")(1)
case h => h
}
}
def deepFilePaths(base: String) = recursiveListFiles(new File(base)).filterNot(_.isDirectory).map(_.getAbsolutePath)
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/GraphUtils.scala
|
package com.lucidworks.searchhub.analytics
import scala.collection.mutable
/**
* Created by jakemannix on 4/29/16.
*/
object GraphUtils {
/**
* Finds the adjacency list form of a graph, not caring about orientation
* @param edges as src->dest pairs
* @return adjacency list form of the graph
*/
def buildAdjacencyGraph(edges: List[(String, String)]): Map[String, List[String]] = {
val outEdges = edges.flatMap(item => Option(item._2).map(rep => (item._1, List(rep))))
val inEdges = edges.flatMap(item => Option(item._2).map(rep => (rep, List(item._1))))
val loners = (edges.map(_._1).toSet -- (outEdges.map(_._1) ++ inEdges.map(_._1))).map(_ -> List.empty[String])
val allEdges = outEdges ++ inEdges ++ loners
val graph = allEdges.groupBy(_._1).mapValues(_.flatMap(_._2))
graph
}
/**
* Depth first search to find the connected component of the passed-in vertex
* @param graph adjacency lists
* @param components recursive components as they are built
* @param currentComponent of the current vertex
* @param vertex to find the component of, via depth-first-search
*/
def dfs(graph: Map[String, List[String]], components: mutable.Map[String, mutable.ListBuffer[String]],
currentComponent: mutable.ListBuffer[String],
vertex: String): Unit = {
currentComponent += vertex
components.put(vertex, currentComponent)
for {
neighbor <- graph.getOrElse(vertex, List.empty[String])
if !components.keySet.contains(neighbor)
} {
dfs(graph, components, currentComponent, neighbor)
}
}
/**
* Find the connected components of the graph, as a map which shows which component each vertex is in
* @param edges as src->dest pairs
* @return a map of vertex -> list of vertices in its component (including itself)
*/
def connectedComponents(edges: List[(String, String)]): Map[String, List[String]] = {
val graph = buildAdjacencyGraph(edges)
val components = mutable.Map[String, mutable.ListBuffer[String]]()
graph.keys.foreach { vertex =>
if (!components.keySet.contains(vertex)) {
val newComponent = mutable.ListBuffer[String]()
dfs(graph, components, newComponent, vertex)
}
}
components.mapValues(_.toList).toMap
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/TfIdfVectorizer.scala
|
package com.lucidworks.searchhub.analytics
import org.apache.spark.mllib.linalg.{Vectors, Vector => SparkVector}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
/**
 * Converts a document string into an L2-normalized tf-idf SparkVector using the
 * supplied tokenizer, token dictionary and idf weights.
 */
case class TfIdfVectorizer(tokenizer: String => List[String],
dictionary: Map[String, Int],
idfs: Map[String, Double]) extends (String => SparkVector) {
override def apply(s: String): SparkVector = {
val tokenTfs = tokenizer(s).groupBy(identity).mapValues(_.size).toList
val tfIdfMap = tokenTfs.flatMap { case (token, tf) =>
dictionary.get(token).map(idx => (idx, tf * idfs.getOrElse(token, 1.0))) }
val norm = math.sqrt(tfIdfMap.toList.map(_._2).foldLeft(0d) { case (tot , w) => tot + w * w })
val normalizedWeights = if (norm > 0) tfIdfMap.map { case (idx, w) => (idx, w / norm)} else tfIdfMap
Vectors.sparse(dictionary.size, normalizedWeights)
}
}
object TfIdfVectorizer {
/**
 * Builds a TfIdfVectorizer over the given DataFrame.
 * @param df DataFrame holding the corpus
 * @param tokenizer function that splits a document into tokens
 * @param fieldName name of the text column to vectorize
 * @return a TfIdfVectorizer whose dictionary and idf weights are computed from the corpus
 */
def build(df: DataFrame, tokenizer: String => List[String], fieldName: String,
minSupport: Int = 5, maxSupportFraction: Double = 0.75) = {
val tokenField = fieldName + "_tokens"
val withWords = df.select(fieldName).explode(fieldName, tokenField)(tokenizer.andThen(_.distinct))
val numDocs = df.count().toDouble
val maxSupport = maxSupportFraction * numDocs
val tokenCountsDF =
withWords.groupBy(tokenField).count().filter(col("count") > minSupport && col("count") < maxSupport)
import df.sqlContext.implicits._
val idfs = tokenCountsDF.map(r => (r.getString(0), math.log(1 + (numDocs / (1 + r.getLong(1)))))).collect().toMap
val dictionary = idfs.keys.toArray.zipWithIndex.toMap
TfIdfVectorizer(tokenizer, dictionary, idfs)
}
def vectorize(df: DataFrame, vectorizer: TfIdfVectorizer, fieldName: String) = {
val vectorizerUdf = udf(vectorizer)
df.withColumn(fieldName + "_vect", vectorizerUdf(col(fieldName)))
}
}
|
liamstar97/searchhub
|
searchhub-fusion-plugins/src/main/scala/com/lucidworks/searchhub/analytics/MailThreadJob.scala
|
package com.lucidworks.searchhub.analytics
import org.apache.spark.{SparkContext, Accumulator}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, DataFrame}
import org.slf4j.LoggerFactory
// DEPRECATED: Scala for old mail threading job that did not work
object MailThreadJob {
val log = LoggerFactory.getLogger("MailThreadJob")
// simple struct to let us do the grouping
case class MessageThreadItem(id: String, subjectSimple: String, inReplyTo: String, threadId: String, thread_size_i: Int = 0)
// TODO: apply this to all IDs in the index!
val normalizeId = (id: String) => Option(id).map(_.split("/").last.replaceAll(" ", "+")).getOrElse(id)
val rowToMessageThreadItem = (row: Row) => MessageThreadItem(
normalizeId(row.getAs[String]("id")),
row.getAs[String]("subject_simple"),
row.getAs[String]("in_reply_to"),
row.getAs[String]("threadId")
)
def createThreadGroups(mailDataFrame: DataFrame,
accumulatorMap: Map[String, Accumulator[Int]] = Map.empty,
alwaysOverride: Boolean = false): DataFrame = {
import org.apache.spark.sql.functions._
import mailDataFrame.sqlContext.implicits._
//val idTrmUdf = udf((id: String) => id.split("/").last)
//mailDataFrame.withColumn("id", idTrmUdf(col("id")))
val subjectGroups: RDD[(String, Iterable[Row])] = mailDataFrame.rdd.groupBy(_.getAs[String]("subject_simple"))
val messageThreadItems = subjectGroups.values.flatMap(x => createThreads(x, accumulatorMap)).toDF()
.select("id", "threadId", "thread_size_i").withColumnRenamed("threadId", "newThreadId")
val reJoined = mailDataFrame.join(messageThreadItems, "id")
val overrideIfEmpty = udf((oldCol: String, overrideCol: String) =>
if (alwaysOverride || oldCol == null || oldCol.equalsIgnoreCase("unknown")) {
overrideCol
} else {
oldCol
})
val withNewThreadIdCol =
reJoined.withColumn("overRiddenThreadId", overrideIfEmpty(reJoined.col("threadId"), reJoined.col("newThreadId")))
.drop("threadId")
.drop("newThreadId")
val renamed = withNewThreadIdCol.withColumnRenamed("overRiddenThreadId", "threadId")
renamed
}
def countThreads(mailDataFrame: DataFrame): Long = mailDataFrame.select("threadId").distinct().count()
def accumulators(sc: SparkContext) = {
val acc = (s: String) => s -> sc.accumulator(0, s)
Map(
acc("num_messages"),
acc("num_msgs_with_threadIds"),
acc("num_msgs_without_threadIds"),
acc("num_subjects"),
acc("num_subjects_with_all_known_threadIds"),
acc("num_subjects_with_no_known_threadIds"),
acc("num_subjects_with_some_known_some_unknown_threadIds")
)
}
/*
val msgAcc = acc("num_messages")
val msgKnown = acc("num_msgs_with_threadIds")
val msgUnknown = acc("num_msgs_without_threadIds")
val subAcc = acc("num_subjects")
val subWithAllKnownAcc = acc("num_subjects_with_all_known_threadIds")
val subWithNoKnownAcc = acc("num_subjects_with_no_known_threadIds")
val subWithMixedAcc = acc("num_subjects_with_some_known_some_unknown_threadIds")
*/
def createThreads(rows: Iterable[Row], accumulatorMap: Map[String, Accumulator[Int]]): List[MessageThreadItem] = {
val items: List[MessageThreadItem] = rows.toList.map(rowToMessageThreadItem)
val threadedItems = createThreads(items)
val unknown = threadedItems.count(_.threadId.equalsIgnoreCase("unknown"))
val known = threadedItems.count(!_.threadId.equalsIgnoreCase("unknown"))
val ct = items.size
def add(k: String, n: Int) = accumulatorMap.get(k).foreach(_ += n)
add("num_messages", ct)
add("num_msgs_without_threadIds", unknown)
add("num_msgs_with_threadIds", known)
add("num_subjects", 1)
if (ct == known) {
add("num_subjects_with_all_known_threadIds", 1)
} else if (known == 0) {
add("num_subjects_with_no_known_threadIds", 1)
} else {
add("num_subjects_with_some_known_some_unknown_threadIds", 1)
}
val sub = threadedItems.head.subjectSimple
log.info(s"createThreads: $ct messages, $unknown unknown threadId, subject: $sub")
threadedItems
}
def createThreads(items: List[MessageThreadItem]): List[MessageThreadItem] = {
val itemsById = items.map(item => item.id -> item).toMap
val edges = items.map(item => item.id -> item.inReplyTo)
val components =
GraphUtils.connectedComponents(edges).values.toList.toSet.map((ids: List[String]) => ids.flatMap(itemsById.get))
components.toList.flatMap { component =>
// as it's possible there will be no messages in this thread which have no inReplyTo (as the originator of the
// thread may be missing from this component, or the DataFrame entirely), leave the threadIds alone in this case.
// another option, however, is to take the most common inReplyTo, and assign that as the threadId. But then that
// message may not be in the index, leading to possible UI issues.
val threadIdOpt = component.find(_.inReplyTo == null).map(_.id)
val res = threadIdOpt.map(threadId =>
component.map(item => item.copy(threadId = threadId, thread_size_i = component.size))).getOrElse(component)
res
}
}
}
|
sathishsri88/practice
|
src/main/scala/lectures/part1as/DarkSugar.scala
|
package lectures.part1as
object DarkSugar extends App {
val anUnImplementedInstance: UnImplemented = (a: String) => a.toInt
abstract class UnImplemented {
def implemented(x: Int): Int = x * 3
def toImplement(in: String): Int
}
// methods whose names end with ':' are always right-associative
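// a small sketch of that rule: '::' ends with ':', so it is invoked on its right-hand operand
val aConsList: List[Int] = 1 :: 2 :: Nil // parsed as Nil.::(2).::(1)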
}
|
sathishsri88/practice
|
src/main/scala/lectures/part1as/Recap.scala
|
package lectures.part1as
import scala.annotation.tailrec
object Recap extends App {
val aCondition: Boolean = false
val aCodeBlock = {
if (aCondition) {
42
}
65
}
val theUnit: Unit = println("Hello Scala !!!")
def recInt(x: Int): Int = x * 1
@tailrec
def factorial(n: Int, accumulator: Int): Int = {
if (n <= 0) {
accumulator
}
else {
factorial(n - 1, n * accumulator)
}
}
//OOPS
class Animal
class Dog extends Animal
val aDog : Animal = new Dog // subtyping polymorphism
trait Carnivore{
def eat(a: Animal) : Unit
}
class Crocordile extends Animal with Carnivore {
override def eat(a: Animal): Unit = {
println("Crunch")
}
}
//Method notations
val aCroc = new Crocordile
aCroc.eat(aDog)
aCroc eat aDog // Natural language
//Anonymous class
val aCarnivore = new Carnivore {
override def eat(a: Animal): Unit = println("roar!")
}
//generics
abstract class MyList[+A] // + denotes covariance
//singletons and companions
object MyList
//Case classes
case class Person(name:String, age : Int)
//Exceptions and try/catch/finally
val throwsNE = throw new RuntimeException // Nothing -> the type of nothingness; this expression has type Nothing
val aPotentialFailure: Unit = try {
throwsNE
} catch {
case e : Exception => println(s"Caught exception $e")
}finally {
println("done logs")
}
// everything in Scala is a Object
//packaging and imports
//Functional programming
val incrementer = new Function[Int,Int] {
override def apply(v1: Int): Int = v1+10
}
val funcResult: Int = incrementer(10)
// first class support for functions
val anonymousIncrementer = (x:Int) => x +1
List(1,2,3).map(anonymousIncrementer) // Here MAP is a Higher order function[HOF]
//HOF
// Map, flatMap, filter
//for comprehension
val pairs = for{
num <- List(1,2,3) if num >0
char <- List('a','b','c')
} yield num + "-"+ char
// Scala collections: Seqs, Arrays, Lists, Vectors, Tuples
val aMap = Map("Sathish" -> 42,
"S"-> 53)
//Collections : options, Try
val anOption = Some(2)
// Pattern matching
val x = 2
val order = x match {
case 1 => "First"
case 2 => "Second"
case 3 => "third"
}
val bob = Person("Bob",32)
val greeting = bob match {
case Person(name, _) => s"Hi, My name is $name"
}
//all patterns
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/model/graphentities/Entity.scala
|
package com.raphtory.core.model.graphentities
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
/** *
* Represents graph entities (edges and vertices).
* Contains a map of properties (currently a map from String to Property);
* Longs representing unique vertex IDs are stored in subclasses.
*
* @param creationTime ID of the message that created the entity
* @param isInitialValue whether this is the first moment this entity is referenced
*/
abstract class Entity(val creationTime: Long, isInitialValue: Boolean) {
// Properties from that entity
private var entityType: String = null
var properties: ParTrieMap[String, Property] = ParTrieMap[String, Property]()
// History of that entity
object HistoryOrdering extends Ordering[Long] {
def compare(key1: Long, key2: Long) = key2.compareTo(key1)
}
var history: mutable.TreeMap[Long, Boolean] = mutable.TreeMap(creationTime -> isInitialValue)(HistoryOrdering)
var oldestPoint: Long = creationTime
var newestPoint: Long = creationTime
// History of that entity
def removeList: mutable.TreeMap[Long, Boolean] = history.filter(f=> !f._2)
def setType(newType: String): Unit = if (entityType == (null)) entityType = newType
def getType: String = if (entityType == null) "" else entityType
def revive(msgTime: Long): Unit = {
checkOldestNewest(msgTime)
history.put(msgTime, true)
}
def kill(msgTime: Long): Unit = {
checkOldestNewest(msgTime)
history.put(msgTime, false)
}
def checkOldestNewest(msgTime: Long) = {
if (msgTime > newestPoint)
newestPoint = msgTime
if (oldestPoint > msgTime) //check if the current point in history is the oldest
oldestPoint = msgTime
}
/** *
* override the apply method so that we can do edge/vertex("key") to easily retrieve properties
*/
def apply(property: String): Property = properties(property)
/** *
* Add or update a property on an edge or a vertex, using the operator vertex + (k, v) to add new properties
*/
def +(msgTime: Long, immutable: Boolean, key: String, value: Any): Unit =
properties.get(key) match {
case Some(p) => {
p update (msgTime, value)
}
case None =>
if (immutable) properties.put(key, new ImmutableProperty(msgTime, value))
else {
properties.put(key, new MutableProperty(msgTime, value))
}
}
def wipe() = history = mutable.TreeMap()(HistoryOrdering)
protected def closestTime(time: Long): (Long, Boolean) = {
var closestTime: Long = -1
var value = false
for ((k, v) <- history)
if (k <= time)
if ((time - k) < (time - closestTime)) {
closestTime = k
value = v
}
(closestTime, value)
}
def aliveAt(time: Long): Boolean =
if (time < oldestPoint)
false
else {
val closest = closestTime(time)
closest._2
}
def aliveAtWithWindow(time: Long, windowSize: Long): Boolean =
if (time < oldestPoint)
false
else {
val closest = closestTime(time)
if (time - closest._1 <= windowSize)
closest._2
else false
}
def activityAfter(time:Long) = history.exists(k => k._1 >= time)
def activityBefore(time:Long)= history.exists(k => k._1 <= time)
def activityBetween(min:Long, max:Long)= history.exists(k => k._1 >= min && k._1 <= max)
}
|
dorely103/Raphtory
|
mainproject/project/plugins.sbt
|
// "2.3.2" is just sbt plugin version
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.3.2")
addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.1")
addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.5")
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.9")
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/PartitionManager/Writer.scala
|
package com.raphtory.core.actors.PartitionManager
import akka.actor.SupervisorStrategy.Resume
import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, OneForOneStrategy, Terminated}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import com.raphtory.core.actors.RaphtoryActor
import com.raphtory.core.model.EntityStorage
import com.raphtory.core.model.communication._
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* The graph partition manages a set of vertices and their edges
* Is sent commands which have been processed by the command Processor
* Will process these, storing information in graph entities which may be updated if they already exist
* */
class Writer(
id: Int,
test: Boolean,
managerCountVal: Int,
workers: ParTrieMap[Int, ActorRef],
storage: ParTrieMap[Int, EntityStorage]
) extends RaphtoryActor {
private val scheduledTaskMap: mutable.HashMap[String, Cancellable] = mutable.HashMap[String, Cancellable]()
implicit val executionContext = context.system.dispatchers.lookup("misc-dispatcher")
// Id which refers to the partition's position in the graph manager map
val managerId: Int = id
val children: Int = totalWorkers
var lastLogTime: Long = System.currentTimeMillis() / 1000
// should the handled messages be printed to terminal
val printing: Boolean = false
var managerCount: Int = managerCountVal
var messageCount: Int = 0
var secondaryMessageCount: Int = 0
var workerMessageCount: Int = 0
val mediator: ActorRef = DistributedPubSub(context.system).mediator // get the mediator for sending cluster messages
mediator ! DistributedPubSubMediator.Put(self)
storage.foreach {
case (_, entityStorage) =>
entityStorage.apply(printing, managerCount, managerId, mediator)
}
/**
* Set up partition to report how many messages it has processed in the last X seconds
*/
override def supervisorStrategy: OneForOneStrategy = OneForOneStrategy() {
case e: Exception =>
e.printStackTrace()
Resume
}
override def preStart(): Unit = {
log.debug("Writer [{}] is being started.", managerId)
scheduleTasks()
}
override def postStop(): Unit = {
val allTasksCancelled = scheduledTaskMap.forall {
case (key, task) =>
cancelTask(key, task)
}
if (!allTasksCancelled) log.warning("Failed to cancel all scheduled tasks post stop.")
}
override def receive: Receive = {
case msg: String if msg == "count" => processCountMessage(msg)
case msg: String if msg == "keep_alive" => processKeepAliveMessage(msg)
case req: UpdatedCounter => processUpdatedCounterRequest(req)
case Terminated(child) =>
log.warning(s"WriterWorker with path [{}] belonging to Writer [{}] has died.", child.path, managerId)
case x => log.warning(s"Writer [{}] received unknown [{}] message.", managerId, x)
}
def processCountMessage(msg: String): Unit = {
log.debug(s"Writer [{}] received [{}] message.", managerId, msg)
val newTime = System.currentTimeMillis() / 1000
var timeDifference = newTime - lastLogTime
if (timeDifference == 0) timeDifference = 1
}
def processKeepAliveMessage(msg: String): Unit = {
log.debug(s"Writer [{}] received [{}] message.", managerId, msg)
val sendMessage = PartitionUp(managerId)
val sendPath = "/user/WatchDog"
mediator ! DistributedPubSubMediator.Send(sendPath, sendMessage, localAffinity = false)
log.debug(s"DistributedPubSubMediator sent message [{}] to path [{}].", sendMessage, sendPath)
}
def processUpdatedCounterRequest(req: UpdatedCounter): Unit = {
log.debug(s"Writer [{}] received request [{}].", managerId, req)
managerCount = req.newValue
if (storage.isEmpty)
log.warning("Entity storage is empty. The request [{}] will not be acted upon.", req)
else
storage.foreach {
case (_, entityStorage) =>
log.debug("Setting manager count for [{}] to [{}].", entityStorage, managerCount)
entityStorage.setManagerCount(managerCount)
}
}
private def scheduleTasks(): Unit = {
log.debug("Preparing to schedule tasks in Writer [{}].", managerId)
val countCancellable =
scheduleTask(initialDelay = 10 seconds, interval = 1 seconds, receiver = self, message = "count")
scheduledTaskMap.put("count", countCancellable)
val keepAliveCancellable =
scheduleTask(initialDelay = 10 seconds, interval = 10 seconds, receiver = self, message = "keep_alive")
scheduledTaskMap.put("keep_alive", keepAliveCancellable)
}
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/RaphtoryGraph.scala
|
package com.raphtory
import akka.actor.{ActorSystem, Props}
import ch.qos.logback.classic.Level
import com.raphtory.api.Analyser
import com.raphtory.core.actors.AnalysisManager.{AnalysisManager, AnalysisRestApi}
import com.raphtory.core.actors.ClusterManagement.{RaphtoryReplicator, WatchDog, WatermarkManager}
import com.raphtory.core.actors.Router.GraphBuilder
import com.raphtory.core.actors.Spout.{Spout, SpoutAgent}
import com.raphtory.core.model.communication.{LiveAnalysisRequest, RangeAnalysisRequest, ViewAnalysisRequest}
import kamon.Kamon
import org.slf4j.LoggerFactory
object RaphtoryGraph {
def apply[T](spout: Spout[T], graphBuilder: GraphBuilder[T]) : RaphtoryGraph[T] =
new RaphtoryGraph(spout, graphBuilder)
def apply[T](spoutPath: String, graphBuilderPath: String) : RaphtoryGraph[T] ={
val spout = Class.forName(spoutPath).getConstructor().newInstance().asInstanceOf[Spout[T]]
val graphBuilder = Class.forName(graphBuilderPath).getConstructor().newInstance().asInstanceOf[GraphBuilder[T]]
new RaphtoryGraph(spout, graphBuilder)
}
}
class RaphtoryGraph[T](spout: Spout[T], graphBuilder: GraphBuilder[T]) {
Kamon.init() //start tool logging
// val root = LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[ch.qos.logback.classic.Logger]
//root.setLevel(Level.ERROR)
val system = ActorSystem("Citation-system")
val partitionNumber = 1
val minimumRouters = 1
system.actorOf(Props(new WatermarkManager(partitionNumber)),"WatermarkManager")
system.actorOf(Props(new WatchDog(partitionNumber, minimumRouters)), "WatchDog")
system.actorOf(Props(new SpoutAgent(spout)), "Spout")
system.actorOf(Props(RaphtoryReplicator.apply("Router", partitionNumber, minimumRouters,graphBuilder)), s"Routers")
system.actorOf(Props(RaphtoryReplicator("Partition Manager", partitionNumber,minimumRouters)), s"PartitionManager")
val analysisManager = system.actorOf(Props[AnalysisManager], s"AnalysisManager")
AnalysisRestApi(system)
//TODO tidy these, but will be done with full analysis overhaul
def rangeQuery(analyser:Analyser,start:Long,end:Long,increment:Long,args:Array[String]):Unit = {
analysisManager ! RangeAnalysisRequest(analyser.getClass.getCanonicalName,start,end,increment,"false",0L,Array[Long](),args,"")
}
def rangeQuery(analyser:Analyser,start:Long,end:Long,increment:Long,window:Long,args:Array[String]):Unit = {
analysisManager ! RangeAnalysisRequest(analyser.getClass.getCanonicalName,start,end,increment,"true",window,Array[Long](),args,"")
}
def rangeQuery(analyser:Analyser,start:Long,end:Long,increment:Long,windowBatch:Array[Long],args:Array[String]):Unit = {
analysisManager ! RangeAnalysisRequest(analyser.getClass.getCanonicalName,start,end,increment,"batched",0L,windowBatch,args,"")
}
def viewQuery(analyser:Analyser,timestamp:Long,args:Array[String]):Unit = {
analysisManager ! ViewAnalysisRequest(analyser.getClass.getCanonicalName,timestamp,"false",0L,Array[Long](),args,"")
}
def viewQuery(analyser:Analyser,timestamp:Long,window:Long,args:Array[String]):Unit = {
analysisManager ! ViewAnalysisRequest(analyser.getClass.getCanonicalName,timestamp,"true",window,Array[Long](),args,"")
}
def viewQuery(analyser:Analyser,timestamp:Long,windowBatch:Array[Long],args:Array[String]):Unit = {
analysisManager ! ViewAnalysisRequest(analyser.getClass.getCanonicalName,timestamp,"batched",0L,windowBatch,args,"")
}
def liveQuery(analyser:Analyser,repeat:Long,eventTime:Boolean,args:Array[String]):Unit = {
analysisManager ! LiveAnalysisRequest(analyser.getClass.getCanonicalName,repeat,eventTime,"false",0L,Array[Long](),args,"")
}
def liveQuery(analyser:Analyser,repeat:Long,eventTime:Boolean,window:Long,args:Array[String]):Unit = {
analysisManager ! LiveAnalysisRequest(analyser.getClass.getCanonicalName,repeat,eventTime,"true",window,Array[Long](),args,"")
}
def liveQuery(analyser:Analyser,repeat:Long,eventTime:Boolean,windowBatch:Array[Long],args:Array[String]):Unit = {
analysisManager ! LiveAnalysisRequest(analyser.getClass.getCanonicalName,repeat,eventTime,"batched",0L,windowBatch,args,"")
}
}
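// Usage sketch, mirroring the pattern in the LOTRDeployment test: construct the graph from a
// Spout[T]/GraphBuilder[T] pair and submit queries against it. LOTRSpout, LOTRGraphBuilder,
// DegreeBasic and ConnectedComponents live elsewhere in this repository (the latter two under
// com.raphtory.algorithms); any compatible spout/builder/analyser trio works the same way.
//
// val rg = RaphtoryGraph[String](new LOTRSpout(), new LOTRGraphBuilder())
// rg.viewQuery(DegreeBasic(), timestamp = 10000, windowBatch = Array(100, 50, 10), Array[String]())
// rg.rangeQuery(ConnectedComponents(), start = 1, end = 32674, increment = 100, Array[String]())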
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/model/EntityStorage.scala
|
package com.raphtory.core.model
import akka.actor.ActorRef
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.core.model.communication._
import com.raphtory.core.model.graphentities.{Edge, Entity, SplitEdge, Vertex}
import kamon.Kamon
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
/**
* Singleton representing the Storage for the entities
*/
//TODO add capacity function based on memory used and number of updates processed/stored in memory
//TODO What happens when an edge which has been archived gets re-added
class EntityStorage(partitionID:Int,workerID: Int) {
val debug = System.getenv().getOrDefault("DEBUG", "false").trim.toBoolean
/**
* Map of vertices contained in the partition
*/
val vertices = ParTrieMap[Long, Vertex]()
var printing: Boolean = true
var managerCount: Int = 1
var managerID: Int = 0
var mediator: ActorRef = null
//stuff for compression and archiving
var oldestTime: Long = Long.MaxValue
var newestTime: Long = 0
var windowTime: Long = 0
val vertexCount = Kamon.counter("Raphtory_Vertex_Count").withTag("actor",s"PartitionWriter_$partitionID").withTag("ID",workerID)
val localEdgeCount = Kamon.counter("Raphtory_Local_Edge_Count").withTag("actor",s"PartitionWriter_$partitionID").withTag("ID",workerID)
val copySplitEdgeCount = Kamon.counter("Raphtory_Copy_Split_Edge_Count").withTag("actor",s"PartitionWriter_$partitionID").withTag("ID",workerID)
val masterSplitEdgeCount = Kamon.counter("Raphtory_Master_Split_Edge_Count").withTag("actor",s"PartitionWriter_$partitionID").withTag("ID",workerID)
def timings(updateTime: Long) = {
if (updateTime < oldestTime && updateTime > 0) oldestTime = updateTime
if (updateTime > newestTime)
newestTime = updateTime //this isn't thread safe, but it is only an approximation used for archiving
}
def apply(printing: Boolean, managerCount: Int, managerID: Int, mediator: ActorRef) = {
this.printing = printing
this.managerCount = managerCount
this.managerID = managerID
this.mediator = mediator
this
}
def setManagerCount(count: Int) = this.managerCount = count
def addProperties(msgTime: Long, entity: Entity, properties: Properties) =
if (properties != null)
properties.property.foreach {
case StringProperty(key, value) => entity + (msgTime, false, key, value)
case LongProperty(key, value) => entity + (msgTime, false, key, value)
case DoubleProperty(key, value) => entity + (msgTime, false, key, value)
case ImmutableProperty(key, value) => entity + (msgTime, true, key, value)
}
// if the update comes with properties, add all of them to the entity
def vertexAdd(msgTime: Long, srcId: Long, properties: Properties = null, vertexType: Type): Vertex = { //Vertex add handler function
val vertex: Vertex = vertices.get(srcId) match { //check if the vertex exists
case Some(v) => //if it does
v revive msgTime //add the history point
v
case None => //if it does not exist
val v = new Vertex(msgTime, srcId, initialValue = true) //create a new vertex
vertexCount.increment()
if (vertexType != null) v.setType(vertexType.name)
vertices put (srcId, v) //put it in the map
v
}
addProperties(msgTime, vertex, properties)
vertex //return the vertex
}
def getVertexOrPlaceholder(msgTime: Long, id: Long): Vertex =
vertices.get(id) match {
case Some(vertex) => vertex
case None =>
vertexCount.increment()
val vertex = new Vertex(msgTime, id, initialValue = true)
vertices put (id, vertex)
vertex wipe ()
vertex
}
def vertexWorkerRequest(msgTime: Long, dstID: Long, srcID: Long, edge: Edge, present: Boolean,routerID:String,routerTime:Int,spoutTime:Long) = {
val dstVertex = vertexAdd(msgTime, dstID, vertexType = null) //the worker creating the edge does not handle the destination, so create or revive it here
if (!present) {
dstVertex.incrementEdgesRequiringSync()
dstVertex addIncomingEdge edge // do the same for the destination node
mediator ! DistributedPubSubMediator.Send( //if this edge is new
getManager(srcID, managerCount),
DstResponseFromOtherWorker(msgTime, srcID, dstID, dstVertex.removeList, routerID, routerTime,spoutTime),
false
)
}
else
mediator ! DistributedPubSubMediator.Send( //if this edge is not new we just need to ack
getManager(srcID, managerCount),
EdgeSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def vertexWipeWorkerRequest(msgTime: Long, dstID: Long, srcID: Long, edge: Edge, present: Boolean,routerID:String,routerTime:Int,spoutTime:Long) = {
val dstVertex = getVertexOrPlaceholder(msgTime, dstID) // the worker removing the edge does not handle the destination, so fetch or create a placeholder for the destination ID
if (!present) {
dstVertex.incrementEdgesRequiringSync()
dstVertex addIncomingEdge edge // do the same for the destination node
mediator ! DistributedPubSubMediator.Send( //as it is new, respond with the deletions
getManager(srcID, managerCount),
DstResponseFromOtherWorker(msgTime, srcID, dstID, dstVertex.removeList,routerID,routerTime,spoutTime),
false
)
}
else
mediator ! DistributedPubSubMediator.Send( //if this edge is not new we just need to ack
getManager(srcID, managerCount),
EdgeSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def vertexWorkerRequestEdgeHandler(
msgTime: Long,
srcID: Long,
dstID: Long,
removeList: mutable.TreeMap[Long, Boolean]
): Unit =
getVertexOrPlaceholder(msgTime, srcID).getOutgoingEdge(dstID) match {
case Some(edge) => edge killList removeList //add the dst removes into the edge
case None => println(s"vertexWorkerRequestEdgeHandler: no outgoing edge found from $srcID to $dstID")
}
def vertexRemoval(msgTime: Long, srcId: Long,routerID:String,routerTime:Int,spoutTime:Long):Int = {
val vertex: Vertex = vertices.get(srcId) match {
case Some(v) =>
v kill msgTime
v
case None => //if the removal has arrived before the creation
vertexCount.increment()
val v = new Vertex(msgTime, srcId, initialValue = false) //create a placeholder
vertices put (srcId, v) //add it to the map
v
}
//todo decide with hamza which one to use
// vertex.incomingEdges.values.foreach {
// case edge @ (remoteEdge: SplitEdge) =>
// edge kill msgTime
// mediator ! DistributedPubSubMediator.Send(
// getManager(remoteEdge.getSrcId, managerCount),
// ReturnEdgeRemoval(msgTime, remoteEdge.getSrcId, remoteEdge.getDstId,routerID,routerTime),
// false
// ) //inform the other partition to do the same
// case edge => //if it is a local edge -- opperated by the same worker, therefore we can perform an action -- otherwise we must inform the other local worker to handle this
// if (edge.getWorkerID == workerID) edge kill msgTime
// else
// mediator ! DistributedPubSubMediator.Send(
// getManager(edge.getSrcId, managerCount),
// EdgeRemoveForOtherWorker(msgTime, edge.getSrcId, edge.getDstId,routerID,routerTime),
// false
// ) //
// }
// vertex.outgoingEdges.values.foreach {
// case edge @ (remoteEdge: SplitEdge) =>
// edge kill msgTime //outgoing edge always opperated by the same worker, therefore we can perform an action
// mediator ! DistributedPubSubMediator.Send(
// getManager(edge.getDstId, managerCount),
// RemoteEdgeRemovalFromVertex(msgTime, remoteEdge.getSrcId, remoteEdge.getDstId,routerID,routerTime),
// false
// )
// case edge =>
// edge kill msgTime //outgoing edge always opperated by the same worker, therefore we can perform an action
// }
val incomingCount = vertex.incomingEdges.map(edge => {
edge._2 match {
case edge@(remoteEdge: SplitEdge) =>
edge kill msgTime
mediator ! DistributedPubSubMediator.Send(
getManager(remoteEdge.getSrcId, managerCount),
ReturnEdgeRemoval(msgTime, remoteEdge.getSrcId, remoteEdge.getDstId, routerID, routerTime,spoutTime),
false
) //inform the other partition to do the same
1
case edge => //if it is a local edge operated on by the same worker we can perform the action directly -- otherwise we must inform the other local worker to handle this
if (edge.getWorkerID == workerID) {
edge kill msgTime
0
}
else {
mediator ! DistributedPubSubMediator.Send(
getManager(edge.getSrcId, managerCount),
EdgeRemoveForOtherWorker(msgTime, edge.getSrcId, edge.getDstId, routerID, routerTime,spoutTime),
false
) //
1
}
}
})
val outgoingCount = vertex.outgoingEdges.map (edge=>{
edge._2 match {
case edge@(remoteEdge: SplitEdge) =>
edge kill msgTime //outgoing edges are always operated on by the same worker, therefore we can perform the action directly
mediator ! DistributedPubSubMediator.Send(
getManager(edge.getDstId, managerCount),
RemoteEdgeRemovalFromVertex(msgTime, remoteEdge.getSrcId, remoteEdge.getDstId, routerID, routerTime,spoutTime),
false
)
1
case edge =>
edge kill msgTime //outgoing edges are always operated on by the same worker, therefore we can perform the action directly
0
}
})
if (incomingCount.sum + outgoingCount.sum != vertex.getEdgesRequringSync())
  println(s"Incorrect edge sync count: ${incomingCount.sum + outgoingCount.sum} vs ${vertex.getEdgesRequringSync()}")
incomingCount.sum+outgoingCount.sum
}
/**
* Edges Methods
*/
def edgeAdd(msgTime: Long, srcId: Long, dstId: Long,routerID:String,routerTime:Int, properties: Properties = null, edgeType: Type,spoutTime:Long):Boolean = {
val local = checkDst(dstId, managerCount, managerID) //is the dst on this machine
val sameWorker = checkWorker(dstId, managerCount, workerID) // is the dst handled by the same worker
val srcVertex = vertexAdd(msgTime, srcId, vertexType = null) // create or revive the source ID
var present = false //whether the edge already exists -- decides what update is sent when remote and whether to add the source/destination removals
var edge: Edge = null
srcVertex.getOutgoingEdge(dstId) match {
case Some(e) => //retrieve the edge if it exists
edge = e
present = true
case None => //if it does not
if (local) {
edge = new Edge(workerID, msgTime, srcId, dstId, initialValue = true) //create the new edge, local or remote
localEdgeCount.increment()
} else {
edge = new SplitEdge(workerID, msgTime, srcId, dstId, initialValue = true)
masterSplitEdgeCount.increment()
}
if (edgeType != null) edge.setType(edgeType.name)
srcVertex.addOutgoingEdge(edge) //add this edge to the vertex
}
if (local && srcId != dstId)
if (sameWorker) { //if the dst is handled by the same worker
val dstVertex = vertexAdd(msgTime, dstId, vertexType = null) // do the same for the destination ID
if (!present) {
dstVertex addIncomingEdge (edge) // add it to the dst as would not have been seen
edge killList dstVertex.removeList //add the dst removes into the edge
}
} else // if it is a different worker, ask that other worker to complete the dst part of the edge
mediator ! DistributedPubSubMediator
.Send(getManager(dstId, managerCount), DstAddForOtherWorker(msgTime, dstId, srcId, edge, present, routerID, routerTime, spoutTime), true)
if (present) {
edge revive msgTime //if the edge was previously created we need to revive it
if (!local) // if it is a remote edge we
mediator ! DistributedPubSubMediator.Send(
getManager(dstId, managerCount),
RemoteEdgeAdd(msgTime, srcId, dstId, properties, edgeType, routerID, routerTime, spoutTime),
false
) // inform the partition dealing with the destination node*/
} else { // if this is the first time we have seen the edge
val deaths = srcVertex.removeList //we extract the removals from the src
edge killList deaths // add them to the edge
if (!local) // and if not local sync with the other partition
mediator ! DistributedPubSubMediator.Send(
getManager(dstId, managerCount),
RemoteEdgeAddNew(msgTime, srcId, dstId, properties, deaths, edgeType, routerID, routerTime, spoutTime),
false
)
}
addProperties(msgTime, edge, properties)
if(!local && !present) //if it is not fully local and is new, increment the count of edges requiring a watermark sync
  srcVertex.incrementEdgesRequiringSync()
local && sameWorker //return true if the edge requires no cross-worker sync
}
def remoteEdgeAddNew(
msgTime: Long,
srcId: Long,
dstId: Long,
properties: Properties,
srcDeaths: mutable.TreeMap[Long, Boolean],
edgeType: Type,
routerID:String,
routerTime:Int,
spoutTime:Long
): Unit = {
val dstVertex = vertexAdd(msgTime, dstId, vertexType = null) //create or revive the destination node
val edge = new SplitEdge(workerID, msgTime, srcId, dstId, initialValue = true)
copySplitEdgeCount.increment()
dstVertex addIncomingEdge (edge) //add the edge to the associated edges of the destination node
val deaths = dstVertex.removeList //get the destination node deaths
edge killList srcDeaths //pass source node death lists to the edge
edge killList deaths // pass destination node death lists to the edge
addProperties(msgTime, edge, properties)
dstVertex.incrementEdgesRequiringSync()
if (edgeType != null) edge.setType(edgeType.name)
mediator ! DistributedPubSubMediator
.Send(getManager(srcId, managerCount), RemoteReturnDeaths(msgTime, srcId, dstId, deaths, routerID, routerTime,spoutTime), false)
}
def remoteEdgeAdd(msgTime: Long, srcId: Long, dstId: Long, properties: Properties = null, edgeType: Type,routerID:String,routerTime:Int,spoutTime:Long): Unit = {
val dstVertex = vertexAdd(msgTime, dstId, vertexType = null) // revive the destination node
dstVertex.getIncomingEdge(srcId) match {
case Some(edge) =>
edge revive msgTime //revive the edge
addProperties(msgTime, edge, properties)
case None => /*todo should this happen */
}
mediator ! DistributedPubSubMediator.Send(getManager(srcId, managerCount), EdgeSyncAck(msgTime, routerID, routerTime,spoutTime), true)
}
def edgeRemoval(msgTime: Long, srcId: Long, dstId: Long, routerID: String, routerTime: Int,spoutTime:Long): Boolean = {
val local = checkDst(dstId, managerCount, managerID)
val sameWorker = checkWorker(dstId, managerCount, workerID) // is the dst handled by the same worker
var present = false
var edge: Edge = null
var srcVertex: Vertex = getVertexOrPlaceholder(msgTime, srcId)
srcVertex.getOutgoingEdge(dstId) match {
case Some(e) =>
edge = e
present = true
case None =>
if (local) {
localEdgeCount.increment()
edge = new Edge(workerID, msgTime, srcId, dstId, initialValue = false)
} else {
masterSplitEdgeCount.increment()
edge = new SplitEdge(workerID, msgTime, srcId, dstId, initialValue = false)
}
srcVertex addOutgoingEdge (edge) // add the edge to the associated edges of the source node
}
if (local && srcId != dstId)
if (sameWorker) { //if the dst is handled by the same worker
val dstVertex = getVertexOrPlaceholder(msgTime, dstId) // do the same for the destination ID
if (!present) {
dstVertex addIncomingEdge (edge) // do the same for the destination node
edge killList dstVertex.removeList //add the dst removes into the edge
}
} else // if it is a different worker, ask that other worker to complete the dst part of the edge
mediator ! DistributedPubSubMediator
.Send(getManager(dstId, managerCount), DstWipeForOtherWorker(msgTime, dstId, srcId, edge, present, routerID, routerTime,spoutTime), true)
if (present) {
edge kill msgTime
if (!local)
mediator ! DistributedPubSubMediator.Send(
getManager(dstId, managerCount),
RemoteEdgeRemoval(msgTime, srcId, dstId, routerID, routerTime,spoutTime),
false
) // inform the partition dealing with the destination node
} else {
val deaths = srcVertex.removeList
edge killList deaths
if (!local)
mediator ! DistributedPubSubMediator
.Send(getManager(dstId, managerCount), RemoteEdgeRemovalNew(msgTime, srcId, dstId, deaths, routerID, routerTime,spoutTime), false)
}
if(!local && !present) //if it is not fully local and is new, increment the count of edges requiring a watermark sync
srcVertex.incrementEdgesRequiringSync()
local && sameWorker
}
def returnEdgeRemoval(msgTime: Long, srcId: Long, dstId: Long,routerID:String,routerTime:Int,spoutTime:Long): Unit = { //for the source getting an update about deletions from a remote worker
getVertexOrPlaceholder(msgTime, srcId).getOutgoingEdge(dstId) match {
case Some(edge) => edge kill msgTime
case None => //todo should this happen
}
mediator ! DistributedPubSubMediator.Send( // ack the destination holder that this is all sorted
getManager(dstId, managerCount),
VertexRemoveSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def edgeRemovalFromOtherWorker(msgTime: Long, srcID: Long, dstID: Long,routerID:String,routerTime:Int,spoutTime:Long) = {
getVertexOrPlaceholder(msgTime, srcID).getOutgoingEdge(dstID) match {
case Some(edge) => edge kill msgTime
case None => //todo should this happen?
}
mediator ! DistributedPubSubMediator.Send( // ack the destination holder that this is all sorted
getManager(dstID, managerCount),
VertexRemoveSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def remoteEdgeRemoval(msgTime: Long, srcId: Long, dstId: Long,routerID:String,routerTime:Int,spoutTime:Long): Unit = {
val dstVertex = getVertexOrPlaceholder(msgTime, dstId)
dstVertex.getIncomingEdge(srcId) match {
case Some(e) => e kill msgTime
case None => println(s"Worker ID $workerID Manager ID $managerID")
}
mediator ! DistributedPubSubMediator.Send( //if this edge is not new we just need to ack
getManager(srcId, managerCount),
EdgeSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def remoteEdgeRemovalFromVertex(msgTime: Long, srcId: Long, dstId: Long,routerID:String,routerTime:Int,spoutTime:Long): Unit = {
val dstVertex = getVertexOrPlaceholder(msgTime, dstId)
dstVertex.getIncomingEdge(srcId) match {
case Some(e) => e kill msgTime
case None => println(s"Worker ID $workerID Manager ID $managerID")
}
mediator ! DistributedPubSubMediator.Send( //if this edge is not new we just need to ack
getManager(srcId, managerCount),
VertexRemoveSyncAck(msgTime, routerID, routerTime,spoutTime),
false
)
}
def remoteEdgeRemovalNew(msgTime: Long, srcId: Long, dstId: Long, srcDeaths: mutable.TreeMap[Long, Boolean],routerID:String,routerTime:Int,spoutTime:Long): Unit = {
val dstVertex = getVertexOrPlaceholder(msgTime, dstId)
dstVertex.incrementEdgesRequiringSync()
copySplitEdgeCount.increment()
val edge = new SplitEdge(workerID, msgTime, srcId, dstId, initialValue = false)
dstVertex addIncomingEdge (edge) //add the edge to the destination nodes associated list
val deaths = dstVertex.removeList //get the destination node deaths
edge killList srcDeaths //pass source node death lists to the edge
edge killList deaths // pass destination node death lists to the edge
mediator ! DistributedPubSubMediator
.Send(getManager(srcId, managerCount), RemoteReturnDeaths(msgTime, srcId, dstId, deaths,routerID,routerTime,spoutTime), false)
}
def remoteReturnDeaths(msgTime: Long, srcId: Long, dstId: Long, dstDeaths: mutable.TreeMap[Long, Boolean]): Unit = {
if (printing) println(s"Received deaths for $srcId --> $dstId from ${getManager(dstId, managerCount)}")
getVertexOrPlaceholder(msgTime, srcId).getOutgoingEdge(dstId) match {
case Some(edge) => edge killList dstDeaths
case None => /*todo Should this happen*/
}
}
//TODO these are placed here until YanYang's changes can be integrated
def getManager(srcId: Long, managerCount: Int): String = {
val mod = srcId.abs % (managerCount * 10)
val manager = mod / 10
val worker = mod % 10
s"/user/Manager_${manager}_child_$worker"
}
def checkDst(dstID: Long, managerCount: Int, managerID: Int): Boolean = ((dstID.abs % (managerCount * 10)) / 10).toInt == managerID //check if destination is also local
def checkWorker(dstID: Long, managerCount: Int, workerID: Int): Boolean = ((dstID.abs % (managerCount * 10)) % 10).toInt == workerID //check if destination is handled by the same worker
}
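// Worked example of the routing arithmetic above: with managerCount = 2 (and the fixed 10 workers
// per manager), srcId = 57 gives mod = 57 % 20 = 17, so manager = 17 / 10 = 1 and worker = 17 % 10 = 7.
// Hence getManager(57, 2) == "/user/Manager_1_child_7", checkDst(57, 2, 1) == true and
// checkWorker(57, 2, 7) == true.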
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/RaphtoryActor.scala
|
package com.raphtory.core.actors
import akka.actor.{Actor, ActorContext, ActorLogging, ActorRef, Cancellable, Timers}
import scala.collection.mutable
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
trait RaphtoryActor extends Actor with ActorLogging with Timers {
val partitionsTopic = "/partitionsCount"
val totalWorkers = 10 //must be a power of 10
//get the partition a vertex is stored in
def checkDst(dstID: Long, managerCount: Int, managerID: Int): Boolean = ((dstID.abs % (managerCount * totalWorkers)) / totalWorkers).toInt == managerID //check if destination is also local
def checkWorker(dstID: Long, managerCount: Int, workerID: Int): Boolean = ((dstID.abs % (managerCount * totalWorkers)) % totalWorkers).toInt == workerID //check if destination is handled by the same worker
def getManager(srcId: Long, managerCount: Int): String = {
val mod = srcId.abs % (managerCount * totalWorkers)
val manager = mod / totalWorkers
val worker = mod % totalWorkers
s"/user/Manager_${manager}_child_$worker"
}
def getReader(srcId: Long, managerCount: Int): String = {
val mod = srcId.abs % (managerCount * totalWorkers)
val manager = mod / totalWorkers
val worker = mod % totalWorkers
s"/user/Manager_${manager}_reader_$worker"
}
def getAllRouterWorkers(managerCount: Int): Array[String] = {
val workers = mutable.ArrayBuffer[String]()
for (i <- 0 until managerCount)
for (j <- 0 until totalWorkers)
workers += s"/user/router/router_${i}_Worker_$j"
workers.toArray
}
def getAllReaders(managerCount: Int): Array[String] = {
val workers = mutable.ArrayBuffer[String]()
for (i <- 0 until managerCount)
workers += s"/user/ManagerReader_$i"
workers.toArray
}
def getAllReaderWorkers(managerCount: Int): Array[String] = {
val workers = mutable.ArrayBuffer[String]()
for (i <- 0 until managerCount)
for (j <- 0 until totalWorkers)
workers += s"/user/Manager_${i}_reader_$j"
workers.toArray
}
def scheduleTask(initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, message: Any)(
implicit context: ActorContext,
executor: ExecutionContext,
sender: ActorRef = Actor.noSender
): Cancellable = {
val scheduler = context.system.scheduler
val cancellable = scheduler.schedule(initialDelay, interval, receiver, message)(executor,self)
context.system.log.debug("The message [{}] has been scheduled for send to [{}].", message, receiver.path)
cancellable
}
def scheduleTaskOnce(
delay: FiniteDuration,
receiver: ActorRef,
message: Any
)(implicit context: ActorContext, executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = {
val scheduler = context.system.scheduler
val cancellable = scheduler.scheduleOnce(delay, receiver, message)(executor,self)
context.system.log.debug("The message [{}] has been scheduled for send to [{}].", message, receiver.path)
cancellable
}
def cancelTask(key: String, task: Cancellable)(implicit context: ActorContext): Boolean = {
task.cancel()
val isCancelled = task.isCancelled
if (isCancelled)
context.system.log.debug("The task [{}] has been cancelled.", key)
else
context.system.log.debug("Failed to cancel the task [{}].", key)
isCancelled
}
object sortOrdering extends Ordering[Long] {
def compare(key1: Long, key2: Long) = key2.compareTo(key1)
}
}
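// Sketch of how an actor mixing in RaphtoryActor could use the scheduling helpers above; the
// message "tick" and the durations are arbitrary placeholders, not values used in this repository.
//
// import scala.concurrent.duration._
// implicit val ec: ExecutionContext = context.dispatcher
// val tick = scheduleTask(initialDelay = 5.seconds, interval = 60.seconds, receiver = self, message = "tick")
// ...
// cancelTask("tick", tick) // when the timer is no longer needed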
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/Tasks/LiveTasks/BWindowedLiveAnalysisTask.scala
|
package com.raphtory.analysis.Tasks.LiveTasks
import com.raphtory.api.Analyser
import com.raphtory.core.model.communication.AnalysisType
import scala.collection.mutable.ArrayBuffer
class BWindowedLiveAnalysisTask(managerCount:Int, jobID: String,args:Array[String], analyser: Analyser,repeatTime:Long,eventTime:Boolean, windowset:Array[Long],newAnalyser:Boolean,rawFile:String)
extends LiveAnalysisTask(managerCount,jobID, args,analyser,repeatTime,eventTime,newAnalyser,rawFile) {
override def result(): ArrayBuffer[Any] = {
val original = super.result()
if (original.nonEmpty) {
val invertedArray = ArrayBuffer[ArrayBuffer[Any]]()
for (i <- original(0).asInstanceOf[ArrayBuffer[Any]].indices)
invertedArray += new ArrayBuffer[Any]()
original.foreach { x =>
val internal = x.asInstanceOf[ArrayBuffer[Any]]
for (j <- internal.indices)
invertedArray(j) += internal(j)
}
invertedArray.asInstanceOf[ArrayBuffer[Any]]
} else original
}
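// result() transposes the per-partition results: each element of super.result() is one partition's
// ArrayBuffer of per-window values, so invertedArray(j) collects every partition's value for window j,
// letting processResults() call processWindowResults once per window in windowSet().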
override def windowSet(): Array[Long] = windowset.sortBy(x=>x)(sortOrdering)
override protected def analysisType(): AnalysisType.Value = AnalysisType.live
override def processResults(time: Long): Unit = {
var i = 0
val vtime = viewCompleteTime
result().asInstanceOf[ArrayBuffer[ArrayBuffer[Any]]].foreach(res =>{
analyser.processWindowResults(res, timestamp(), windowSet()(i), vtime)
i+=1
})
}
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/model/analysis/entityVisitors/EdgeVisitor.scala
|
package com.raphtory.core.model.analysis.entityVisitors
import akka.actor.{ActorContext, ActorRef}
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.api.ManagerCount
import com.raphtory.core.actors.PartitionManager.Workers.ViewJob
import com.raphtory.core.model.analysis.GraphLenses.GraphLens
import com.raphtory.core.model.communication.{ImmutableProperty, VertexMessage}
import com.raphtory.core.model.graphentities.{Edge, MutableProperty}
import scala.collection.mutable
class EdgeVisitor(edge:Edge,id:Long,viewJob:ViewJob,superStep:Int,view:GraphLens,mediator: ActorRef)(implicit context: ActorContext, managerCount: ManagerCount) extends EntityVisitor(edge,viewJob:ViewJob) {
def ID() = id
def src() = edge.getSrcId
def dst() = edge.getDstId
def send(data: Any): Unit = {
val message = VertexMessage(id, viewJob, superStep, data)
view.recordMessage()
mediator ! DistributedPubSubMediator.Send(getReader(id, managerCount.count), message, false)
}
//TODO edge properties
private def getEdgePropertyValuesAfterTime(edge: Edge, key: String, time: Long, window: Long): Option[mutable.TreeMap[Long, Any]] =
if (window == -1L)
edge.properties.get(key) match {
case Some(p: MutableProperty) => Some(p.previousState.filter(x => x._1 <= time))
case Some(p: ImmutableProperty) => Some(mutable.TreeMap[Long, Any]((-1L -> p.currentValue)))
case None => None
}
else
edge.properties.get(key) match {
case Some(p: MutableProperty) => Some(p.previousState.filter(x => x._1 <= time && time - x._1 <= window))
case Some(p: ImmutableProperty) => Some(mutable.TreeMap[Long, Any]((-1L -> p.currentValue)))
case None => None
}
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/Router/GraphBuilder.scala
|
package com.raphtory.core.actors.Router
import com.raphtory.core.model.communication.GraphUpdate
import scala.collection.mutable
import scala.util.hashing.MurmurHash3
trait GraphBuilder[T]{
var updates:mutable.HashSet[GraphUpdate] = mutable.HashSet[GraphUpdate]()
def getUpdates() = { //TODO hide from users
val toReturn = updates
updates = mutable.HashSet[GraphUpdate]()
toReturn
}
def sendUpdate(update:GraphUpdate):Unit ={
updates += update
}
protected def assignID(uniqueChars: String): Long = MurmurHash3.stringHash(uniqueChars)
def parseTuple(tuple: T):Unit
}
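// A minimal GraphBuilder sketch, assuming each record is a CSV line of the form "time,src,dst";
// CsvEdgeListBuilder is an illustrative name rather than a class shipped with this repository.
import com.raphtory.core.model.communication.{EdgeAdd, VertexAdd}
class CsvEdgeListBuilder extends GraphBuilder[String] {
  override def parseTuple(tuple: String): Unit = {
    val fields = tuple.split(",")          // "time,src,dst"
    val msgTime = fields(0).trim.toLong
    val srcID = assignID(fields(1).trim)   // hash the node names to Long IDs
    val dstID = assignID(fields(2).trim)
    sendUpdate(VertexAdd(msgTime, srcID))  // ensure both endpoints exist at this time
    sendUpdate(VertexAdd(msgTime, dstID))
    sendUpdate(EdgeAdd(msgTime, srcID, dstID)) // then add the edge between them
  }
}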
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/Tasks/LiveTasks/LiveAnalysisTask.scala
|
package com.raphtory.analysis.Tasks.LiveTasks
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.api.Analyser
import com.raphtory.analysis.Tasks.AnalysisTask
import com.raphtory.core.model.communication.{AnalysisType, Finish, Setup, TimeCheck}
import scala.collection.mutable
import scala.concurrent.duration.{Duration, MILLISECONDS}
class LiveAnalysisTask(managerCount:Int, jobID: String, args:Array[String],analyser: Analyser,repeatTime:Long,eventTime:Boolean,newAnalyser:Boolean,rawFile:String)
extends AnalysisTask(jobID,args, analyser,managerCount,newAnalyser,rawFile) {
//implicit val executionContext = context.system.dispatchers.lookup("misc-dispatcher")
override protected def analysisType(): AnalysisType.Value = AnalysisType.live
protected var currentTimestamp = 1L
override def timestamp(): Long = currentTimestamp
private var liveTimes:mutable.Set[Long] = mutable.Set[Long]()
private var liveTime = 0L
private var firstTime = true
def liveTimestamp():Long = liveTime
def setLiveTime() = {
liveTime = liveTimes.min
liveTimes = mutable.Set[Long]()
if(!eventTime||firstTime){
firstTime=false
currentTimestamp=liveTime
}
}
def resetTimes() = {
liveTimes = mutable.Set[Long]()
}
override def restartTime(): Long = { //if using processing time we just wait that long
if(eventTime) 0 else repeatTime
}
override def restart() = {
if (repeatTime>0) {//if we want to restart
if(eventTime){ //if it's event time then we wait until the repeat time is represented in the storage
currentTimestamp=liveTime+repeatTime
for (worker <- getAllReaderWorkers(managerCount))
mediator ! DistributedPubSubMediator.Send(worker, TimeCheck(timestamp()), false)
}
else{
currentTimestamp=liveTime
for (worker <- getAllReaderWorkers(managerCount))
mediator ! DistributedPubSubMediator.Send(worker, TimeCheck(timestamp), false)
}
}
}
override def timeResponse(ok: Boolean, time: Long) = {
if (!ok)
TimeOKFlag = false
TimeOKACKS += 1
liveTimes += time
if (TimeOKACKS == getWorkerCount) {
stepCompleteTime() //reset step counter
if (TimeOKFlag) {
setLiveTime()
if (analyser.defineMaxSteps() > 1)
for (worker <- getAllReaderWorkers(managerCount))
mediator ! DistributedPubSubMediator.Send(
worker,
Setup(
this.generateAnalyzer,
jobID,
args,
currentSuperStep,
timestamp,
analysisType(),
windowSize(),
windowSet()
),
false
)
else
for (worker <- getAllReaderWorkers(managerCount))
mediator ! DistributedPubSubMediator.Send(
worker,
Finish(
this.generateAnalyzer,
jobID,
args,
currentSuperStep,
timestamp,
analysisType(),
windowSize(),
windowSet()
),
false
)
} else {
//println(s"${timestamp()} is yet to be ingested, currently at ${time}. Retrying analysis in 1 seconds and retrying")
resetTimes()
context.system.scheduler.scheduleOnce(Duration(1000, MILLISECONDS), self, "recheckTime")
}
TimeOKACKS = 0
TimeOKFlag = true
}
}
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/api/Serialiser.scala
|
package com.raphtory.api
import java.io.{BufferedWriter, File, FileWriter}
import com.raphtory.core.model.analysis.entityVisitors.{EdgeVisitor, VertexVisitor}
import scala.collection.mutable.ArrayBuffer
abstract class Serialiser extends Analyser(null){
val path = s"${sys.env.getOrElse("SERIALISER_PATH", "")}"
def serialiseVertex(v:VertexVisitor):String
def serialiseEdge(e:EdgeVisitor):String
def startOfFile():String
def middleOfFile():String
def endOfFile():String
def fileExtension(): String = {"txt"}
def rowDelimeter(): String = {",\n"}
override def returnResults(): Any = {
val serialisedEntities = view.getVertices().map { vertex =>
(serialiseVertex(vertex),vertex.getOutEdges.map(e=> serialiseEdge(e)).toArray)
}
(serialisedEntities.map(x=>x._1).toArray,serialisedEntities.flatMap(x=>x._2).toArray)
}
override def defineMaxSteps(): Int = 1
override def processResults(results: ArrayBuffer[Any], timeStamp: Long, viewCompleteTime: Long): Unit = {
val serialisedResults = results.asInstanceOf[ArrayBuffer[(Array[String],Array[String])]]
val file = new File(s"$path/Raphtory_Snapshot_$timeStamp.${fileExtension()}")
write((serialisedResults.flatMap(x=>x._1).toArray,serialisedResults.flatMap(x=>x._2).toArray),file)
}
override def processWindowResults(results: ArrayBuffer[Any], timestamp: Long, windowSize: Long, viewCompleteTime: Long): Unit = {
val serialisedResults = results.asInstanceOf[ArrayBuffer[(Array[String],Array[String])]]
val file = new File(s"$path/Raphtory_Snapshot_${timestamp}_$windowSize.${fileExtension()}")
write((serialisedResults.flatMap(x=>x._1).toArray,serialisedResults.flatMap(x=>x._2).toArray),file)
}
def write(serialisedResults:(Array[String],Array[String]),file:File) = {
// println("write :"+serialisedResults._1.length+":"++serialisedResults._2.length)
println(s"vertices: ${serialisedResults._1.length}, edges: ${serialisedResults._2.length}")
val bw = new BufferedWriter(new FileWriter(file))
bw.write(startOfFile())
bw.write(serialisedResults._1.mkString(rowDelimeter()))
bw.write(middleOfFile())
bw.write(serialisedResults._2.mkString(rowDelimeter()))
bw.write(endOfFile())
bw.newLine()
bw.close()
}
override def analyse(): Unit = {}
override def setup(): Unit = {}
}
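// A minimal concrete Serialiser sketch: writes a plain edge-list file with one vertex ID per row
// followed by one "src dst" pair per row. EdgeListSerialiser is an illustrative name, not a class
// from this repository; see JSONSerialiser in com.raphtory.serialisers for a full implementation.
class EdgeListSerialiser extends Serialiser {
  override def startOfFile(): String = "# vertices\n"
  override def middleOfFile(): String = "\n# edges\n"
  override def endOfFile(): String = ""
  override def rowDelimeter(): String = "\n"
  override def serialiseVertex(v: VertexVisitor): String = s"${v.ID()}"
  override def serialiseEdge(e: EdgeVisitor): String = s"${e.src()} ${e.dst()}"
}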
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/model/communication/raphtoryMessages.scala
|
package com.raphtory.core.model.communication
import com.raphtory.api.Analyser
import com.raphtory.core.actors.PartitionManager.Workers.ViewJob
import com.raphtory.core.model.graphentities.Edge
import scala.collection.mutable
/**
* Created by Mirate on 30/05/2017.
*/
sealed trait GraphUpdate {
def msgTime: Long
def srcID: Long
}
sealed trait TrackedGraphUpdate{
val spoutTime:Long
}
case class DataFinished()
case class DataFinishedSync(time:Long)
case class RouterUp(id: Int)
case class PartitionUp(id: Int)
case class ClusterStatusRequest()
case class ClusterStatusResponse(clusterUp: Boolean,pmCounter:Int,roCounter:Int)
sealed trait Property {
def key: String
def value: Any
}
case class Type(name: String)
case class ImmutableProperty(override val key: String, override val value: String) extends Property
case class StringProperty(override val key: String, override val value: String) extends Property
case class LongProperty(override val key: String, override val value: Long) extends Property
case class DoubleProperty(override val key: String, override val value: Double) extends Property
case class Properties(property: Property*)
case class VertexAdd(msgTime: Long, override val srcID: Long, vType: Type = null) extends GraphUpdate //add a vertex (or add/update a property to an existing vertex)
case class TrackedVertexAdd(routerID: String, messageID:Int, spoutTime: Long, update:VertexAdd) extends TrackedGraphUpdate
case class VertexAddWithProperties(msgTime: Long, override val srcID: Long, properties: Properties, vType: Type = null) extends GraphUpdate
case class TrackedVertexAddWithProperties(routerID: String,messageID:Int, spoutTime: Long,update:VertexAddWithProperties) extends TrackedGraphUpdate
case class VertexDelete(msgTime: Long, override val srcID: Long) extends GraphUpdate
case class TrackedVertexDelete(routerID: String,messageID:Int, spoutTime: Long,update:VertexDelete) extends TrackedGraphUpdate
case class EdgeAdd(msgTime: Long, srcID: Long, dstID: Long, eType: Type = null) extends GraphUpdate
case class TrackedEdgeAdd(routerID: String,messageID:Int, spoutTime: Long,update:EdgeAdd) extends TrackedGraphUpdate
case class EdgeAddWithProperties(msgTime: Long, override val srcID: Long, dstID: Long, properties: Properties, eType: Type = null) extends GraphUpdate
case class TrackedEdgeAddWithProperties(routerID: String,messageID:Int, spoutTime: Long,update:EdgeAddWithProperties) extends TrackedGraphUpdate
case class EdgeDelete(msgTime: Long, override val srcID: Long, dstID: Long) extends GraphUpdate
case class TrackedEdgeDelete(routerID: String,messageID:Int, spoutTime: Long,update:EdgeDelete) extends TrackedGraphUpdate
case class RemoteEdgeAdd(msgTime: Long, srcID: Long, dstID: Long, properties: Properties, eType: Type, routerID: String, routerTime: Int,spoutTime:Long)
case class RemoteEdgeRemoval(msgTime: Long, srcID: Long, dstID: Long, routerID: String, routerTime: Int,spoutTime:Long)
case class RemoteEdgeRemovalFromVertex(msgTime: Long, srcID: Long, dstID: Long, routerID: String, routerTime: Int,spoutTime:Long)
case class RemoteEdgeAddNew(msgTime: Long, srcID: Long, dstID: Long, properties: Properties, kills: mutable.TreeMap[Long, Boolean], vType: Type, routerID: String, routerTime: Int,spoutTime:Long)
case class RemoteEdgeRemovalNew(msgTime: Long, srcID: Long, dstID: Long, kills: mutable.TreeMap[Long, Boolean], routerID: String, routerTime: Int,spoutTime:Long)
case class RemoteReturnDeaths(msgTime: Long, srcID: Long, dstID: Long, kills: mutable.TreeMap[Long, Boolean], routerID: String, routerTime: Int,spoutTime:Long)
case class ReturnEdgeRemoval(msgTime: Long, srcID: Long, dstID: Long,routerID:String,routerTime:Int,spoutTime:Long)
//BLOCK FROM WORKER SYNC
case class DstAddForOtherWorker(msgTime: Long, dstID: Long, srcForEdge: Long, edge: Edge, present: Boolean, routerID: String, routerTime: Int,spoutTime:Long)
case class DstWipeForOtherWorker(msgTime: Long, dstID: Long, srcForEdge: Long, edge: Edge, present: Boolean, routerID: String, routerTime: Int,spoutTime:Long)
case class DstResponseFromOtherWorker(msgTime: Long, srcForEdge: Long, dstID: Long, removeList: mutable.TreeMap[Long, Boolean], routerID: String, routerTime: Int,spoutTime:Long)
case class EdgeRemoveForOtherWorker(msgTime: Long, srcID: Long, dstID: Long,routerID: String, routerTime: Int,spoutTime:Long)
case class EdgeSyncAck(msgTime: Long, routerID: String, routerTime: Int,spoutTime:Long)
case class VertexRemoveSyncAck(msgTime: Long, routerID: String, routerTime: Int,spoutTime:Long)
case class RouterWorkerTimeSync(msgTime:Long,routerID:String,routerTime:Int)
case class UpdatedCounter(newValue: Int)
case class AssignedId(id: Int)
case class PartitionsCount(count: Int)
case class PartitionsCountResponse(count: Int)
case class RequestPartitionCount()
case class RequestPartitionId()
case class RequestRouterId()
case class CompressVertices(lastSaved: Long, workerID: Int)
case class CompressVertex(key: Long, time: Long)
case class FinishedVertexCompression(key: Long)
case class ArchiveVertices(compressTime: Long, archiveTime: Long, workerID: Int)
case class ArchiveVertex(key: Long, compressTime: Long, archiveTime: Long)
case class ArchiveOnlyVertex(key: Long, archiveTime: Long)
case class FinishedVertexArchiving(key: Long)
case class UpdateArrivalTime(wallClock:Long,time:Long)
case class WatermarkTime(time:Long)
sealed trait RaphReadClasses
case class VertexMessage(vertexID: Long, viewJob: ViewJob, superStep: Int, data:Any )
case class Setup(analyzer: Analyser, jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class SetupNewAnalyser(jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class Ready(messages: Int) extends RaphReadClasses
case class NextStep(analyzer: Analyser, jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class NextStepNewAnalyser(jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class EndStep(messages: Int, voteToHalt: Boolean) extends RaphReadClasses
case class Finish(analyzer: Analyser, jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class FinishNewAnalyser(jobID: String, args:Array[String], superStep: Int, timestamp: Long, analysisType: AnalysisType.Value, window: Long, windowSet: Array[Long]) extends RaphReadClasses
case class ReturnResults(results: Any)
case class ExceptionInAnalysis(e: String) extends RaphReadClasses
case class MessagesReceived(workerID: Int, receivedMessages: Int, sentMessages: Int) extends RaphReadClasses
case class CheckMessages(jobID:ViewJob,superstep: Int) extends RaphReadClasses
case class ReaderWorkersOnline() extends RaphReadClasses
case class ReaderWorkersACK() extends RaphReadClasses
case class LiveAnalysisPOST(analyserName:String, windowType:Option[String], windowSize:Option[Long], windowSet:Option[Array[Long]],repeatTime:Option[Long],eventTime:Option[Boolean],args:Option[Array[String]],rawFile:Option[String])
case class ViewAnalysisPOST(analyserName:String,timestamp:Long,windowType:Option[String],windowSize:Option[Long],windowSet:Option[Array[Long]],args:Option[Array[String]],rawFile:Option[String])
case class RangeAnalysisPOST(analyserName:String,start:Long,end:Long,jump:Long,windowType:Option[String],windowSize:Option[Long],windowSet:Option[Array[Long]],args:Option[Array[String]],rawFile:Option[String])
trait AnalysisRequest
case class LiveAnalysisRequest(
analyserName: String,
repeatTime:Long =0L,
eventTime:Boolean=false,
windowType: String = "false",
windowSize: Long = 0L,
windowSet: Array[Long] = Array[Long](0),
args:Array[String]=Array(),
rawFile:String=""
) extends AnalysisRequest
case class ViewAnalysisRequest(
analyserName: String,
timestamp: Long,
windowType: String = "false",
windowSize: Long = 0L,
windowSet: Array[Long] = Array[Long](0),
args:Array[String]=Array(),
rawFile:String=""
) extends AnalysisRequest
case class RangeAnalysisRequest(
analyserName: String,
start: Long,
end: Long,
jump: Long,
windowType: String = "false",
windowSize: Long = 0L,
windowSet: Array[Long] = Array[Long](0),
args:Array[String]=Array(),
rawFile:String=""
) extends AnalysisRequest
case class AnalyserPresentCheck(className: String) extends RaphReadClasses
case class AnalyserPresent() extends RaphReadClasses
case class ClassMissing() extends RaphReadClasses
case class FailedToCompile(stackTrace: String) extends RaphReadClasses
case class CompileNewAnalyser(analyser: String,args:Array[String], name: String) extends RaphReadClasses
case class ClassCompiled() extends RaphReadClasses
case class TimeCheck(timestamp: Long) extends RaphReadClasses
case class TimeResponse(ok: Boolean, time: Long) extends RaphReadClasses
case class RequestResults(jobID:String)
case class KillTask(jobID:String)
case class JobKilled()
case class ResultsForApiPI(results:Array[String])
case class JobDoesntExist()
case class AllocateTuple(record: Any)
case class AllocateTrackedTuple(wallClock:Long,record:Any)
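// Illustrative construction of the update messages defined above; the literal values are arbitrary
// and GraphUpdateExamples is not part of the messaging API itself.
object GraphUpdateExamples {
  val vertexUpdate: GraphUpdate = VertexAddWithProperties(
    msgTime = 100L,
    srcID = 1L,
    properties = Properties(ImmutableProperty("name", "alice"), LongProperty("age", 30L)),
    vType = Type("person")
  )
  val edgeUpdate: GraphUpdate = EdgeAddWithProperties(
    msgTime = 101L,
    srcID = 1L,
    dstID = 2L,
    properties = Properties(StringProperty("relationship", "follows")),
    eType = Type("social")
  )
}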
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/serialisers/JSONSerialiser.scala
|
package com.raphtory.serialisers
import com.raphtory.api.Serialiser
import com.raphtory.core.model.analysis.entityVisitors.{EdgeVisitor, VertexVisitor}
import scala.collection.parallel.mutable.ParTrieMap
class JSONSerialiser extends Serialiser {
override def startOfFile(): String = "{\"directed\": true, \"multigraph\": false, \"graph\": {}, \"nodes\": [\n"
override def middleOfFile() :String = "],\n\"links\":[\n"
override def fileExtension(): String = {
"json"
}
override def serialiseVertex(v: VertexVisitor): String = {
val properties: String = extractProperties(v.getPropertySet())
if(properties.nonEmpty) {
if (v.Type().nonEmpty)
s"""\t{\"id\":${v.ID()},\"doctype\":\"${v.Type()}\",$properties}"""
else
s"""\t{\"id\":${v.ID()},$properties}"""
}
else {
if (v.Type().nonEmpty)
s"""\t{\"id\":${v.ID()},\"doctype\":\"${v.Type()}\"}"""
else
s"""\t{\"id\":${v.ID()}}"""
}
}
private def extractProperties(ps: ParTrieMap[String,Any]) = {
ps.map(property =>
if (property._2.isInstanceOf[Long])
s"""\"${property._1}\":${property._2}"""
else
s"""\"${property._1}\":\"${property._2}\""""
).toArray.mkString(",")
}
override def serialiseEdge(e: EdgeVisitor): String = {
val properties = extractProperties(e.getPropertySet())
if(properties.nonEmpty) {
if (e.Type().nonEmpty)
s"""\t{\"source\":${e.src()},\"target\":${e.dst()},\"edgetype\":\"${e.Type()}\",$properties}"""
else
s"""\t{\"source\":${e.src()},\"target\":${e.dst()},$properties}"""
}
else {
if (e.Type().nonEmpty)
s"""\t{\"source\":${e.src()},\"target\":${e.dst()},\"edgetype\":\"${e.Type()}\"}"""
else
s"""\t{\"source\":${e.src()},\"target\":${e.dst()}}"""
}
}
override def endOfFile(): String = "\n]}\n"
}
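// Shape of the file this serialiser produces, assuming two "person" vertices and one edge
// (values are illustrative only):
// {"directed": true, "multigraph": false, "graph": {}, "nodes": [
//   {"id":1,"doctype":"person","name":"alice"},
//   {"id":2,"doctype":"person","name":"bob"}
// ],
// "links":[
//   {"source":1,"target":2,"edgetype":"follows"}
// ]}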
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/model/graphentities/Edge.scala
|
package com.raphtory.core.model.graphentities
import com.raphtory.core.model.EntityStorage
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
/**
* Companion Edge object (extended creator for storage loads)
*/
object Edge {
def apply(
workerID: Int,
creationTime: Long,
srcID: Long,
dstID: Long,
previousState: mutable.TreeMap[Long, Boolean],
properties: ParTrieMap[String, Property],
storage: EntityStorage
) = {
val e = new Edge(workerID, creationTime, srcID, dstID, initialValue = true)
e.history = previousState
e.properties = properties
e
}
}
/**
* Created by Mirate on 01/03/2017.
*/
class Edge(workerID: Int, msgTime: Long, srcId: Long, dstId: Long, initialValue: Boolean)
extends Entity(msgTime, initialValue) {
def killList(vKills: mutable.TreeMap[Long, Boolean]): Unit = history ++= vKills
def getSrcId: Long = srcId
def getDstId: Long = dstId
def getWorkerID: Int = workerID
def viewAt(time: Long): Edge = {
var closestTime: Long = 0
var value = false
for ((k, v) <- history)
if (k <= time)
if ((time - k) < (time - closestTime)) {
closestTime = k
value = v
}
val edge = new Edge(-1, closestTime, srcId, dstId, value)
for ((k, p) <- properties) {
val value = p.valueAt(time)
if (!(value equals ""))
edge + (time, false, k, value)
}
edge
}
}
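// viewAt(time) reconstructs the edge as it appeared at `time`: the most recent history entry at or
// before `time` supplies the alive/dead flag, and each property's value at that point is copied
// across (properties that resolve to the empty string are skipped).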
|
dorely103/Raphtory
|
mainproject/src/tests/scala/lotr/LOTRDeployment.scala
|
package lotr
import com.raphtory.RaphtoryGraph
import com.raphtory.algorithms.{ConnectedComponents, DegreeBasic}
object LOTRDeployment extends App{
val source = new LOTRSpout()
val builder = new LOTRGraphBuilder()
val rg = RaphtoryGraph[String](source,builder)
val arguments = Array[String]()
//rg.rangeQuery(ConnectedComponents(),start = 1,end = 32674,increment = 100,arguments)
//rg.rangeQuery(ConnectedComponents(),start = 1,end = 32674,increment = 100,window=100,arguments)
//rg.rangeQuery(ConnectedComponents(),start = 1,end = 32674,increment = 100,windowBatch=Array(10,50,100),arguments)
//rg.viewQuery(DegreeBasic(),timestamp = 10000,arguments)
// rg.viewQuery(DegreeBasic(),timestamp = 10000,window=100,arguments)
rg.viewQuery(DegreeBasic(),timestamp = 10000,windowBatch=Array(100,50,10),arguments)
rg.viewQuery(DegreeBasic(),timestamp = 10000,windowBatch=Array(10,50,100),arguments)
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/PartitionManager/Reader.scala
|
package com.raphtory.core.actors.PartitionManager
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import akka.cluster.pubsub.DistributedPubSubMediator.SubscribeAck
import com.raphtory.core.actors.PartitionManager.Workers.ReaderWorker
import com.raphtory.core.actors.RaphtoryActor
import com.raphtory.core.model.EntityStorage
import com.raphtory.core.model.communication._
import scala.collection.parallel.mutable.ParTrieMap
import scala.util.Try
class Reader(
id: Int,
test: Boolean,
managerCountVal: Int,
storage: ParTrieMap[Int, EntityStorage],
) extends RaphtoryActor {
implicit var managerCount: Int = managerCountVal
// Id which refers to the partitions position in the graph manager map
val managerId: Int = id
val mediator: ActorRef = DistributedPubSub(context.system).mediator
mediator ! DistributedPubSubMediator.Put(self)
var readers: ParTrieMap[Int, ActorRef] = new ParTrieMap[Int, ActorRef]()
for (i <- 0 until totalWorkers) {
log.debug("Initialising [{}] worker children for Reader [{}].", totalWorkers, managerId)
// create threads for writing
val child = context.system.actorOf(
Props(new ReaderWorker(managerCount, managerId, i, storage(i))).withDispatcher("reader-dispatcher"),
s"Manager_${id}_reader_$i"
)
context.watch(child)
readers.put(i, child)
}
override def preStart(): Unit =
log.debug("Reader [{}] is being started.", managerId)
override def receive: Receive = {
case ReaderWorkersOnline() => sender ! ReaderWorkersACK()
case req: AnalyserPresentCheck => processAnalyserPresentCheckRequest(req)
case req: UpdatedCounter => processUpdatedCounterRequest(req)
case _: SubscribeAck => // subscription acknowledgement, nothing to do
case Terminated(child) =>
log.warning("ReaderWorker with path [{}] belonging to Reader [{}] has died.", child.path, managerId)
case x => log.warning("Reader [{}] received unknown [{}] message.", managerId, x)
}
def processAnalyserPresentCheckRequest(req: AnalyserPresentCheck): Unit = {
log.debug(s"Reader [{}] received [{}] request.", managerId, req)
val className = req.className
val classExists = Try(Class.forName(className))
classExists.toEither.fold(
{ _: Throwable =>
log.debug("Class [{}] was not found within this image.", className)
sender ! ClassMissing()
}, { _: Class[_] =>
log.debug(s"Class [{}] exists. Proceeding.", className)
sender ! AnalyserPresent()
}
)
}
def processUpdatedCounterRequest(req: UpdatedCounter): Unit = {
log.debug("Reader [{}] received [{}] request.", managerId, req)
managerCount = req.newValue
readers.foreach(x => x._2 ! UpdatedCounter(req.newValue))
}
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/Tasks/ViewTasks/WindowedViewAnalysisTask.scala
|
package com.raphtory.analysis.Tasks.ViewTasks
import com.raphtory.api.Analyser
import com.raphtory.core.model.communication.AnalysisType
class WindowedViewAnalysisTask(managerCount:Int, jobID: String,args:Array[String], analyser: Analyser, time: Long, window: Long,newAnalyser:Boolean,rawFile:String)
extends ViewAnalysisTask(managerCount,jobID: String,args, analyser, time: Long,newAnalyser,rawFile) {
override def windowSize(): Long = window
override def processResults(time: Long) =
analyser.processWindowResults(result, timestamp(), windowSize(), viewCompleteTime)
override protected def analysisType(): AnalysisType.Value = AnalysisType.view
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/Tasks/ViewTasks/BWindowedViewAnalysisTask.scala
|
package com.raphtory.analysis.Tasks.ViewTasks
import com.raphtory.api.Analyser
import com.raphtory.core.model.communication.AnalysisType
import scala.collection.mutable.ArrayBuffer
class BWindowedViewAnalysisTask(managerCount:Int, jobID: String, args:Array[String],analyser: Analyser, time: Long, windows: Array[Long],newAnalyser:Boolean,rawFile:String)
extends ViewAnalysisTask(managerCount,jobID, args,analyser, time,newAnalyser,rawFile) {
override def result(): ArrayBuffer[Any] = {
val original = super.result()
if (original.nonEmpty) {
val invertedArray = ArrayBuffer[ArrayBuffer[Any]]()
for (i <- original(0).asInstanceOf[ArrayBuffer[Any]].indices)
invertedArray += new ArrayBuffer[Any]()
original.foreach { x =>
val internal = x.asInstanceOf[ArrayBuffer[Any]]
for (j <- internal.indices)
invertedArray(j) += internal(j)
}
invertedArray.asInstanceOf[ArrayBuffer[Any]]
} else original
}
override def windowSet(): Array[Long] = windows.sortBy(x=>x)(sortOrdering)
override def processResults(time: Long): Unit = {
var i = 0
val vtime = viewCompleteTime
result().asInstanceOf[ArrayBuffer[ArrayBuffer[Any]]].foreach(res =>{
analyser.processWindowResults(res, timestamp(), windowSet()(i),vtime )
i+=1
})
}
override protected def analysisType(): AnalysisType.Value = AnalysisType.view
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/Tasks/ViewTasks/ViewAnalysisTask.scala
|
package com.raphtory.analysis.Tasks.ViewTasks
import com.raphtory.api.Analyser
import com.raphtory.analysis.Tasks.AnalysisTask
import com.raphtory.core.model.communication.AnalysisType
class ViewAnalysisTask(managerCount:Int, jobID: String,args:Array[String], analyser: Analyser, time: Long,newAnalyser:Boolean,rawFile:String)
extends AnalysisTask(jobID: String, args, analyser,managerCount,newAnalyser,rawFile) {
override def timestamp(): Long = time
override protected def analysisType(): AnalysisType.Value = AnalysisType.view
override def restart(): Unit = {
println(s"View Analysis manager for $jobID at ${time} finished")
//killme()
}
override def processResults(timestamp: Long): Unit =
analyser.processResults(result, this.timestamp(), viewCompleteTime)
}
|
dorely103/Raphtory
|
mainproject/src/main/scala/com/raphtory/core/actors/AnalysisManager/AnalysisManager.scala
|
package com.raphtory.core.actors.AnalysisManager
import akka.actor.{Actor, ActorRef, InvalidActorNameException, PoisonPill, Props}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import akka.pattern.ask
import akka.util.Timeout
import com.raphtory.api.{Analyser, BlankAnalyser, LoadExternalAnalyser}
import com.raphtory.analysis.Tasks.LiveTasks.{BWindowedLiveAnalysisTask, LiveAnalysisTask, WindowedLiveAnalysisTask}
import com.raphtory.analysis.Tasks.RangeTasks.{BWindowedRangeAnalysisTask, RangeAnalysisTask, WindowedRangeAnalysisTask}
import com.raphtory.analysis.Tasks.ViewTasks.{BWindowedViewAnalysisTask, ViewAnalysisTask, WindowedViewAnalysisTask}
import com.raphtory.core.actors.RaphtoryActor
import com.raphtory.core.model.communication.{ClusterStatusRequest, ClusterStatusResponse, _}
import scala.collection.parallel.mutable.ParTrieMap
import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}
import scala.language.postfixOps
case class StartAnalysis()
class AnalysisManager() extends RaphtoryActor{
implicit val executionContext = context.system.dispatchers.lookup("misc-dispatcher")
implicit val timeout: Timeout = 10.seconds
final protected val mediator = DistributedPubSub(context.system).mediator
mediator ! DistributedPubSubMediator.Put(self)
val debug = System.getenv().getOrDefault("DEBUG", "false").trim.toBoolean
val currentTasks = ParTrieMap[String, ActorRef]()
private var safe = false
protected var managerCount: Int = 0 //Number of Managers in the Raphtory Cluster
override def preStart() {
context.system.scheduler.scheduleOnce(Duration(1, SECONDS), self, "startUp")
}
override def receive: Receive = {
case "startUp" => clusterReadyForAnalysis() //first ask the watchdog if it is safe to do analysis and what the size of the cluster is //when the watchdog responds, set the new value and message each Reader Worker
case PartitionsCount(newValue) => managerCount = newValue //for if managerCount is republished
case request:LiveAnalysisRequest => if(!safe) notYet(request) else spawnLiveAnalysisManager(request)
case request:ViewAnalysisRequest => if(!safe) notYet(request) else spawnViewAnalysisManager(request)
case request:RangeAnalysisRequest => if(!safe) notYet(request) else spawnRangeAnalysisManager(request)
case RequestResults(jobID) => checkResults(jobID)
case KillTask(jobID) => killJob(jobID)
}
def checkResults(jobID: String) = {
if(currentTasks contains jobID){
try {
val future = currentTasks(jobID) ? RequestResults(jobID)
Await.result(future, timeout.duration) match {
case results:ResultsForApiPI => sender ! results
}
} catch {
case _: java.util.concurrent.TimeoutException =>
}
}
else sender() ! JobDoesntExist()
}
def killJob(jobID: String) = {
if(currentTasks contains jobID){
currentTasks(jobID) ! PoisonPill
currentTasks remove(jobID)
sender() ! JobKilled()
}
else sender()! JobDoesntExist()
}
def spawnLiveAnalysisManager(request: LiveAnalysisRequest): Unit = {
try {
val jobID = request.analyserName+"_"+System.currentTimeMillis().toString
println(s"Live Analysis Task received, your job ID is ${jobID}")
val args = request.args
val repeatTime = request.repeatTime
val eventTime = request.eventTime
val analyserFile = request.rawFile
val buildAnalyser = getAnalyser(request.analyserName,args,request.rawFile)
val newAnalyser = buildAnalyser._1
val analyser = buildAnalyser._2
if(analyser.isInstanceOf[BlankAnalyser])
return
val ref= request.windowType match {
case "false" =>
context.system.actorOf(Props(new LiveAnalysisTask(managerCount, jobID,args, analyser,repeatTime,eventTime,newAnalyser,analyserFile)).withDispatcher("analysis-dispatcher"), s"LiveAnalysisTask_$jobID")
case "true" =>
context.system.actorOf(Props(new WindowedLiveAnalysisTask(managerCount, jobID,args, analyser,repeatTime,eventTime, request.windowSize,newAnalyser,analyserFile)).withDispatcher("analysis-dispatcher"), s"LiveAnalysisTask__windowed_$jobID")
case "batched" =>
context.system.actorOf(Props(new BWindowedLiveAnalysisTask(managerCount, jobID,args, analyser,repeatTime,eventTime, request.windowSet,newAnalyser,analyserFile)).withDispatcher("analysis-dispatcher"), s"LiveAnalysisTask__batchWindowed_$jobID")
}
currentTasks put (jobID,ref)
}
catch {
case e: InvalidActorNameException => println("Job name is not unique; please kill the other job first")
}
}
  def spawnViewAnalysisManager(request: ViewAnalysisRequest): Unit = {
    try {
      val jobID = request.analyserName + "_" + System.currentTimeMillis().toString
      println(s"View Analysis Task received, your job ID is $jobID")
      val timestamp = request.timestamp
      val args = request.args
      val analyserFile = request.rawFile
      val (newAnalyser, analyser) = getAnalyser(request.analyserName, args, request.rawFile)
      if (analyser.isInstanceOf[BlankAnalyser])
        return // compilation of the submitted analyser failed, so there is nothing to run
      val ref = request.windowType match {
        case "false" =>
          context.system.actorOf(Props(new ViewAnalysisTask(managerCount, jobID, args, analyser, timestamp, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"), s"ViewAnalysisTask_$jobID")
        case "true" =>
          context.system.actorOf(
            Props(new WindowedViewAnalysisTask(managerCount, jobID, args, analyser, timestamp, request.windowSize, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"),
            s"ViewAnalysisTask_windowed_$jobID"
          )
        case "batched" =>
          context.system.actorOf(
            Props(new BWindowedViewAnalysisTask(managerCount, jobID, args, analyser, timestamp, request.windowSet, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"),
            s"ViewAnalysisTask_batchWindowed_$jobID"
          )
      }
      currentTasks.put(jobID, ref)
    }
    catch {
      case _: InvalidActorNameException => println("Job name is not unique, please kill the other job first")
    }
  }
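  // Spawns a range analysis task between the requested start and end times, stepping by the requested jump.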
  def spawnRangeAnalysisManager(request: RangeAnalysisRequest): Unit = {
    try {
      val jobID = request.analyserName + "_" + System.currentTimeMillis().toString
      println(s"Range Analysis Task received, your job ID is $jobID, running ${request.analyserName}, between ${request.start} and ${request.end} jumping ${request.jump} at a time.")
      val start = request.start
      val end = request.end
      val jump = request.jump
      val args = request.args
      val analyserFile = request.rawFile
      val (newAnalyser, analyser) = getAnalyser(request.analyserName, args, request.rawFile)
      if (analyser.isInstanceOf[BlankAnalyser])
        return // compilation of the submitted analyser failed, so there is nothing to run
      val ref = request.windowType match {
        case "false" =>
          context.system
            .actorOf(Props(new RangeAnalysisTask(managerCount, jobID, args, analyser, start, end, jump, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"), s"RangeAnalysisTask_$jobID")
        case "true" =>
          context.system.actorOf(
            Props(new WindowedRangeAnalysisTask(managerCount, jobID, args, analyser, start, end, jump, request.windowSize, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"),
            s"RangeAnalysisTask_windowed_$jobID"
          )
        case "batched" =>
          context.system.actorOf(
            Props(new BWindowedRangeAnalysisTask(managerCount, jobID, args, analyser, start, end, jump, request.windowSet, newAnalyser, analyserFile)).withDispatcher("analysis-dispatcher"),
            s"RangeAnalysisTask_batchWindowed_$jobID"
          )
      }
      currentTasks.put(jobID, ref)
    }
    catch {
      case _: InvalidActorNameException => println("Job name is not unique, please kill the other job first")
    }
  }
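  // Polls the WatchDog until the cluster reports itself as up, then records the partition manager count and marks analysis as safe.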
  private def clusterReadyForAnalysis(): Unit =
    if (!safe)
      try {
        implicit val timeout: Timeout = Timeout(10.seconds) // time to wait for the watchdog response
        val future = mediator ? DistributedPubSubMediator.Send("/user/WatchDog", ClusterStatusRequest(), false) // ask if the cluster is safe to use
        if (Await.result(future, timeout.duration).asInstanceOf[ClusterStatusResponse].clusterUp) { // if it is
          val future = mediator ? DistributedPubSubMediator.Send("/user/WatchDog", RequestPartitionCount(), false) // ask how many partitions there are
          managerCount = Await.result(future, timeout.duration).asInstanceOf[PartitionsCountResponse].count // when they respond, set the partition manager count to this value
          safe = true
          println("Cluster ready for Analysis")
        }
        else {
          context.system.scheduler.scheduleOnce(Duration(1, SECONDS), self, "startUp") // not ready yet, try again in a second
        }
      } catch {
        case _: java.util.concurrent.TimeoutException => context.system.scheduler.scheduleOnce(Duration(1, SECONDS), self, "startUp") // watchdog did not reply, try again in a second
      }
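  // The cluster is not ready yet, so requeue the request to self after a short delay.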
  private def notYet(request: AnalysisRequest) = {
    // cluster not ready for analysis yet, resubmitting in 5 seconds
    context.system.scheduler.scheduleOnce(Duration(5, SECONDS), self, request)
  }
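  // Instantiates the named analyser by reflection, falling back to compiling the raw source when the class cannot be loaded.
  // The Boolean in the result is true when the analyser was compiled from the raw source.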
  private def getAnalyser(analyserName: String, args: Array[String], rawFile: String): (Boolean, Analyser) = {
    try {
      (false, Class.forName(analyserName).getConstructor(classOf[Array[String]]).newInstance(args).asInstanceOf[Analyser])
    } catch {
      case _: NoSuchMethodException =>
        // the class exists but has no Array[String] constructor, so fall back to the no-arg constructor
        try {
          (false, Class.forName(analyserName).getConstructor().newInstance().asInstanceOf[Analyser])
        }
        catch {
          case _: ClassNotFoundException => processCompileNewAnalyserRequest(rawFile, args)
        }
      case _: ClassNotFoundException =>
        // the class is not on the classpath, so compile the raw source submitted with the request
        processCompileNewAnalyserRequest(rawFile, args)
    }
  }
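  // Compiles an analyser from the submitted raw source; on failure replies with FailedToCompile and returns a BlankAnalyser.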
  def processCompileNewAnalyserRequest(rawFile: String, args: Array[String]): (Boolean, Analyser) = {
    var analyser: Analyser = new BlankAnalyser(args)
    try {
      analyser = LoadExternalAnalyser(rawFile, args).newAnalyser
    }
    catch {
      case e: Exception =>
        sender() ! FailedToCompile(e.getStackTrace.mkString("\n"))
        println(e.getMessage)
        println(analyser.getClass)
    }
    (true, analyser)
  }
}