Columns: repo_name (string, 6-97 chars) · path (string, 3-341 chars) · text (string, 8-1.02M chars)
msatala/wunderx
wunderx-server/app/controllers/WundeRxCtrl.scala
package controllers

import akka.actor.Props
import boilerplate.WunderxProtocol
import jsactor.bridge.server.ServerBridgeActor
import play.api.libs.concurrent.Akka
import play.api.mvc._

object WundeRxCtrl extends Controller {
  import play.api.Play.current

  val taskServerActor = Akka.system.actorOf(Props(new TaskServerActor), "TaskServerActor")

  def index = Action { implicit request ⇒
    Ok(views.html.index())
  }

  def ws = WebSocket.acceptWithActor[String, String] { req ⇒ websocket ⇒
    implicit val protocol = WunderxProtocol
    ServerBridgeActor.props(websocket)
  }
}
msatala/wunderx
wunderx-client/src/main/scala/boilerplate/NgOps.scala
package boilerplate import biz.enef.angulate.Scope import rx._ import rx.ops._ import scala.scalajs.js /** * Created by <NAME> * Date: 15.6.2015 * Time: 13:06 */ object NgOps { implicit class ScopeOps(s: Scope) { def dynamic = s.asInstanceOf[js.Dynamic] def safeApply(any: => Any) = { if (dynamic.$$phase == null) { s.$apply(() => any) } else any } } implicit class LunaVarOps[T](val source: Var[T]) extends AnyVal { def asRx: Rx[T] = source.asInstanceOf[Rx[T]] } implicit class LunaRxOps[+T](val rx: Rx[T]) extends AnyVal { /** * Converts Rx() into a plain javascript function, that can be called by angular. * Update of underlying Rx() triggers $scope.$apply() to ensure that template is re-rendered.. * @return No-parameter javascript function */ def toNg(implicit $scope: Scope): js.Function0[T] = { var value: T = rx() val obs = rx.foreach(newValue => $scope.safeApply(value = newValue), skipInitial = true) $scope.$on("$destroy", () => obs.kill()) () => value } } }
msatala/wunderx
wunderx-client/src/main/scala/boilerplate/WebsocketJsActors.scala
package boilerplate import jsactor.bridge.client.SocketManager import jsactor.bridge.client.util.RemoteActorListener import jsactor.logging.impl.JsPrintlnActorLoggerFactory import jsactor.{JsActorRef, JsActorSystem} import org.scalajs.dom import scala.scalajs.js.Dynamic /** * Created by <NAME> * Date: 7.6.2015 * Time: 19:57 */ class ProxyActor(remoteActorPath: String, val wsManager: JsActorRef) extends RemoteActorListener { override def actorPath: String = remoteActorPath override def onConnect(serverActor: JsActorRef): Unit = {} override def whenConnected(serverActor: JsActorRef): Receive = { case msg => if (sender() == context.parent) { serverActor ! msg } else { context.parent ! msg } } } object WebsocketJsActors { val actorSystem = JsActorSystem("WunderxClient", JsPrintlnActorLoggerFactory) val wsManager = { val webSocketUrl = dom.window.asInstanceOf[Dynamic].webSocketUrl.asInstanceOf[String] implicit val protocol = WunderxProtocol actorSystem.actorOf(SocketManager.props(SocketManager.Config(webSocketUrl)), "socketManager") } }
msatala/wunderx
wunderx-server/app/controllers/TaskServerActor.scala
package controllers

import akka.actor.{Actor, ActorRef, Terminated}
import boilerplate.Subscribe
import wunderx.Task

/**
 * Created by <NAME>
 * Date: 4.6.2015
 * Time: 22:04
 */
class TaskServerActor extends Actor {
  var tasksMap = Map(
    1 -> Task(1, "Foo", completed = true),
    2 -> Task(2, "Bar", completed = false)
  )

  var subscribers = Set.empty[ActorRef]

  override def receive: Receive = {
    case Subscribe =>
      context watch sender()
      subscribers += sender()
      tasksMap.valuesIterator.foreach(sender() ! _)
    case Terminated(actor) =>
      subscribers -= actor
    case task: Task =>
      tasksMap = tasksMap withTask task
      subscribers.filterNot(_ == sender()).foreach(_ ! task)
  }
}
msatala/wunderx
wunderx-shared/src/main/scala/wunderx/Task.scala
package wunderx

import scala.scalajs.js.annotation.JSExportAll

/**
 * Created by <NAME>
 * Date: 4.6.2015
 * Time: 17:06
 */
@JSExportAll
case class Task(id: Int, text: String, completed: Boolean) {
  def toggle = this.copy(completed = !this.completed)
}

object Task {
  type TasksMap = Map[Int, Task]

  implicit class TasksMapOps(tasksMap: TasksMap) {
    def withTask(task: Task) = tasksMap + (task.id -> task)
  }

  def create(text: String): Task = Task(
    id = (Math.random() * Int.MaxValue).toInt,
    text = text,
    completed = false
  )
}
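A minimal usage sketch of the TasksMap helpers above; the object name and task values are made up for illustration.

import wunderx.Task
import wunderx.Task._

object TaskOpsExample extends App {
  val initial: TasksMap = Map(1 -> Task(1, "Write docs", completed = false))

  // withTask inserts or replaces the entry keyed by task.id, so toggling and
  // re-adding the task updates it in place.
  val updated = initial withTask initial(1).toggle

  println(updated(1).completed) // true
}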
msatala/wunderx
wunderx-client/src/main/scala/wunderx/WunderxClient.scala
package wunderx import biz.enef.angulate._ import boilerplate.NgOps._ import boilerplate.{WebsocketJsActors, ProxyActor, Subscribe} import jsactor.{JsActor, JsProps} import rx._ import wunderx.Task._ import scala.scalajs.js import scala.scalajs.js.annotation.JSExport /** * Created by <NAME> * Date: 4.6.2015 * Time: 16:47 */ class TasksClientActor(tasksMapVar: Var[TasksMap]) extends JsActor { val proxy = context.actorOf(JsProps(new ProxyActor("/user/TaskServerActor", WebsocketJsActors.wsManager))) override def preStart(): Unit = { proxy ! Subscribe } override def receive: Receive = { case task: Task => tasksMapVar() = tasksMapVar() withTask task if (sender() != proxy) proxy ! task } } class TasksCtrl($scope: Scope) extends Controller { private implicit val s = $scope private val tasksMapVar: Var[TasksMap] = Var(Map()) val tasksActorRef = WebsocketJsActors.actorSystem.actorOf(JsProps(new TasksClientActor(tasksMapVar))) val partitionedTasksRx = Rx(tasksMapVar().values.partition(_.completed)) val completedRx = Rx(partitionedTasksRx()._1.to[js.Array]).toNg val todoRx = Rx(partitionedTasksRx()._2.to[js.Array]).toNg def taskToggle(task: Task) = tasksActorRef ! task.toggle def addTask(newTaskText: String) = tasksActorRef ! Task.create(newTaskText) } object WunderxClient extends js.JSApp { @JSExport override def main(): Unit = { val module = angular.createModule("wunderx", Seq("ionic")) module.controllerOf[TasksCtrl]("TasksCtrl") } }
msatala/wunderx
build.sbt
<filename>build.sbt import sbt.Project.projectToRef lazy val scalaVs = Seq("2.11.6") lazy val wunderxServer = (project in file("wunderx-server")).settings( crossScalaVersions := scalaVs, scalaVersion := scalaVs.head, scalaJSProjects := Seq(wunderxClient), scalacOptions ++= Settings.scalacOptions, pipelineStages := Seq(scalaJSProd, gzip), libraryDependencies ++= Seq( "com.vmunier" %% "play-scalajs-scripts" % "0.1.0", "com.codemettle.jsactor" %% "jsactor-bridge-server" % "0.6.1" )). enablePlugins(PlayScala). aggregate(wunderxClient). dependsOn(wunderxSharedJvm) lazy val wunderxClient = (project in file("wunderx-client")).settings( crossScalaVersions := scalaVs, scalaVersion := scalaVs.head, scalacOptions ++= Settings.scalacOptions, persistLauncher := true, persistLauncher in Test := false, sourceMapsDirectories += wunderxSharedJs.base / "..", unmanagedSourceDirectories in Compile := Seq((scalaSource in Compile).value), libraryDependencies ++= Seq( "com.codemettle.jsactor" %%% "jsactor" % "0.6.1", "com.codemettle.jsactor" %%% "jsactor-bridge-client" % "0.6.1", "com.codemettle.jsactor" %%% "jsactor-loglevel" % "0.6.1", "org.scala-js" %%% "scalajs-dom" % "0.8.0", "biz.enef" %%% "scalajs-angulate" % "0.2.1", "com.lihaoyi" %%% "scalarx" % "0.2.8" ), skip in packageJSDependencies := false ). enablePlugins(ScalaJSPlugin, ScalaJSPlay). dependsOn(wunderxSharedJs) lazy val wunderxShared = (crossProject.crossType(CrossType.Pure) in file("wunderx-shared")). settings( crossScalaVersions := scalaVs, scalaVersion := scalaVs.head, scalacOptions ++= Settings.scalacOptions, libraryDependencies ++= Seq( "com.codemettle.jsactor" %%% "jsactor-bridge-shared" % "0.6.1", "org.scala-js" %% "scalajs-stubs" % scalaJSVersion % "provided" )). jsConfigure(_ enablePlugins ScalaJSPlay). jsSettings(sourceMapsBase := baseDirectory.value / "..") lazy val wunderxSharedJvm = wunderxShared.jvm lazy val wunderxSharedJs = wunderxShared.js // loads the jvm project at sbt startup onLoad in Global := (Command.process("project wunderxServer", _: State)) compose (onLoad in Global).value
msatala/wunderx
wunderx-shared/src/main/scala/boilerplate/WunderxProtocol.scala
package boilerplate

import jsactor.bridge.protocol.BridgeProtocol
import jsactor.bridge.protocol.BridgeProtocol.MessageRegistry
import upickle._
import wunderx.Task

import scala.reflect.ClassTag

/**
 * Created by <NAME>
 * Date: 7.6.2015
 * Time: 20:00
 */
case object Subscribe

object WunderxProtocol extends BridgeProtocol {
  override def registerMessages(registry: MessageRegistry): Unit = {
    def add[A: Reader : Writer : ClassTag] = {
      registry.add[A]
    }

    def addObj[A <: Singleton : Reader : Writer : ClassTag](obj: A) = {
      registry.addObj(obj)
    }

    add[Task]
    addObj(Subscribe)
  }
}
OliverAbdulrahim/Sandbox
src/Happy.scala
import scala.collection.immutable.Vector

object Happy extends App {

  def format(v: Vector[_]) = v.mkString(", ")

  def testEngineerExuberance(engineers: Vector[Engineer]): Unit = {
    val (happy, unhappy) = engineers partition (_.isHappy)
    if (happy.size > unhappy.size) {
      println(s"Hooray, these people are happy: ${format(happy)}!")
    } else {
      println(s"Bummer, these people are upset: ${format(unhappy)} :(")
    }
  }

  testEngineerExuberance(Vector(new Engineer("<NAME>", false), new Engineer("<NAME>", true)))
}

class Engineer(val name: String, val isHappy: Boolean) {
  override def toString = name capitalize
}
Jolanrensen/kotlin-spark-api
core/2.4/src/main/scala/org/jetbrains/kotlinx/spark/extensions/KSparkExtensions.scala
/*-
 * =LICENSE=
 * Kotlin Spark API: Examples
 * ----------
 * Copyright (C) 2019 - 2020 JetBrains
 * ----------
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =LICENSEEND=
 */
package org.jetbrains.kotlinx.spark.extensions

import java.util

import org.apache.spark.SparkContext
import org.apache.spark.sql._

import scala.collection.JavaConverters._

object KSparkExtensions {

  def col(d: Dataset[_], name: String): Column = d.col(name)

  def col(name: String): Column = functions.col(name)

  def lit(literal: Any): Column = functions.lit(literal)

  def collectAsList[T](ds: Dataset[T]): util.List[T] = ds.collect().toSeq.asJava

  def debugCodegen(df: Dataset[_]): Unit = {
    import org.apache.spark.sql.execution.debug._
    df.debugCodegen()
  }

  def debug(df: Dataset[_]): Unit = {
    import org.apache.spark.sql.execution.debug._
    df.debug()
  }

  def sparkContext(s: SparkSession): SparkContext = s.sparkContext
}
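A short sketch of calling these helpers from Scala; the session setup and data are assumptions for illustration, not part of the repository.

import org.apache.spark.sql.SparkSession
import org.jetbrains.kotlinx.spark.extensions.KSparkExtensions

object KSparkExtensionsDemo extends App {
  val spark = SparkSession.builder().master("local[*]").appName("ksparkextensions-demo").getOrCreate()
  import spark.implicits._

  val ds = Seq(("a", 1), ("b", 2)).toDS()

  // The col/lit helpers simply delegate to org.apache.spark.sql.functions so that
  // Kotlin callers get untyped Column expressions without needing Scala implicits.
  val filtered = ds.filter(KSparkExtensions.col(ds, "_2") > KSparkExtensions.lit(1))

  // collectAsList hands back a java.util.List, the shape the Kotlin API expects.
  println(KSparkExtensions.collectAsList(filtered))

  spark.stop()
}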
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/scaladsl/ServiceHandler.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.scaladsl

import scala.concurrent.Future

import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.StatusCodes

object ServiceHandler {

  private val notFound = Future.successful(HttpResponse(StatusCodes.NotFound))

  /**
   * Creates a [[HttpRequest]] to [[HttpResponse]] handler that can be used in for example `Http().bindAndHandleAsync`
   * for the generated partial function handlers and ends with `StatusCodes.NotFound` if the request is not matching.
   */
  def concatOrNotFound(
      handlers: PartialFunction[HttpRequest, Future[HttpResponse]]*): HttpRequest => Future[HttpResponse] =
    handlers
      .foldLeft(PartialFunction.empty[HttpRequest, Future[HttpResponse]]) {
        case (acc, pf) => acc.orElse(pf)
      }
      .orElse { case _ => notFound }
}
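A sketch of how concatOrNotFound is typically wired up. The two partial handlers are empty stand-ins here (real projects would pass the akka-grpc generated `...Handler.partial(...)` values), so every request falls through to the NotFound response.

import akka.actor.ActorSystem
import akka.grpc.scaladsl.ServiceHandler
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import scala.concurrent.Future

object CombinedHandlerExample extends App {
  implicit val system: ActorSystem = ActorSystem("combined")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Stand-ins for generated partial handlers; plug in the real ones from the codegen output.
  val greeterHandler: PartialFunction[HttpRequest, Future[HttpResponse]] = PartialFunction.empty
  val echoHandler: PartialFunction[HttpRequest, Future[HttpResponse]] = PartialFunction.empty

  // Requests matched by neither handler get the NotFound response added by concatOrNotFound.
  val handler: HttpRequest => Future[HttpResponse] = ServiceHandler.concatOrNotFound(greeterHandler, echoHandler)

  Http().bindAndHandleAsync(handler, interface = "127.0.0.1", port = 8080)
}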
kristoflemmens/akka-grpc
project/Common.scala
import sbt.Keys._
import sbt._
import sbt.plugins.JvmPlugin
import akka.grpc.Dependencies.Versions.{ scala212, scala213 }
import org.scalafmt.sbt.ScalafmtPlugin.autoImport.scalafmtOnCompile

object Common extends AutoPlugin {

  override def trigger = allRequirements

  override def requires = JvmPlugin

  override def globalSettings = Seq(
    organization := "com.lightbend.akka.grpc",
    organizationName := "Lightbend Inc.",
    organizationHomepage := Some(url("https://www.lightbend.com/")),
    // apiURL := Some(url(s"https://doc.akka.io/api/akka-grpc/${version.value}")),
    homepage := Some(url("https://akka.io/")),
    scmInfo := Some(ScmInfo(url("https://github.com/akka/akka-grpc"), "<EMAIL>:akka/akka-grpc")),
    developers += Developer(
      "contributors",
      "Contributors",
      "https://gitter.im/akka/dev",
      url("https://github.com/akka/akka-grpc/graphs/contributors")),
    licenses := Seq("Apache-2.0" -> url("https://www.apache.org/licenses/LICENSE-2.0")),
    description := "Akka gRPC - Support for building streaming gRPC servers and clients on top of Akka Streams.")

  override lazy val projectSettings = Seq(
    scalacOptions ++= List("-unchecked", "-deprecation", "-language:_", "-encoding", "UTF-8"),
    javacOptions ++= List("-Xlint:unchecked", "-Xlint:deprecation"),
    crossScalaVersions := Seq(scala212, scala213),
    scalafmtOnCompile := true)
}
kristoflemmens/akka-grpc
interop-tests/src/main/scala/akka/grpc/interop/IoGrpcClient.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.interop

import io.grpc.internal.testing.TestUtils
import io.grpc.testing.integration2.{ GrpcJavaClientTester, Settings, TestServiceClient }

object IoGrpcClient extends GrpcClient {
  override def run(args: Array[String]): Unit = {
    TestUtils.installConscryptIfAvailable()
    val settings = Settings.parseArgs(args)

    val client = new TestServiceClient(new GrpcJavaClientTester(settings))
    client.setUp()

    try client.run(settings)
    finally {
      client.tearDown()
    }
  }
}
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/javadsl/JavaInterfaceCodeGenerator.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.gen.javadsl

import scala.collection.immutable

import akka.grpc.gen.Logger
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse
import templates.JavaCommon.txt.ApiInterface

object JavaInterfaceCodeGenerator extends JavaCodeGenerator {
  override def name = "akka-grpc-javadsl-interface"

  override def perServiceContent: Set[(Logger, Service) => immutable.Seq[CodeGeneratorResponse.File]] =
    super.perServiceContent + generateServiceFile

  val generateServiceFile: (Logger, Service) => immutable.Seq[CodeGeneratorResponse.File] = (logger, service) => {
    val b = CodeGeneratorResponse.File.newBuilder()
    b.setContent(ApiInterface(service).body)
    b.setName(s"${service.packageDir}/${service.name}.java")
    logger.info(s"Generating Akka gRPC service interface for [${service.packageName}.${service.name}]")
    immutable.Seq(b.build)
  }
}
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/javadsl/GrpcMarshalling.scala
<reponame>kristoflemmens/akka-grpc /* * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.javadsl import java.util.concurrent.{ CompletableFuture, CompletionStage } import io.grpc.Status import akka.NotUsed import akka.actor.ActorSystem import akka.http.scaladsl.model.HttpEntity.LastChunk import akka.http.scaladsl.model.{ HttpEntity => SHttpEntity, HttpResponse => SHttpResponse } import akka.http.scaladsl.model.headers.RawHeader import akka.http.javadsl.model.{ HttpRequest, HttpResponse } import akka.stream.Materializer import akka.stream.javadsl.{ Sink, Source } import akka.stream.scaladsl.{ Source => SSource } import akka.grpc._ import akka.grpc.internal.{ CancellationBarrierGraphStage, GrpcResponseHelpers } import akka.grpc.scaladsl.{ GrpcExceptionHandler => sGrpcExceptionHandler } import akka.grpc.scaladsl.headers.`Message-Encoding` object GrpcMarshalling { def unmarshal[T](req: HttpRequest, u: ProtobufSerializer[T], mat: Materializer): CompletionStage[T] = { val messageEncoding = `Message-Encoding`.findIn(req.getHeaders) req.entity.getDataBytes .via(Grpc.grpcFramingDecoder(messageEncoding)) .map(japiFunction(u.deserialize)) .runWith(Sink.head[T], mat) } def unmarshalStream[T]( req: HttpRequest, u: ProtobufSerializer[T], mat: Materializer): CompletionStage[Source[T, NotUsed]] = { val messageEncoding = `Message-Encoding`.findIn(req.getHeaders) CompletableFuture.completedFuture( req.entity.getDataBytes .via(Grpc.grpcFramingDecoder(messageEncoding)) .map(japiFunction(u.deserialize)) // In gRPC we signal failure by returning an error code, so we // don't want the cancellation bubbled out .via(new CancellationBarrierGraphStage) .mapMaterializedValue(japiFunction(_ => NotUsed))) } def marshal[T](e: T, m: ProtobufSerializer[T], mat: Materializer, codec: Codec, system: ActorSystem): HttpResponse = marshalStream(Source.single(e), m, mat, codec, system) def marshal[T]( e: T, m: ProtobufSerializer[T], mat: Materializer, codec: Codec, system: ActorSystem, eHandler: ActorSystem => PartialFunction[Throwable, Status] = sGrpcExceptionHandler.defaultMapper): HttpResponse = marshalStream(Source.single(e), m, mat, codec, system, eHandler) def marshalStream[T]( e: Source[T, NotUsed], m: ProtobufSerializer[T], mat: Materializer, codec: Codec, system: ActorSystem, eHandler: ActorSystem => PartialFunction[Throwable, Status] = sGrpcExceptionHandler.defaultMapper): HttpResponse = GrpcResponseHelpers(e.asScala, eHandler)(m, mat, Identity, system) def status(status: Status): HttpResponse = SHttpResponse(entity = SHttpEntity.Chunked(Grpc.contentType, SSource.single(trailer(status)))) private def trailer(status: Status): LastChunk = LastChunk( trailer = List(RawHeader("grpc-status", status.getCode.value.toString)) ++ Option(status.getDescription) .map(RawHeader("grpc-message", _))) }
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/Logging.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.gen

import java.io.PrintWriter

// specific to gen so that the build tools can implement their own
trait Logger {
  def debug(text: String): Unit
  def info(text: String): Unit
  def warn(text: String): Unit
  def error(text: String): Unit
}

/**
 * Simple standard out logger for use in tests or where there is no logger from the build tool available
 */
object StdoutLogger extends Logger {
  def debug(text: String): Unit = println(s"[debug] $text")
  def info(text: String): Unit = println(s"[info] $text")
  def warn(text: String): Unit = println(s"[warn] $text")
  def error(text: String): Unit = println(s"[error] $text")
}

object SilencedLogger extends Logger {
  def debug(text: String): Unit = ()
  def info(text: String): Unit = ()
  def warn(text: String): Unit = ()
  def error(text: String): Unit = ()
}

class FileLogger(path: String) extends Logger {
  val printer = new PrintWriter(path, "UTF-8")

  def debug(text: String): Unit = {
    printer.println(s"[debug] $text")
    printer.flush()
  }

  def info(text: String): Unit = {
    printer.println(s"[info] $text")
    printer.flush()
  }

  def warn(text: String): Unit = {
    printer.println(s"[warn] $text")
    printer.flush()
  }

  def error(text: String): Unit = {
    printer.println(s"[error] $text")
    printer.flush()
  }
}
kristoflemmens/akka-grpc
benchmark-java/project/plugins.sbt
addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.5")

lazy val plugins = project in file(".") dependsOn ProjectRef(file("../../"), "sbt-akka-grpc")

// Use this instead of above when importing to IDEA, after publishLocal and replacing the version here
//addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "0.1+32-fd597fcb+20180618-1248")
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/javadsl/Serializer.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.gen.javadsl

import com.google.protobuf.Descriptors.Descriptor

final case class Serializer(name: String, init: String, messageType: String)

object Serializer {
  def apply(messageType: Descriptor): Serializer =
    Serializer(
      messageType.getName + "Serializer",
      s"new GoogleProtobufSerializer<>(${Method.getMessageType(messageType)}.class)",
      Method.getMessageType(messageType))
}
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/scaladsl/GrpcExceptionHandler.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.scaladsl

import akka.actor.ActorSystem
import akka.grpc.GrpcServiceException
import akka.grpc.internal.GrpcResponseHelpers
import akka.http.scaladsl.model.HttpResponse
import io.grpc.Status

import scala.concurrent.{ ExecutionException, Future }

object GrpcExceptionHandler {

  def default(mapper: PartialFunction[Throwable, Status])(
      implicit system: ActorSystem): PartialFunction[Throwable, Future[HttpResponse]] =
    mapper.orElse(defaultMapper(system)).andThen(s => Future.successful(GrpcResponseHelpers.status(s)))

  def defaultMapper(system: ActorSystem): PartialFunction[Throwable, Status] = {
    case e: ExecutionException =>
      if (e.getCause == null) Status.INTERNAL
      else defaultMapper(system)(e.getCause)
    case grpcException: GrpcServiceException => grpcException.status
    case _: NotImplementedError              => Status.UNIMPLEMENTED
    case _: UnsupportedOperationException    => Status.UNIMPLEMENTED
    case other =>
      system.log.error(other, other.getMessage)
      Status.INTERNAL
  }

  def default(implicit system: ActorSystem): PartialFunction[Throwable, Future[HttpResponse]] =
    default(defaultMapper(system))
}
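A sketch of plugging a custom mapping into default; QuotaExceededException is a hypothetical domain exception used only for illustration.

import akka.actor.ActorSystem
import akka.grpc.scaladsl.GrpcExceptionHandler
import akka.http.scaladsl.model.HttpResponse
import io.grpc.Status
import scala.concurrent.Future

// Hypothetical domain exception, not part of akka-grpc.
final class QuotaExceededException(msg: String) extends RuntimeException(msg)

object CustomErrorMapping {
  private val quotaMapper: PartialFunction[Throwable, Status] = {
    case _: QuotaExceededException => Status.RESOURCE_EXHAUSTED
  }

  // Anything quotaMapper does not cover still falls back to defaultMapper.
  def handler(implicit system: ActorSystem): PartialFunction[Throwable, Future[HttpResponse]] =
    GrpcExceptionHandler.default(quotaMapper)
}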
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/internal/HardcodedServiceDiscovery.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.internal

import akka.discovery.{ Lookup, ServiceDiscovery }
import akka.discovery.ServiceDiscovery.Resolved

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class HardcodedServiceDiscovery(resolved: Resolved) extends ServiceDiscovery {
  override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] =
    Future.successful(resolved)
}
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/scaladsl/Metadata.scala
<gh_stars>0 /* * Copyright (C) 2009-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.scaladsl import java.util.Base64 import akka.annotation.DoNotInherit import akka.http.scaladsl.model.HttpHeader import akka.util.ByteString import scala.collection.immutable /** * Immutable representation of the metadata in a call * * Not for user extension */ @DoNotInherit trait Metadata { /** * @return The text header value for `key` if one exists, if the same key has multiple values the last occurrence * that is a text key is used. */ def getText(key: String): Option[String] /** * @return The binary header value for `key` if one exists, if the same key has multiple values the last occurrence * that is a text key is used. */ def getBinary(key: String): Option[ByteString] /** * @return The metadata as a map. */ def asMap: Map[String, List[MetadataEntry]] } class MetadataImpl(headers: immutable.Seq[HttpHeader] = immutable.Seq.empty) extends Metadata { lazy private val map: Map[String, List[MetadataEntry]] = { // REVIEWER NOTE: modeled after akka.grpc.internal.MetadataImpl.metadataMapFromGoogleGrpcMetadata var entries = Map.empty[String, List[MetadataEntry]] headers.foreach { header => val key = header.lowercaseName() val entry = if (key.endsWith("-bin")) { val bytes = Base64.getDecoder.decode(header.value()) BytesEntry(ByteString(bytes)) } else { val text = header.value StringEntry(text) } if (entries.contains(key)) { entries += (key -> (entry :: entries(key))) } else entries += (key -> (entry :: Nil)) } entries } override def getText(key: String): Option[String] = headers.collectFirst { case header if header.name == key => header.value } override def getBinary(key: String): Option[ByteString] = headers.collectFirst { case header if header.name == key => ByteString(Base64.getDecoder.decode(header.value)) } override def asMap: Map[String, List[MetadataEntry]] = map }
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/javadsl/JavaServerCodeGenerator.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.gen.javadsl

import scala.collection.immutable

import akka.grpc.gen.{ BuildInfo, CodeGenerator, Logger }
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse
import protocbridge.Artifact
import templates.JavaServer.txt.{ Handler, PowerApiInterface }

class JavaServerCodeGenerator extends JavaCodeGenerator {
  override def name = "akka-grpc-javadsl-server"

  override def perServiceContent: Set[(Logger, Service) => immutable.Seq[CodeGeneratorResponse.File]] =
    super.perServiceContent + generatePlainHandlerFactory + generatePowerHandlerFactory + generatePowerService

  override val suggestedDependencies = (scalaBinaryVersion: CodeGenerator.ScalaBinaryVersion) =>
    Seq(
      Artifact(
        BuildInfo.organization,
        BuildInfo.runtimeArtifactName + "_" + scalaBinaryVersion.prefix,
        BuildInfo.version))

  val generatePlainHandlerFactory: (Logger, Service) => immutable.Seq[CodeGeneratorResponse.File] =
    (logger, service) => {
      val b = CodeGeneratorResponse.File.newBuilder()
      b.setContent(Handler(service, powerApis = false).body)
      val serverPath = s"${service.packageDir}/${service.name}HandlerFactory.java"
      b.setName(serverPath)
      logger.info(s"Generating Akka gRPC service handler for ${service.packageName}.${service.name}")
      immutable.Seq(b.build)
    }

  val generatePowerHandlerFactory: (Logger, Service) => immutable.Seq[CodeGeneratorResponse.File] =
    (logger, service) => {
      if (service.serverPowerApi) {
        val b = CodeGeneratorResponse.File.newBuilder()
        b.setContent(Handler(service, powerApis = true).body)
        val serverPath = s"${service.packageDir}/${service.name}PowerApiHandlerFactory.java"
        b.setName(serverPath)
        logger.info(s"Generating Akka gRPC service power API handler for ${service.packageName}.${service.name}")
        immutable.Seq(b.build)
      } else immutable.Seq.empty
    }

  val generatePowerService: (Logger, Service) => immutable.Seq[CodeGeneratorResponse.File] = (logger, service) => {
    if (service.serverPowerApi) {
      val b = CodeGeneratorResponse.File.newBuilder()
      b.setContent(PowerApiInterface(service).body)
      b.setName(s"${service.packageDir}/${service.name}PowerApi.java")
      logger.info(s"Generating Akka gRPC service power interface for [${service.packageName}.${service.name}]")
      immutable.Seq(b.build)
    } else immutable.Seq.empty
  }
}

object JavaServerCodeGenerator extends JavaServerCodeGenerator
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/internal/Marshaller.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.internal

import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, InputStream }

import io.grpc.KnownLength
import akka.annotation.InternalApi
import akka.grpc.ProtobufSerializer

/**
 * INTERNAL API
 */
@InternalApi
final class Marshaller[T <: scalapb.GeneratedMessage](u: ProtobufSerializer[T])
    extends io.grpc.MethodDescriptor.Marshaller[T] {
  override def parse(stream: InputStream): T = {
    val baos = new ByteArrayOutputStream(math.max(64, stream.available()))
    val buffer = new Array[Byte](32 * 1024)

    // Blocking calls underneath...
    // we can't avoid it for the moment because we are relying on the Netty's Channel API
    var bytesRead = stream.read(buffer)
    while (bytesRead >= 0) {
      baos.write(buffer, 0, bytesRead)
      bytesRead = stream.read(buffer)
    }
    u.deserialize(akka.util.ByteString(baos.toByteArray))
  }

  override def stream(value: T): InputStream = new ByteArrayInputStream(value.toByteArray) with KnownLength
}
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/scaladsl/ScalaTraitCodeGenerator.scala
/*
 * Copyright (C) 2009-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.gen.scaladsl

import scala.collection.immutable

import akka.grpc.gen.Logger
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse
import templates.ScalaCommon.txt.ApiTrait

object ScalaTraitCodeGenerator extends ScalaCodeGenerator {
  override def name = "akka-grpc-scaladsl-trait"

  override def perServiceContent = super.perServiceContent + generateServiceFile

  val generateServiceFile: (Logger, Service) => immutable.Seq[CodeGeneratorResponse.File] = (logger, service) => {
    val b = CodeGeneratorResponse.File.newBuilder()
    b.setContent(ApiTrait(service).body)
    b.setName(s"${service.packageDir}/${service.name}.scala")
    logger.info(s"Generating Akka gRPC service interface for ${service.packageName}.${service.name}")
    immutable.Seq(b.build)
  }
}
kristoflemmens/akka-grpc
interop-tests/src/test/scala/akka/grpc/interop/app/AkkaHttpServerAppScala.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.interop.app

import akka.grpc.interop.AkkaHttpServerProviderScala

/**
 * Scala application that starts a web server at localhost serving the test
 * application used for the gRPC integration tests.
 *
 * This can be useful for 'manually' interacting with this server.
 *
 * You can start this app from sbt with 'akka-grpc-interop-tests/test:reStart'
 */
object AkkaHttpServerAppScala extends App {
  val (sys, binding) = AkkaHttpServerProviderScala.server.start()

  sys.log.info(s"Bound to ${binding.localAddress}")
}
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/Codecs.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc

import akka.grpc.scaladsl.headers.`Message-Accept-Encoding`
import akka.http.javadsl.{ model => jm }

import scala.collection.immutable

object Codecs {
  // TODO should this list be made user-extensible?
  val supportedCodecs = immutable.Seq(Gzip)

  private val supported = supportedCodecs.map(_.name)
  private val byName = supportedCodecs.map(c => c.name -> c).toMap

  def negotiate(request: jm.HttpRequest): Codec =
    `Message-Accept-Encoding`
      .findIn(request.getHeaders)
      .intersect(supported)
      .headOption
      .map(byName(_))
      .getOrElse(Identity)
}
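A small sketch of negotiate in action. It assumes the Message-Accept-Encoding header is carried on the wire as grpc-accept-encoding; if that assumption is off, both calls simply fall back to Identity.

import akka.grpc.Codecs
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.headers.RawHeader

object CodecNegotiationExample extends App {
  val plain = HttpRequest()
  // Assumed wire name for the Message-Accept-Encoding header.
  val gzipCapable = HttpRequest(headers = List(RawHeader("grpc-accept-encoding", "gzip")))

  println(Codecs.negotiate(plain))       // Identity: nothing advertised
  println(Codecs.negotiate(gzipCapable)) // Gzip, if the header name assumption holds
}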
kristoflemmens/akka-grpc
runtime/src/test/scala/akka/grpc/JUnitEventually.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc

import org.scalatest.concurrent.Eventually
import org.scalatestplus.junit.JUnitSuite
import org.scalatest.time.{ Millis, Span }

abstract class JUnitEventually extends JUnitSuite with Eventually {
  implicit val patience: PatienceConfig =
    PatienceConfig(timeout = Span(500, Millis), interval = Span(20, Millis))

  def junitEventually[T](fun: => T): T = eventually(fun)
}
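A minimal sketch of a test that extends JUnitEventually; the class name and counter are made up for illustration.

import akka.grpc.JUnitEventually
import org.junit.Test

class EventuallyExampleSpec extends JUnitEventually {
  @Test
  def reachesThresholdEventually(): Unit = {
    var count = 0
    // Retried by Eventually until the assertion passes or the 500 ms patience runs out.
    junitEventually {
      count += 1
      assert(count >= 3)
    }
  }
}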
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/Main.scala
<filename>codegen/src/main/scala/akka/grpc/gen/Main.scala /* * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.gen import java.io.ByteArrayOutputStream import com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest import akka.grpc.gen.javadsl.{ JavaClientCodeGenerator, JavaInterfaceCodeGenerator, JavaServerCodeGenerator } import akka.grpc.gen.scaladsl.{ ScalaClientCodeGenerator, ScalaServerCodeGenerator, ScalaTraitCodeGenerator } // This is the protoc plugin that the gradle plugin uses object Main extends App { val inBytes: Array[Byte] = { val baos = new ByteArrayOutputStream(math.max(64, System.in.available())) val buffer = new Array[Byte](32 * 1024) var bytesRead = System.in.read(buffer) while (bytesRead >= 0) { baos.write(buffer, 0, bytesRead) bytesRead = System.in.read(buffer) } baos.toByteArray } val req = CodeGeneratorRequest.parseFrom(inBytes) val KeyValueRegex = """([^=]+)=(.*)""".r val parameters = req.getParameter .split(",") .flatMap { case KeyValueRegex(key, value) => Some((key.toLowerCase, value)) case _ => None } .toMap private val languageScala: Boolean = parameters.get("language").map(_.equalsIgnoreCase("scala")).getOrElse(false) private val generateClient: Boolean = parameters.get("generate_client").map(!_.equalsIgnoreCase("false")).getOrElse(true) private val generateServer: Boolean = parameters.get("generate_server").map(!_.equalsIgnoreCase("false")).getOrElse(true) private val extraGenerators: List[String] = parameters.getOrElse("extra_generators", "").split(";").toList.filter(_ != "") private val logger = parameters.get("logfile").map(new FileLogger(_)).getOrElse(SilencedLogger) val out = { val codeGenerators = if (languageScala) { // Scala if (generateClient && generateServer) Seq(ScalaTraitCodeGenerator, ScalaClientCodeGenerator, ScalaServerCodeGenerator) else if (generateClient) Seq(ScalaTraitCodeGenerator, ScalaClientCodeGenerator) else if (generateServer) Seq(ScalaTraitCodeGenerator, ScalaServerCodeGenerator) else throw new IllegalArgumentException("At least one of generateClient or generateServer must be enabled") } else { // Java if (generateClient && generateServer) Seq(JavaInterfaceCodeGenerator, JavaClientCodeGenerator, JavaServerCodeGenerator) else if (generateClient) Seq(JavaInterfaceCodeGenerator, JavaClientCodeGenerator) else if (generateServer) Seq(JavaInterfaceCodeGenerator, JavaServerCodeGenerator) else throw new IllegalArgumentException("At least one of generateClient or generateServer must be enabled") } val loadedExtraGenerators = extraGenerators.map(cls => Class.forName(cls).newInstance().asInstanceOf[CodeGenerator]) (codeGenerators ++ loadedExtraGenerators).foreach { g => val gout = g.run(req, logger) System.out.write(gout.toByteArray) System.out.flush() } } }
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/internal/ClientState.scala
<reponame>kristoflemmens/akka-grpc /* * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.internal import java.util.concurrent.CompletionStage import java.util.concurrent.atomic.AtomicReference import akka.Done import akka.annotation.InternalApi import akka.grpc.GrpcClientSettings import akka.stream.{ ActorMaterializer, Materializer } import io.grpc.ManagedChannel import scala.annotation.tailrec import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.util.Failure import scala.compat.java8.FutureConverters._ /** * INTERNAL API * Used from generated code so can't be private. * * Client utilities taking care of Channel reconnection and Channel lifecycle in general. */ @InternalApi final class ClientState(settings: GrpcClientSettings, channelFactory: GrpcClientSettings => InternalChannel)( implicit mat: Materializer, ex: ExecutionContext) { def this(settings: GrpcClientSettings)(implicit mat: Materializer, ex: ExecutionContext) = this(settings, s => NettyClientUtils.createChannel(s)) private val internalChannelRef = new AtomicReference[Option[InternalChannel]](Some(create())) // usually None, it'll have a value when the underlying InternalChannel is closing or closed. private val closing = new AtomicReference[Option[Future[Done]]](None) private val closeDemand: Promise[Done] = Promise[Done]() mat match { case m: ActorMaterializer => m.system.whenTerminated.foreach(_ => close())(ex) case _ => } def withChannel[A](f: Future[ManagedChannel] => A): A = f { internalChannelRef.get().getOrElse(throw new ClientClosedException).managedChannel } def closedCS(): CompletionStage[Done] = closed().toJava def closeCS(): CompletionStage[Done] = close().toJava def closed(): Future[Done] = // while there's no request to close this RestartingClient, it will continue to restart. // Once there's demand, the `closeDemand` future will redeem flatMapping with the `closing` // future which is a reference to promise of the internalChannel close status. closeDemand.future.flatMap { _ => // `closeDemand` guards the read access to `closing` closing.get().get } @tailrec def close(): Future[Done] = { val maybeChannel = internalChannelRef.get() maybeChannel match { case Some(channel) => // invoke `close` on the channel and capture the `channel.done` returned val done = ChannelUtils.close(channel) // set the `closing` to the current `channel.done` closing.compareAndSet(None, Some(done)) // notify there's been close demand (see `def closed()` above) closeDemand.trySuccess(Done) if (internalChannelRef.compareAndSet(maybeChannel, None)) { done } else { // when internalChannelRef was not maybeChannel if (internalChannelRef.get.isDefined) { // client has had a ClientConnectionException and been re-created, need to shutdown the new one close() } else { // or a competing thread already set `internalChannelRef` to None and CAS failed. done } } case _ => // set the `closing` to immediate success val done = Future.successful(Done) closing.compareAndSet(None, Some(done)) // notify there's been close demand (see `def closed()` above) closeDemand.trySuccess(Done) done } } private def create(): InternalChannel = { val internalChannel: InternalChannel = channelFactory(settings) internalChannel.done.onComplete { case Failure(_: ClientConnectionException | _: NoTargetException) => val old = internalChannelRef.get() if (old.isDefined) { val newInternalChannel = create() // Only one client is alive at a time. 
However a close() could have happened between the get() and this set if (!internalChannelRef.compareAndSet(old, Some(newInternalChannel))) { // close the newly created client we've been shutdown ChannelUtils.close(newInternalChannel) } } case Failure(_) => close() case _ => // let success through } internalChannel } } /** * INTERNAL API * Used from generated code so can't be private. * * Thrown if a withChannel call is called after closing the internal channel */ @InternalApi final class ClientClosedException() extends RuntimeException("withChannel called after close()")
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/GrpcServiceException.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc

import io.grpc.Status

class GrpcServiceException(val status: Status) extends RuntimeException(status.getDescription) {
  require(!status.isOk, "Use GrpcServiceException in case of failure, not as a flow control mechanism.")
}
kristoflemmens/akka-grpc
codegen/src/main/scala/akka/grpc/gen/scaladsl/Method.scala
/* * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.gen.scaladsl import com.google.protobuf.Descriptors.{ Descriptor, MethodDescriptor } import akka.grpc.gen._ import scalapb.compiler.DescriptorImplicits case class Method( name: String, grpcName: String, inputType: Descriptor, inputStreaming: Boolean, outputType: Descriptor, outputStreaming: Boolean, comment: Option[String] = None)(implicit ops: DescriptorImplicits) { import Method._ def deserializer = Serializer(inputType) def serializer = Serializer(outputType) def unmarshal = if (inputStreaming) "GrpcMarshalling.unmarshalStream" else "GrpcMarshalling.unmarshal" def marshal = if (outputStreaming) "GrpcMarshalling.marshalStream" else "GrpcMarshalling.marshal" def parameterType = if (inputStreaming) s"akka.stream.scaladsl.Source[${messageType(inputType)}, akka.NotUsed]" else messageType(inputType) def inputTypeUnboxed = messageType(inputType) def outputTypeUnboxed = messageType(outputType) def returnType = if (outputStreaming) s"akka.stream.scaladsl.Source[${messageType(outputType)}, akka.NotUsed]" else s"scala.concurrent.Future[${messageType(outputType)}]" val methodType: MethodType = { (inputStreaming, outputStreaming) match { case (false, false) => Unary case (true, false) => ClientStreaming case (false, true) => ServerStreaming case (true, true) => BidiStreaming } } } object Method { def apply(descriptor: MethodDescriptor)(implicit ops: DescriptorImplicits): Method = { import ops._ Method( name = methodName(descriptor.getName), grpcName = descriptor.getName, descriptor.getInputType, descriptor.toProto.getClientStreaming, descriptor.getOutputType, descriptor.toProto.getServerStreaming, descriptor.comment) } private def methodName(name: String) = name.head.toLower +: name.tail def messageType(messageType: Descriptor)(implicit ops: DescriptorImplicits) = { import ops._ messageType.scalaTypeName } private def outerClass(t: Descriptor) = if (t.getFile.toProto.getOptions.getJavaMultipleFiles) "" else { val outerClassName = t.getFile.toProto.getOptions.getJavaOuterClassname if (outerClassName == "") { protoName(t).head.toUpper + protoName(t).tail + "." } else { outerClassName + "." } } private def protoName(t: Descriptor) = t.getFile.getName.replaceAll("\\.proto", "").split("/").last }
kristoflemmens/akka-grpc
runtime/src/main/scala/akka/grpc/internal/CancellationBarrierGraphStage.scala
<filename>runtime/src/main/scala/akka/grpc/internal/CancellationBarrierGraphStage.scala /* * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com> */ package akka.grpc.internal import akka.stream.{ Attributes, FlowShape, Inlet, Outlet } import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } /** * 'barrier' that makes sure that, even when downstream is cancelled, * the complete upstream is consumed. * * @tparam T */ class CancellationBarrierGraphStage[T] extends GraphStage[FlowShape[T, T]] { val in: Inlet[T] = Inlet("CancellationBarrier") val out: Outlet[T] = Outlet("CancellationBarrier") override val shape: FlowShape[T, T] = FlowShape(in, out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { setHandler(in, new InHandler { override def onPush(): Unit = emit(out, grab(in)) }) setHandler( out, new OutHandler { override def onPull(): Unit = pull(in) override def onDownstreamFinish(): Unit = { if (!hasBeenPulled(in)) pull(in) setHandler(in, new InHandler { override def onPush(): Unit = { grab(in) pull(in) } }) } }) } }
kristoflemmens/akka-grpc
plugin-tester-scala/src/main/scala/example/myapp/helloworld/PowerGreeterServiceImpl.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

//#full-service-impl
package example.myapp.helloworld

import akka.NotUsed
import akka.grpc.scaladsl.Metadata
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import example.myapp.helloworld.grpc._

import scala.concurrent.Future

class PowerGreeterServiceImpl(materializer: Materializer) extends GreeterServicePowerApi {
  import materializer.executionContext
  private implicit val mat: Materializer = materializer

  override def sayHello(in: HelloRequest, metadata: Metadata): Future[HelloReply] = {
    val greetee = authTaggedName(in, metadata)
    println(s"sayHello to $greetee")
    Future.successful(HelloReply(s"Hello, $greetee"))
  }

  override def itKeepsTalking(in: Source[HelloRequest, NotUsed], metadata: Metadata): Future[HelloReply] = {
    println(s"sayHello to in stream...")
    in.runWith(Sink.seq)
      .map(elements => HelloReply(s"Hello, ${elements.map(authTaggedName(_, metadata)).mkString(", ")}"))
  }

  override def itKeepsReplying(in: HelloRequest, metadata: Metadata): Source[HelloReply, NotUsed] = {
    val greetee = authTaggedName(in, metadata)
    println(s"sayHello to $greetee with stream of chars...")
    Source(s"Hello, $greetee".toList).map(character => HelloReply(character.toString))
  }

  override def streamHellos(in: Source[HelloRequest, NotUsed], metadata: Metadata): Source[HelloReply, NotUsed] = {
    println(s"sayHello to stream...")
    in.map(request => HelloReply(s"Hello, ${authTaggedName(request, metadata)}"))
  }

  // Bare-bones just for GRPC metadata demonstration purposes
  private def isAuthenticated(metadata: Metadata): Boolean =
    metadata.getText("authorization").nonEmpty

  private def authTaggedName(in: HelloRequest, metadata: Metadata): String = {
    val authenticated = isAuthenticated(metadata)
    s"${in.name} (${if (!authenticated) "not " else ""}authenticated)"
  }
}
//#full-service-impl
kristoflemmens/akka-grpc
plugin-tester-java/src/test/scala/example/myapp/helloworld/ErrorReportingSpec.scala
/* * Copyright (C) 2019 Lightbend Inc. <https://www.lightbend.com> */ package example.myapp.helloworld import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.HttpEntity.{ Chunked, LastChunk } import akka.http.scaladsl.model.headers.RawHeader import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse, StatusCodes } import akka.stream.ActorMaterializer import akka.stream.scaladsl.Sink import example.myapp.helloworld.grpc.{ GreeterService, GreeterServiceHandlerFactory } import io.grpc.Status import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.Span import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec } import scala.concurrent.Await import scala.concurrent.duration._ class ErrorReportingSpec extends WordSpec with Matchers with ScalaFutures with BeforeAndAfterAll { implicit val sys = ActorSystem() override implicit val patienceConfig = PatienceConfig(5.seconds, Span(100, org.scalatest.time.Millis)) "A gRPC server" should { implicit val mat = ActorMaterializer() val handler = GreeterServiceHandlerFactory.create(new GreeterServiceImpl(mat), mat, sys) val binding = { import akka.http.javadsl.{ ConnectHttp, Http, HttpConnectionContext, UseHttp2 } Http(sys) .bindAndHandleAsync( handler, // We test responding to invalid requests with HTTP/1.1 since // we don't have a raw HTTP/2 client available to construct invalid // HTTP/2 requests. ConnectHttp.toHost("127.0.0.1", 0, UseHttp2.never), mat) .toCompletableFuture .get } "respond with an 'unimplemented' gRPC error status when calling an unknown method" in { val request = HttpRequest(uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/UnknownMethod") val response = Http().singleRequest(request).futureValue response.status should be(StatusCodes.OK) allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.UNIMPLEMENTED.value().toString)) } "respond with an 'internal' gRPC error status when calling an method without a request body" in { val request = HttpRequest( method = HttpMethods.POST, uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/SayHello") val response = Http().singleRequest(request).futureValue response.status should be(StatusCodes.OK) allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.INTERNAL.value().toString)) } def allHeaders(response: HttpResponse) = response.entity match { case Chunked(_, chunks) => chunks.runWith(Sink.last).futureValue match { case LastChunk(_, trailingHeaders) => response.headers ++ trailingHeaders case _ => response.headers } case _ => response.headers } } override def afterAll: Unit = Await.result(sys.terminate(), 5.seconds) }
kristoflemmens/akka-grpc
interop-tests/src/main/scala/akka/grpc/interop/AkkaGrpcClientScala.scala
/*
 * Copyright (C) 2018-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.grpc.interop

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, Materializer }
import io.grpc.internal.testing.TestUtils
import io.grpc.testing.integration2.{ ClientTester, Settings, TestServiceClient }

import scala.concurrent.Await

// TODO #151 use our own Settings object
final case class AkkaGrpcClientScala(clientTesterFactory: Settings => Materializer => ActorSystem => ClientTester)
    extends GrpcClient {
  override def run(args: Array[String]): Unit = {
    TestUtils.installConscryptIfAvailable()
    val settings = Settings.parseArgs(args)

    implicit val sys = ActorSystem()
    implicit val mat = ActorMaterializer()

    val client = new TestServiceClient(clientTesterFactory(settings)(mat)(sys))
    client.setUp()

    try client.run(settings)
    finally {
      client.tearDown()
      Await.result(sys.terminate(), 10.seconds)
    }
  }
}
clvcooke/mnist-reader-scala
MnistLoader.scala
import java.io.{BufferedInputStream, FileInputStream}
import java.util.zip.GZIPInputStream

import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j

/**
 * Created by clvcooke on 6/6/2016.
 */
object MnistLoader {

  private def gzipInputStream(s: String) = new GZIPInputStream(new BufferedInputStream(new FileInputStream(s)))

  private def read32BitInt(i: GZIPInputStream) =
    i.read() * 16777216 /*2^24*/ + i.read() * 65536 /*2^16*/ + i.read() * 256 /*2^8*/ + i.read()

  /**
   *
   * @param baseDirectory the directory for the standard mnist images, file names are assumed
   */
  def getMnistImageData(baseDirectory: String): (IndexedSeq[Int], IndexedSeq[Int], IndexedSeq[INDArray], IndexedSeq[INDArray]) = {
    val testLabels = readLabels(s"$baseDirectory/t10k-labels-idx1-ubyte.gz")
    val trainingLabels = readLabels(s"$baseDirectory/train-labels-idx1-ubyte.gz")
    val testImages = readImages(s"$baseDirectory/t10k-images-idx3-ubyte.gz")
    val trainingImages = readImages(s"$baseDirectory/train-images-idx3-ubyte.gz")
    (testLabels, trainingLabels, testImages, trainingImages)
  }

  /**
   *
   * @param filepath the full file path to the labels file
   * @return
   */
  def readLabels(filepath: String) = {
    val g = gzipInputStream(filepath)
    val magicNumber = read32BitInt(g) //currently not used for anything, as assumptions are made
    val numberOfLabels = read32BitInt(g)
    1.to(numberOfLabels).map(_ => g.read())
  }

  /**
   *
   * @param filepath the full file path of the images file
   * @return
   */
  def readImages(filepath: String) = {
    val g = gzipInputStream(filepath)
    val magicNumber = read32BitInt(g) //currently not used for anything, as assumptions are made
    val numberOfImages = read32BitInt(g)
    val imageSize = read32BitInt(g) * read32BitInt(g) //cols * rows
    1.to(numberOfImages).map(_ => Nd4j.create(1.to(imageSize).map(_ => g.read().toFloat).toArray))
  }
}
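A hedged usage sketch; the directory path is a placeholder for wherever the four standard MNIST .gz files live.

object MnistLoaderExample extends App {
  // Placeholder path; adjust to your local MNIST directory.
  val (testLabels, trainingLabels, testImages, trainingImages) =
    MnistLoader.getMnistImageData("data/mnist")

  println(s"training set: ${trainingImages.size} images, ${trainingLabels.size} labels")
  println(s"test set:     ${testImages.size} images, ${testLabels.size} labels")
  println(s"first training label: ${trainingLabels.head}, image shape: ${trainingImages.head.shape().mkString("x")}")
}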
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/GroupWith.scala
package com.javachen.spark.examples.rdd

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object GroupWith {
  def main(args: Array[String]) {
    val sc = new SparkContext("local[2]", "GroupWith Test")
    val data1 = Array[(String, Int)](("A1", 1), ("A2", 2),
      ("B1", 3), ("B2", 4),
      ("C1", 5), ("C1", 6)
    )
    val data2 = Array[(String, Int)](("A1", 7), ("A2", 8),
      ("B1", 9), ("C1", 0)
    )
    val pairs1 = sc.parallelize(data1, 3)
    val pairs2 = sc.parallelize(data2, 2)

    val result = pairs1.groupWith(pairs2)

    result.foreach(println)

    // output:
    // (B1,(ArrayBuffer(3),ArrayBuffer(9)))
    // (A1,(ArrayBuffer(1),ArrayBuffer(7)))
    // (A2,(ArrayBuffer(2),ArrayBuffer(8)))
    //
    // (C1,(ArrayBuffer(5, 6),ArrayBuffer(0)))
    // (B2,(ArrayBuffer(4),ArrayBuffer()))
  }
}
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/Pipe.scala
package com.javachen.spark.examples.rdd

/**
 *
 * @author <a href="mailto:<EMAIL>">june</a>.
 * @date 2015-05-12 17:21.
 */
object Pipe {

}
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/mllib/MovieLensALS.scala
<reponame>lucienh/learning-spark /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.javachen.spark.examples.mllib import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating} import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import scopt.OptionParser import scala.collection.mutable /** * An example app for ALS on MovieLens data (http://grouplens.org/datasets/movielens/). * Run with * {{{ * bin/run-example org.apache.spark.examples.mllib.MovieLensALS * }}} * A synthetic dataset in MovieLens format can be found at `data/mllib/sample_movielens_data.txt`. * If you use it as a template to create your own app, please use `spark-submit` to submit your app. */ object MovieLensALS { case class Params( input: String = "data/ml-1m/ratings.dat", userDataInput: String = "data/ml-1m/personalRatings.txt", kryo: Boolean = false, numIterations: Int = 20, lambda: Double = 1.0, rank: Int = 10, numUserBlocks: Int = -1, numProductBlocks: Int = -1, implicitPrefs: Boolean = false) def main(args: Array[String]) { val defaultParams = Params() val parser = new OptionParser[Params]("MovieLensALS") { head("MovieLensALS: an example app for ALS on MovieLens data.") opt[Int]("rank") .text(s"rank, default: ${defaultParams.rank}}") .action((x, c) => c.copy(rank = x)) opt[Int]("numIterations") .text(s"number of iterations, default: ${defaultParams.numIterations}") .action((x, c) => c.copy(numIterations = x)) opt[Double]("lambda") .text(s"lambda (smoothing constant), default: ${defaultParams.lambda}") .action((x, c) => c.copy(lambda = x)) opt[Unit]("kryo") .text("use Kryo serialization") .action((_, c) => c.copy(kryo = true)) opt[Int]("numUserBlocks") .text(s"number of user blocks, default: ${defaultParams.numUserBlocks} (auto)") .action((x, c) => c.copy(numUserBlocks = x)) opt[Int]("numProductBlocks") .text(s"number of product blocks, default: ${defaultParams.numProductBlocks} (auto)") .action((x, c) => c.copy(numProductBlocks = x)) opt[Unit]("implicitPrefs") .text("use implicit preference") .action((_, c) => c.copy(implicitPrefs = true)) opt[String]("userDataInput") .required() .text("input paths to user dataset") .action((x, c) => c.copy(userDataInput = x)) arg[String]("<input>") .required() .text("input paths to a MovieLens dataset of ratings") .action((x, c) => c.copy(input = x)) note( """ |For example, the following command runs this app on a synthetic dataset: | | bin/spark-submit --class com.javachen.grab.examples.mllib.MovieLensALS \ | examples/target/scala-*/grab-examples-*.jar \ | --rank 5 --numIterations 20 --lambda 1.0 \ | --userDataInput data/ml-1m/personalRatings.txt \ | data/ml-1m/ratings.dat """.stripMargin) } parser.parse(args, defaultParams).map { params => run(params) } getOrElse { System.exit(1) } } def run(params: 
Params) { val conf = new SparkConf().setAppName(s"MovieLensALS with $params").set("spark.executor.memory", "2g") if (params.kryo) { conf.registerKryoClasses(Array(classOf[mutable.BitSet], classOf[Rating])) .set("spark.kryoserializer.buffer.mb", "8") } val sc = new SparkContext(conf) val ratings = sc.textFile(params.input).map { line => val fields = line.split("::") /* * MovieLens ratings are on a scale of 1-5: * 5: Must see * 4: Will enjoy * 3: It's okay * 2: Fairly bad * 1: Awful * So we should not recommend a movie if the predicted rating is less than 3. * To map ratings to confidence scores, we use * 5 -> 2.5, 4 -> 1.5, 3 -> 0.5, 2 -> -0.5, 1 -> -1.5. This mappings means unobserved * entries are generally between It's okay and Fairly bad. * The semantics of 0 in this expanded world of non-positive weights * are "the same as never having interacted at all". */ if (params.implicitPrefs) { // format: (timestamp % 10, Rating(userId, movieId, rating)) (fields(3).toLong % 10, Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble - 2.5)) } else { // format: (timestamp % 10, Rating(userId, movieId, rating)) (fields(3).toLong % 10, Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble)) } }.cache() val numRatings = ratings.count() val numUsers = ratings.map(_._2.user).distinct().count() val numMovies = ratings.map(_._2.product).distinct().count() println("Got " + numRatings + " ratings from " + numUsers + " users on " + numMovies + " movies.") val model = evaluateMode(params, ratings) predictMoive(params, sc, model) // clean up sc.stop() } def predictMoive(params: Params, sc: SparkContext, model: MatrixFactorizationModel): Unit = { //为用户1推荐10个 var rs = model.recommendProducts(1, 10) var value = "" var key = 0 //保存推荐数据到hbase中 rs.foreach(r => { key = r.user value = value + r.product + ":" + r.rating + "," }) println(value) } def evaluateMode(params: Params, ratings: RDD[(Long, Rating)]): MatrixFactorizationModel = { val training = ratings.values.repartition(4) //建立模型 val start = System.currentTimeMillis() val model = new ALS().setRank(params.rank).setIterations(params.numIterations).setLambda(params.lambda).setImplicitPrefs(params.implicitPrefs).setUserBlocks(params.numUserBlocks).setProductBlocks(params.numProductBlocks).run(training) println("Train Time = " + (System.currentTimeMillis() - start) * 1.0 / 1000) val testRmse = computeRmse(model, training) println("RMSE = " + testRmse) model } /** Compute RMSE (Root Mean Squared Error). */ def computeRmse(model: MatrixFactorizationModel, data: RDD[Rating]) = { val usersProducts = data.map { case Rating(user, product, rate) => (user, product) } val predictions = model.predict(usersProducts).map { case Rating(user, product, rate) => ((user, product), rate) } val ratesAndPreds = data.map { case Rating(user, product, rate) => ((user, product), rate) }.join(predictions).sortByKey() math.sqrt(ratesAndPreds.map { case ((user, product), (r1, r2)) => val err = (r1 - r2) err * err }.mean()) } }
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/Cartesian.scala
package com.javachen.spark.examples.rdd

import org.apache.spark.SparkContext

object Cartesian {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "Cartesian Test")
    val data1 = Array[(String, Int)](("A1", 1), ("A2", 2), ("B1", 3), ("B2", 4), ("C1", 5), ("C1", 6))
    val data2 = Array[(String, Int)](("A1", 7), ("A2", 8), ("B1", 9), ("C1", 0))
    val pairs1 = sc.parallelize(data1, 3)
    val pairs2 = sc.parallelize(data2, 2)

    val resultRDD = pairs1.cartesian(pairs2)

    resultRDD.foreach(println)
    /*
     * Output of task1:
     * ((A1,1),(A1,7))
     * ((A1,1),(A2,8))
     * ((A2,2),(A1,7))
     * ((A2,2),(A2,8))
     * Output of task2:
     * ((A1,1),(B1,9))
     * ((A1,1),(C1,0))
     * ((A2,2),(B1,9))
     * ((A2,2),(C1,0))
     * Output of task3:
     * ((B1,3),(A1,7))
     * ((B1,3),(A2,8))
     * ((B2,4),(A1,7))
     * ((B2,4),(A2,8))
     * Output of task4:
     * ((B1,3),(B1,9))
     * ((B1,3),(C1,0))
     * ((B2,4),(B1,9))
     * ((B2,4),(C1,0))
     * Output of task5:
     * ((C1,5),(A1,7))
     * ((C1,5),(A2,8))
     * ((C1,6),(A1,7))
     * ((C1,6),(A2,8))
     * Output of task6:
     * ((C1,5),(B1,9))
     * ((C1,5),(C1,0))
     * ((C1,6),(B1,9))
     * ((C1,6),(C1,0))
     */
  }
}
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/Lookup.scala
package com.javachen.spark.examples.rdd

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object Lookup {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "LookUp Test")
    val data = Array[(String, Int)](("A", 1), ("B", 2), ("B", 3), ("C", 4), ("C", 5), ("C", 6))
    val pairs = sc.parallelize(data, 3)

    val finalRDD = pairs.lookup("B")

    finalRDD.foreach(println)
    // output:
    // 2
    // 3
  }
}
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/PartitionBy.scala
package com.javachen.spark.examples.rdd

import org.apache.spark.{RangePartitioner, HashPartitioner, SparkContext}

object PartitionBy {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "PartitionBy Test")

    val data1 = Array[(String, Int)](("K", 1), ("T", 2), ("T", 3), ("W", 4), ("W", 5), ("W", 6))
    val pairs = sc.parallelize(data1, 3)

    //val result = pairs.reduce((A, B) => (A._1 + "#" + B._1, A._2 + B._2))
    //val result = pairs.fold(("K0",10))((A, B) => (A._1 + "#" + B._1, A._2 + B._2))
    var result = pairs.partitionBy(new RangePartitioner(2, pairs, true))
    result = pairs.partitionBy(new HashPartitioner(2))
    result.foreach(println)
  }
}
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/mllib/ScalaLocalALS.scala
<reponame>lucienh/learning-spark package com.javachen.grab import org.apache.spark.mllib.recommendation.{ALS, Rating} import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import org.jblas.DoubleMatrix import scala.sys.process._ import org.apache.log4j.{Level, Logger} /** * 本地模式运行 */ object ScalaLocalALS { def main(args: Array[String]): Unit = { val sc = new SparkContext(new SparkConf().setAppName("Scala Collaborative Filtering Example")) Logger.getLogger("org.apache.spark").setLevel(Level.WARN) Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN) Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF) // 1. 加载并解析数据 val data = sc.textFile("data/ml-1m/ratings.dat") val ratings = data.map(_.split("::") match { case Array(user, item, rate, ts) => Rating(user.toInt, item.toInt, rate.toDouble) }).cache() val users = ratings.map(_.user).distinct() val products = ratings.map(_.product).distinct() println("Got "+ratings.count()+" ratings from "+users.count+" users on "+products.count+" products.") //Got 1000209 ratings from 6040 users on 3706 products. // 2. 训练模型 val rank = 12 val lambda = 0.01 val numIterations = 20 val model = ALS.train(ratings, rank, numIterations, lambda) model.userFeatures model.userFeatures.count model.productFeatures model.productFeatures.count // 3. 计算均方差 //从 ratings 中获得只包含用户和商品的数据集 val usersProducts = ratings.map { case Rating(user, product, rate) => (user, product) } usersProducts.count //Long = 1000209 //使用推荐模型对用户商品进行预测评分,得到预测评分的数据集 var predictions = model.predict(usersProducts).map { case Rating(user, product, rate) => ((user, product), rate) } predictions.count //Long = 1000209 //将真实评分数据集与预测评分数据集进行合并 val ratesAndPreds = ratings.map { case Rating(user, product, rate) => ((user, product), rate) }.join(predictions) ratesAndPreds.count //Long = 1000209 val rmse= math.sqrt(ratesAndPreds.map { case ((user, product), (r1, r2)) => val err = (r1 - r2) err * err }.mean()) println(s"RMSE = $rmse") // 4.保存预测评分,确保只生成一个文件,并排序 "rm -r /tmp/result".! ratesAndPreds.sortByKey().repartition(1).sortBy(_._1).map({ case ((user, product), (rate, pred)) => (user + "," + product + "," + rate + "," + pred) }).saveAsTextFile("/tmp/result") //对预测的评分结果按用户进行分组并按评分倒排序 predictions.map { case ((user, product), rate) => (user, (product, rate)) }.groupByKey().map{case (user_id,list)=> (user_id,list.toList.sortBy {case (goods_id,rate)=> - rate}) } // 5. 
对某一个用户推荐所有商品 users.take(5) //Array[Int] = Array(384, 1084, 4904, 3702, 5618) val userId = users.take(1)(0) //384 val K = 10 val topKRecs = model.recommendProducts(userId, K) //topKRecs: Array[org.apache.spark.mllib.recommendation.Rating] = Array(Rating(384,1539,7.360670267591244), Rating(384,219,6.736019537477872), Rating(384,1520,6.730562698267339), Rating(384,775,6.697620546404394), Rating(384,3161,6.49555676613329), Rating(384,2711,6.445916831219404), Rating(384,2503,6.428273027496898), Rating(384,771,6.4255234943275825), Rating(384,853,6.170422982870869), Rating(384,759,6.04929517890501)) println(topKRecs.mkString("\n")) // Rating(384,1539,7.360670267591244) // Rating(384,219,6.736019537477872) // Rating(384,1520,6.730562698267339) // Rating(384,775,6.697620546404394) // Rating(384,3161,6.49555676613329) // Rating(384,2711,6.445916831219404) // Rating(384,2503,6.428273027496898) // Rating(384,771,6.4255234943275825) // Rating(384,853,6.170422982870869) // Rating(384,759,6.04929517890501) val productsForUser=ratings.keyBy(_.user).lookup(384) // Seq[org.apache.spark.mllib.recommendation.Rating] = WrappedArray(Rating(384,2055,2.0), Rating(384,1197,4.0), Rating(384,593,5.0), Rating(384,599,3.0), Rating(384,673,2.0), Rating(384,3037,4.0), Rating(384,1381,2.0), Rating(384,1610,4.0), Rating(384,3074,4.0), Rating(384,204,4.0), Rating(384,3508,3.0), Rating(384,1007,3.0), Rating(384,260,4.0), Rating(384,3487,3.0), Rating(384,3494,3.0), Rating(384,1201,5.0), Rating(384,3671,5.0), Rating(384,1207,4.0), Rating(384,2947,4.0), Rating(384,2951,4.0), Rating(384,2896,2.0), Rating(384,1304,5.0)) productsForUser.size //Int = 22 productsForUser.sortBy(-_.rating).take(10).map(rating => (rating.product, rating.rating)).foreach(println) // (593,5.0) // (1201,5.0) // (3671,5.0) // (1304,5.0) // (1197,4.0) // (3037,4.0) // (1610,4.0) // (3074,4.0) // (204,4.0) // (260,4.0) /* Compute squared error between a predicted and actual rating */ // We'll take the first rating for our example user 789 val actualRating = productsForUser.take(1)(0) //actualRating: org.apache.spark.mllib.recommendation.Rating = Rating(384,2055,2.0) val predictedRating = model.predict(789, actualRating.product) val predictedRating = model.predict(384, actualRating.product) //predictedRating: Double = 1.9426030777174637 //找出和2055商品最相似的商品 val itemId = 2055 val itemFactor = model.productFeatures.lookup(itemId).head //itemFactor: Array[Double] = Array(0.3660752773284912, 0.43573060631752014, -0.3421429991722107, 0.44382765889167786, -1.4875195026397705, 0.6274569630622864, -0.3264533579349518, -0.9939845204353333, -0.8710321187973022, -0.7578890323638916, -0.14621856808662415, -0.7254264950752258) val itemVector = new DoubleMatrix(itemFactor) //itemVector: org.jblas.DoubleMatrix = [0.366075; 0.435731; -0.342143; 0.443828; -1.487520; 0.627457; -0.326453; -0.993985; -0.871032; -0.757889; -0.146219; -0.725426] cosineSimilarity(itemVector, itemVector) // res99: Double = 0.9999999999999999 val sims = model.productFeatures.map{ case (id, factor) => val factorVector = new DoubleMatrix(factor) val sim = cosineSimilarity(factorVector, itemVector) (id, sim) } val sortedSims = sims.top(K)(Ordering.by[(Int, Double), Double] { case (id, similarity) => similarity }) //sortedSims: Array[(Int, Double)] = Array((2055,0.9999999999999999), (2051,0.9138311231145874), (3520,0.8739823400539756), (2190,0.8718466671129721), (2050,0.8612639515847019), (1011,0.8466911667526461), (2903,0.8455764332511272), (3121,0.8227325520485377), (3674,0.8075743004357392), 
(2016,0.8063817280259447)) println(sortedSims.mkString("\n")) // (2055,0.9999999999999999) // (2051,0.9138311231145874) // (3520,0.8739823400539756) // (2190,0.8718466671129721) // (2050,0.8612639515847019) // (1011,0.8466911667526461) // (2903,0.8455764332511272) // (3121,0.8227325520485377) // (3674,0.8075743004357392) // (2016,0.8063817280259447) val sortedSims2 = sims.top(K + 1)(Ordering.by[(Int, Double), Double] { case (id, similarity) => similarity }) //sortedSims2: Array[(Int, Double)] = Array((2055,0.9999999999999999), (2051,0.9138311231145874), (3520,0.8739823400539756), (2190,0.8718466671129721), (2050,0.8612639515847019), (1011,0.8466911667526461), (2903,0.8455764332511272), (3121,0.8227325520485377), (3674,0.8075743004357392), (2016,0.8063817280259447), (3672,0.8016276723120674)) sortedSims2.slice(1, 11).map{ case (id, sim) => (id, sim) }.mkString("\n") // (2051,0.9138311231145874) // (3520,0.8739823400539756) // (2190,0.8718466671129721) // (2050,0.8612639515847019) // (1011,0.8466911667526461) // (2903,0.8455764332511272) // (3121,0.8227325520485377) // (3674,0.8075743004357392) // (2016,0.8063817280259447) // (3672,0.8016276723120674) //计算给该用户推荐的前K个商品的平均准确度MAPK val actualProducts= productsForUser.map(_.product) //actualProducts: Seq[Int] = ArrayBuffer(2055, 1197, 593, 599, 673, 3037, 1381, 1610, 3074, 204, 3508, 1007, 260, 3487, 3494, 1201, 3671, 1207, 2947, 2951, 2896, 1304) val predictedProducts= topKRecs.map(_.product) //predictedProducts:Array[Int] = Array(1539, 219, 1520, 775, 3161, 2711, 2503, 771, 853, 759) val apk10 = avgPrecisionK(actualProducts, predictedProducts, 10) // apk10: Double = 0.0 users.collect.flatMap { user => model.recommendProducts(user, 10) } //计算所有的推荐结果 val itemFactors = model.productFeatures.map { case (prodcut, factor) => factor }.collect() val itemMatrix = new DoubleMatrix(itemFactors) println(itemMatrix.rows, itemMatrix.columns) val imBroadcast = sc.broadcast(itemMatrix) var idxProducts=model.productFeatures.map { case (prodcut, factor) => prodcut }.zipWithIndex().map{case (prodcut, idx) => (idx,prodcut)}.collectAsMap() val idxProductsBroadcast = sc.broadcast(idxProducts) val allRecs = model.userFeatures.map{ case (user, array) => val userVector = new DoubleMatrix(array) val scores = imBroadcast.value.mmul(userVector) val sortedWithId = scores.data.zipWithIndex.sortBy(-_._1) val recommendedProducts = sortedWithId.map(_._2).map{idx=>idxProductsBroadcast.value.get(idx).get} (user, recommendedProducts) //recommendedIds 为索引 } //验证结果是否正确 allRecs.lookup(384).head.take(10) //res50: Array[Int] = Array(1539, 219, 1520, 775, 3161, 2711, 2503, 771, 853, 759) topKRecs.map(_.product) //res49: Array[Int] = Array(1539, 219, 1520, 775, 3161, 2711, 2503, 771, 853, 759) //得到每个用户评分过的所有商品 val userProducts = ratings.map{ case Rating(user, product, rating) => (user, product) }.groupBy(_._1) // finally, compute the APK for each user, and average them to find MAPK val MAPK = allRecs.join(userProducts).map{ case (userId, (predictedProducts, actualList)) => val actualProducts = actualList.map{case (user, product)=>product}.toSeq avgPrecisionK(actualProducts, predictedProducts, K) }.reduce(_ + _) / allRecs.count println("Mean Average Precision at K = " + MAPK) // MSE, RMSE and MAE import org.apache.spark.mllib.evaluation.RegressionMetrics val predictedAndTrue = ratesAndPreds.map { case ((user, product), (actual, predicted)) => (actual, predicted) } val regressionMetrics = new RegressionMetrics(predictedAndTrue) println("Mean Squared Error = " + 
regressionMetrics.meanSquaredError) println("Root Mean Squared Error = " + regressionMetrics.rootMeanSquaredError) // Mean Squared Error = 0.08231947642632852 // Root Mean Squared Error = 0.2869137090247319 // MAPK import org.apache.spark.mllib.evaluation.RankingMetrics val predictedAndTrueForRanking = allRecs.join(userProducts).map{ case (userId, (predicted, actualWithIds)) => val actual = actualWithIds.map(_._2) (predicted.toArray, actual.toArray) } val rankingMetrics = new RankingMetrics(predictedAndTrueForRanking) println("Mean Average Precision = " + rankingMetrics.meanAveragePrecision) // Mean Average Precision = 0.07171412913757183 // Compare to our implementation, using K = 2000 to approximate the overall MAP val MAPK2000 = allRecs.join(userProducts).map{ case (userId, (predicted, actualWithIds)) => val actual = actualWithIds.map(_._2).toSeq avgPrecisionK(actual, predicted, 2000) }.reduce(_ + _) / allRecs.count println("Mean Average Precision = " + MAPK2000) // recommendsByUserTopN.foreachPartition(partitionOfRecords => { // partitionOfRecords.foreach(pair => { // val jedis = RedisClient.pool.getResource // jedis.set(pair._1.toString,pair._2.mkString(",")) // jedis.close() // }) // }) } /* Compute the cosine similarity between two vectors */ def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double = { vec1.dot(vec2) / (vec1.norm2() * vec2.norm2()) } /* Function to compute average precision given a set of actual and predicted ratings */ // Code for this function is based on: https://github.com/benhamner/Metrics def avgPrecisionK(actual: Seq[Int], predicted: Seq[Int], k: Int): Double = { val predK = predicted.take(k) var score = 0.0 var numHits = 0.0 for ((p, i) <- predK.zipWithIndex) { if (actual.contains(p)) { numHits += 1.0 score += numHits / (i.toDouble + 1.0) } } if (actual.isEmpty) { 1.0 } else { score / scala.math.min(actual.size, k).toDouble } } }
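// A small standalone sketch (not part of the original file) of the average-precision
// calculation implemented by avgPrecisionK above: with actual = Seq(1, 2, 3) and
// predicted = Seq(2, 5, 3), hits occur at ranks 1 and 3, so
// APK = (1/1 + 2/3) / min(3, k) ≈ 0.556 for k = 10.
object AvgPrecisionSketch {
  // mirrors avgPrecisionK from the object above, copied here so the sketch is self-contained
  def apk(actual: Seq[Int], predicted: Seq[Int], k: Int): Double = {
    val predK = predicted.take(k)
    var score = 0.0
    var numHits = 0.0
    for ((p, i) <- predK.zipWithIndex) {
      if (actual.contains(p)) {
        numHits += 1.0
        score += numHits / (i.toDouble + 1.0)
      }
    }
    if (actual.isEmpty) 1.0 else score / scala.math.min(actual.size, k).toDouble
  }

  def main(args: Array[String]): Unit =
    println(apk(Seq(1, 2, 3), Seq(2, 5, 3), 10)) // ≈ 0.5556
}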
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/mllib/EvaluateResult.scala
<reponame>lucienh/learning-spark<filename>src/main/scala/com/javachen/spark/examples/mllib/EvaluateResult.scala<gh_stars>10-100
package com.javachen.grab

import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD

/**
 *
 * Created by <a href="mailto:<EMAIL>">june</a> on 2015-05-27 09:13.
 */
object EvaluateResult {
  def coverage(training: RDD[Rating], userRecommends: RDD[(Int, List[Int])]) = {
    userRecommends.flatMap(_._2).distinct().count.toDouble / training.map(_.product).distinct().count
  }

  def popularity(training: RDD[Rating], userRecommends: RDD[(Int, List[Int])]) = {
    var ret = 0.0
    var n = 0
    val item_popularity = training.map { case Rating(user, product, rate) =>
      (product, (user, rate))
    }.groupByKey(4).map { case (product, list) =>
      (product, list.size)
    }.collectAsMap()

    userRecommends.flatMap(_._2).collect().foreach { p =>
      ret = ret + math.log(1 + item_popularity.get(p).get)
      n = n + 1
    }
    ret / n
  }

  def recallAndPrecisionAndF1(training: RDD[Rating], userRecommends: RDD[(Int, List[Int])]): (Double, Double, Double) = {
    val usersProducts: RDD[(Int, Int)] = training.map { case Rating(user, product, rate) =>
      (user, product)
    }
    val groupData = userRecommends.join(usersProducts.groupByKey().map { case (k, v) => (k, v.toList) })

    val (hit, testNum, recNum) = groupData.map { case (user, (mItems, tItems)) =>
      var count = 0
      // precision: number of recommended items that were hit / number of items actually recommended; topN is the recommendation cap
      val precNum = mItems.length
      for (i <- 0 until precNum)
        if (tItems.contains(mItems(i)))
          count += 1
      (count, tItems.length, precNum)
    }.reduce((t1, t2) => (t1._1 + t2._1, t1._2 + t2._2, t1._3 + t2._3))

    val recall: Double = hit * 1.0 / testNum
    val precision: Double = hit * 1.0 / recNum
    val f1: Double = 2 * recall * precision / (recall + precision)
    println(s"$hit,$testNum,$recNum")
    (recall, precision, f1)
  }

  def recallAndPrecision(test: RDD[Rating], result: RDD[Rating]): Double = {
    val numHit: Long = result.intersection(test).count
    val recall: Double = numHit * 1.0 / test.count
    val precision: Double = numHit * 1.0 / result.count
    val f1: Double = 2 * recall * precision / (recall + precision)
    System.out.println("recall : " + recall + "\nprecision : " + precision + "\nf1 : " + f1)
    f1
  }
}
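// A tiny local sketch (no Spark required, not part of the original file) showing the
// arithmetic behind recallAndPrecisionAndF1: with 3 hits out of 8 test items and
// 5 recommended items, recall = 3/8, precision = 3/5, and F1 is their harmonic mean.
object EvaluateResultSketch {
  def main(args: Array[String]): Unit = {
    val (hit, testNum, recNum) = (3, 8, 5)
    val recall = hit * 1.0 / testNum     // 0.375
    val precision = hit * 1.0 / recNum   // 0.6
    val f1 = 2 * recall * precision / (recall + precision)
    println(f"recall=$recall%.3f precision=$precision%.3f f1=$f1%.3f") // f1 ≈ 0.462
  }
}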
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/mllib/ScalaMovieLensALS.scala
<filename>src/main/scala/com/javachen/spark/examples/mllib/ScalaMovieLensALS.scala package com.javachen.spark.examples.mllib import java.util.Random import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating} import org.apache.spark.rdd._ import org.apache.spark.{SparkConf, SparkContext} /** * see:https://github.com/mohit-shrma/RandomSamples/blob/d9f1117bc21bb09d9fa858bc6d95e08e753e6fa0/SparkScala/CollabFilter/src/main/scala/MovieLensALS.scala */ object ScalaMovieLensALS { def main(args: Array[String]) { //import org.apache.log4j.{Logger,Level} //Logger.getLogger("org.apache.spark").setLevel(Level.WARN) //Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF) if (args.length != 2) { println("Usage: /path/to/spark/bin/spark-submit --driver-memory 2g --class com.javachen.spark.examples.mllib.ScalaMovieLensALS " + "target/scala-*/movielens-als-ssembly-*.jar movieLensHomeDir personalRatingsFile") sys.exit(1) } // set up environment val conf = new SparkConf().setAppName("ScalaMovieLensALS") val sc = new SparkContext(conf) // load ratings and movie titles val ratings = sc.textFile(args(0) + "/ratings.dat").map { line => val fields = line.split("::") // format: (timestamp % 10, Rating(userId, movieId, rating)) (fields(3).toLong % 10, Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble)) } val movies = sc.textFile(args(0) + "/movies.dat").map { line => val fields = line.split("::") // format: (movieId, movieName) (fields(0).toInt, fields(1)) }.collect().toMap val numRatings = ratings.count() val numUsers = ratings.map(_._2.user).distinct().count() val numMovies = ratings.map(_._2.product).distinct().count() println(s"Got $numRatings ratings from $numUsers users on $numMovies movies.") //get ratings of user on top 50 popular movies val mostRatedMovieIds = ratings.map(_._2.product) //extract movieId .countByValue //count ratings per movie .toSeq //convert map to seq .sortBy(-_._2) //sort by rating count in decreasing order .take(50) //take 50 most rated .map(_._1) //get movie ids val random = new Random(0) val selectedMovies = mostRatedMovieIds.filter(x => random.nextDouble() < 0.2) .map(x => (x, movies(x))) .toSeq val myRatings = elicitateRatings(selectedMovies) //convert received ratings to RDD[Rating], now this can be worked in parallel val myRatingsRDD = sc.parallelize(myRatings) // split ratings into train (60%), validation (20%), and test (20%) based on the // last digit of the timestamp, add myRatings to train, and cache them val numPartitions = 4 val training = ratings.filter(x => x._1 < 6).values.union(myRatingsRDD).repartition(numPartitions).cache() val validation = ratings.filter(x => x._1 >= 6 && x._1 < 8).values.repartition(numPartitions).cache() val test = ratings.filter(x => x._1 >= 8).values.cache() val numTraining = training.count() val numValidation = validation.count() val numTest = test.count() println(s"Training: $numTraining, validation: $numValidation, test: $numTest") // train models and evaluate them on the validation set val ranks = List(8, 10, 12) val lambdas = List(0.1, 1.0, 10.0) val numIterations = List(10, 20) var bestModel: Option[MatrixFactorizationModel] = None var bestValidationRmse = Double.MaxValue var bestRank = 0 var bestLambda = -1.0 var bestNumIter = -1 for (rank <- ranks; lambda <- lambdas; numIter <- numIterations) { //learn model for these parameter val model = ALS.train(training, rank, numIter, lambda) val validationRmse = computeRmse(model, validation) println(s"RMSE (validation) = $validationRmse for the model 
trained with rank = $rank , lambda = $lambda , and numIter = $numIter .") if (validationRmse < bestValidationRmse) { bestModel = Some(model) bestValidationRmse = validationRmse bestRank = rank bestLambda = lambda bestNumIter = numIter } } // evaluate the best model on the test set val testRmse = computeRmse(bestModel.get, test) println(s"The best model was trained with rank = $bestRank and lambda = $bestLambda , and numIter = $bestNumIter , and its RMSE on the test set is $testRmse .") //find best movies for the user val myRatedMovieIds = myRatings.map(_.product).toSet //generate candidates after taking out already rated movies val candidates = sc.parallelize(movies.keys.filter(!myRatedMovieIds.contains(_)).toSeq) val recommendations = bestModel.get.predict(candidates.map((0, _))).collect.sortBy(-_.rating).take(50) var i = 1 println("Movies recommendation for you: ") recommendations.foreach { r => println("%2d".format(i) + ": " + movies(r.product)) i += 1 } // create a naive baseline and compare it with the best model val meanRating = training.union(validation).map(_.rating).mean val baselineRmse = math.sqrt(test.map(x => (meanRating - x.rating) * (meanRating - x.rating)).mean) val improvement = (baselineRmse - testRmse) / baselineRmse * 100 println("The best model improves the baseline by " + "%1.2f".format(improvement) + "%.") // clean up sc.stop() } /** Compute RMSE (Root Mean Squared Error). */ def computeRmse(model: MatrixFactorizationModel, data: RDD[Rating]) = { val usersProducts = data.map { case Rating(user, product, rate) => (user, product) } val predictions = model.predict(usersProducts).map { case Rating(user, product, rate) => ((user, product), rate) } val ratesAndPreds = data.map { case Rating(user, product, rate) => ((user, product), rate) }.join(predictions).sortByKey() math.sqrt(ratesAndPreds.map { case ((user, product), (r1, r2)) => val err = (r1 - r2) err * err }.mean()) } /** Elicitate ratings from commandline **/ def elicitateRatings(movies: Seq[(Int, String)]) = { val prompt = "Please rate following movie (1-5(best), or 0 if not seen):" println(prompt) val ratings = movies.flatMap { x => var rating: Option[Rating] = None var valid = false while (!valid) { print(x._2 + ": ") try { val r = Console.readInt if (r < 0 || r > 5) { println(prompt) } else { valid = true if (r > 0) { rating = Some(Rating(0, x._1, r)) } } } catch { case e: Exception => println(prompt) } } rating match { case Some(r) => Iterator(r) case None => Iterator.empty } } //end flatMap if (ratings.isEmpty) { error("No rating provided") } else { ratings } } }
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/sparksql/ScalaSparkSQLByReflection.scala
package com.javachen.spark.examples.sparksql

import org.apache.spark.{SparkConf, SparkContext}

object ScalaSparkSQLByReflection {

  // Define the schema using a case class.
  // Note: Case classes in Scala 2.10 can support only up to 22 fields. To work around this limit,
  // you can use custom classes that implement the Product interface.
  case class People(name: String, age: Int)

  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setAppName("ScalaSparkSQL"))
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // this is used to implicitly convert an RDD to a DataFrame.
    import sqlContext.implicits._

    // Create an RDD of People objects and register it as a table.
    val people = sc.textFile("people.txt").map(_.split(",")).map(p => People(p(0), p(1).trim.toInt)).toDF()
    people.registerTempTable("people")

    // SQL statements can be run by using the sql methods provided by sqlContext.
    val teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")

    // The results of SQL queries are DataFrames and support all the normal RDD operations.
    // The columns of a row in the result can be accessed by ordinal.
    teenagers.map(t => "Name: " + t(0)).collect().foreach(println)

    people.saveAsParquetFile("people.parquet")
    val parquetFile = sqlContext.parquetFile("people.parquet")
  }
}
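// For comparison, a sketch (assuming the `people` DataFrame built above and
// `import sqlContext.implicits._` in scope inside main): the same teenager query
// expressed with the DataFrame API instead of a SQL string.
// val teenagersDF = people
//   .filter($"age" >= 13 && $"age" <= 19)
//   .select("name")
// teenagersDF.collect().foreach(println)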
lucienh/learning-spark
src/main/scala/com/javachen/spark/examples/rdd/GroupByAction.scala
<filename>src/main/scala/com/javachen/spark/examples/rdd/GroupByAction.scala
package com.javachen.spark.examples.rdd

import org.apache.spark.SparkContext
import org.apache.spark.RangePartitioner

object GroupByAction {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "GroupByAction Test")
    val data = Array[(String, Int)](("A1", 1), ("A2", 2), ("B1", 6), ("A2", 4), ("B1", 3), ("B1", 5))
    val pairs = sc.parallelize(data, 3)

    // output:
    // (A1,1)
    // (A2,2)
    //
    // (B1,6)
    // (A2,4)
    //
    // (B1,3)
    // (B1,5)
    pairs.foreach(println)

    val result1 = pairs.groupBy(K => K._1)
    val result2 = pairs.groupBy((K: (String, Int)) => K._1, 1)
    val result3 = pairs.groupBy((K: (String, Int)) => K._1, new RangePartitioner(3, pairs))

    // output of result1:
    // (A1,ArrayBuffer((A1,1)))
    //
    // (B1,ArrayBuffer((B1,6), (B1,3), (B1,5)))
    // (A2,ArrayBuffer((A2,2), (A2,4)))
    result1.foreach(println)

    // output of result2:
    // (A1,ArrayBuffer((A1,1)))
    // (B1,ArrayBuffer((B1,6), (B1,3), (B1,5)))
    // (A2,ArrayBuffer((A2,2), (A2,4)))
    result2.foreach(println)

    // output of result3:
    // (A1,ArrayBuffer((A1,1)))
    // (A2,ArrayBuffer((A2,2), (A2,4)))
    //
    // (B1,ArrayBuffer((B1,6), (B1,3), (B1,5)))
    result3.foreach(println)
  }
}
flaviostutz/spark-scala-hdfs-docker-example
app/src/main/scala/app/Main.scala
<gh_stars>0 package app import org.apache.spark.SparkConf import org.apache.spark.sql.SparkSession import org.apache.spark.sql.SaveMode import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.FileSystem import org.apache.hadoop.fs.Path object Main extends App { val hdfsPrefix = sys.env("HDFS_URL") println("Copying sample file to HDFS...") val hadoopConf = new Configuration() hadoopConf.set("fs.defaultFS", sys.env("HDFS_URL")) val hdfs = FileSystem.get(hadoopConf) val srcPath = new Path("/app/people.csv") val destPath = new Path("hdfs:///test/people.csv") hdfs.copyFromLocalFile(srcPath, destPath) println("Initializing Spark context...") // initialise spark context val conf = new SparkConf().setAppName("Example App") val spark: SparkSession = SparkSession.builder.config(conf).getOrCreate() println("Load CSV using Dataframe") val df = spark.read .format("com.databricks.spark.csv") .option("inferSchema", "true") .option("header", "true") .load(hdfsPrefix + "/test/people.csv") df.describe().show() println("Perform some SQL over CSV contents") df.createOrReplaceTempView("people") val df2 = spark.sql("SELECT * FROM people WHERE Height BETWEEN 68 AND 71") df2.describe().show() println("Save CSV using Dataframe") df2.repartition(5).write .format("com.databricks.spark.csv") .option("header", "true") .mode("overwrite") .save(hdfsPrefix + "/test/people-result.csv") // do stuff println("************") println("Hello, world!") val rdd = spark.sparkContext.parallelize(Array(1 to 10)) rdd.count() println("************") // terminate spark context spark.stop() }
aurlien/kafka
core/src/main/scala/kafka/log/LogValidator.scala
<gh_stars>0 /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.nio.ByteBuffer import kafka.api.{ApiVersion, KAFKA_2_1_IV0} import kafka.common.LongRef import kafka.message.{CompressionCodec, NoCompressionCodec, ZStdCompressionCodec} import kafka.server.BrokerTopicStats import kafka.utils.Logging import org.apache.kafka.common.errors.{CorruptRecordException, InvalidTimestampException, UnsupportedCompressionTypeException, UnsupportedForMessageFormatException} import org.apache.kafka.common.record.{AbstractRecords, BufferSupplier, CompressionType, MemoryRecords, Record, RecordBatch, RecordConversionStats, TimestampType} import org.apache.kafka.common.InvalidRecordException import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.utils.Time import scala.collection.{Seq, mutable} import scala.collection.JavaConverters._ private[kafka] object LogValidator extends Logging { /** * Update the offsets for this message set and do further validation on messages including: * 1. Messages for compacted topics must have keys * 2. When magic value >= 1, inner messages of a compressed message set must have monotonically increasing offsets * starting from 0. * 3. When magic value >= 1, validate and maybe overwrite timestamps of messages. * 4. Declared count of records in DefaultRecordBatch must match number of valid records contained therein. * * This method will convert messages as necessary to the topic's configured message format version. If no format * conversion or value overwriting is required for messages, this method will perform in-place operations to * avoid expensive re-compression. * * Returns a ValidationAndOffsetAssignResult containing the validated message set, maximum timestamp, the offset * of the shallow message with the max timestamp and a boolean indicating whether the message sizes may have changed. 
*/ private[kafka] def validateMessagesAndAssignOffsets(records: MemoryRecords, topicPartition: TopicPartition, offsetCounter: LongRef, time: Time, now: Long, sourceCodec: CompressionCodec, targetCodec: CompressionCodec, compactedTopic: Boolean, magic: Byte, timestampType: TimestampType, timestampDiffMaxMs: Long, partitionLeaderEpoch: Int, isFromClient: Boolean, interBrokerProtocolVersion: ApiVersion, brokerTopicStats: BrokerTopicStats): ValidationAndOffsetAssignResult = { if (sourceCodec == NoCompressionCodec && targetCodec == NoCompressionCodec) { // check the magic value if (!records.hasMatchingMagic(magic)) convertAndAssignOffsetsNonCompressed(records, topicPartition, offsetCounter, compactedTopic, time, now, timestampType, timestampDiffMaxMs, magic, partitionLeaderEpoch, isFromClient, brokerTopicStats) else // Do in-place validation, offset assignment and maybe set timestamp assignOffsetsNonCompressed(records, topicPartition, offsetCounter, now, compactedTopic, timestampType, timestampDiffMaxMs, partitionLeaderEpoch, isFromClient, magic, brokerTopicStats) } else { validateMessagesAndAssignOffsetsCompressed(records, topicPartition, offsetCounter, time, now, sourceCodec, targetCodec, compactedTopic, magic, timestampType, timestampDiffMaxMs, partitionLeaderEpoch, isFromClient, interBrokerProtocolVersion, brokerTopicStats) } } private[kafka] def getFirstBatchAndMaybeValidateNoMoreBatches(records: MemoryRecords, sourceCodec: CompressionCodec): RecordBatch = { val batchIterator = records.batches.iterator if (!batchIterator.hasNext) { throw new InvalidRecordException("Record batch has no batches at all") } val batch = batchIterator.next() // if the format is v2 and beyond, or if the messages are compressed, we should check there's only one batch. if (batch.magic() >= RecordBatch.MAGIC_VALUE_V2 || sourceCodec != NoCompressionCodec) { if (batchIterator.hasNext) { throw new InvalidRecordException("Compressed outer record has more than one batch") } } batch } private def validateBatch(topicPartition: TopicPartition, firstBatch: RecordBatch, batch: RecordBatch, isFromClient: Boolean, toMagic: Byte, brokerTopicStats: BrokerTopicStats): Unit = { // batch magic byte should have the same magic as the first batch if (firstBatch.magic() != batch.magic()) { brokerTopicStats.allTopicsStats.invalidMagicNumberRecordsPerSec.mark() throw new InvalidRecordException(s"Batch magic ${batch.magic()} is not the same as the first batch'es magic byte ${firstBatch.magic()} in topic partition $topicPartition.") } if (isFromClient) { if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) { val countFromOffsets = batch.lastOffset - batch.baseOffset + 1 if (countFromOffsets <= 0) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Batch has an invalid offset range: [${batch.baseOffset}, ${batch.lastOffset}] in topic partition $topicPartition.") } // v2 and above messages always have a non-null count val count = batch.countOrNull if (count <= 0) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Invalid reported count for record batch: $count in topic partition $topicPartition.") } if (countFromOffsets != batch.countOrNull) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Inconsistent batch offset range [${batch.baseOffset}, ${batch.lastOffset}] " + s"and count of records $count in topic partition $topicPartition.") } } if (batch.hasProducerId && 
batch.baseSequence < 0) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Invalid sequence number ${batch.baseSequence} in record batch " + s"with producerId ${batch.producerId} in topic partition $topicPartition.") } if (batch.isControlBatch) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Clients are not allowed to write control records in topic partition $topicPartition.") } } if (batch.isTransactional && toMagic < RecordBatch.MAGIC_VALUE_V2) throw new UnsupportedForMessageFormatException(s"Transactional records cannot be used with magic version $toMagic") if (batch.hasProducerId && toMagic < RecordBatch.MAGIC_VALUE_V2) throw new UnsupportedForMessageFormatException(s"Idempotent records cannot be used with magic version $toMagic") } private def validateRecord(batch: RecordBatch, topicPartition: TopicPartition, record: Record, now: Long, timestampType: TimestampType, timestampDiffMaxMs: Long, compactedTopic: Boolean, brokerTopicStats: BrokerTopicStats): Unit = { if (!record.hasMagic(batch.magic)) { brokerTopicStats.allTopicsStats.invalidMagicNumberRecordsPerSec.mark() throw new InvalidRecordException(s"Log record $record's magic does not match outer magic ${batch.magic} in topic partition $topicPartition.") } // verify the record-level CRC only if this is one of the deep entries of a compressed message // set for magic v0 and v1. For non-compressed messages, there is no inner record for magic v0 and v1, // so we depend on the batch-level CRC check in Log.analyzeAndValidateRecords(). For magic v2 and above, // there is no record-level CRC to check. if (batch.magic <= RecordBatch.MAGIC_VALUE_V1 && batch.isCompressed) { try { record.ensureValid() } catch { case e: InvalidRecordException => brokerTopicStats.allTopicsStats.invalidMessageCrcRecordsPerSec.mark() throw new CorruptRecordException(e.getMessage + s" in topic partition $topicPartition.") } } validateKey(record, topicPartition, compactedTopic, brokerTopicStats) validateTimestamp(batch, record, now, timestampType, timestampDiffMaxMs) } private def convertAndAssignOffsetsNonCompressed(records: MemoryRecords, topicPartition: TopicPartition, offsetCounter: LongRef, compactedTopic: Boolean, time: Time, now: Long, timestampType: TimestampType, timestampDiffMaxMs: Long, toMagicValue: Byte, partitionLeaderEpoch: Int, isFromClient: Boolean, brokerTopicStats: BrokerTopicStats): ValidationAndOffsetAssignResult = { val startNanos = time.nanoseconds val sizeInBytesAfterConversion = AbstractRecords.estimateSizeInBytes(toMagicValue, offsetCounter.value, CompressionType.NONE, records.records) val (producerId, producerEpoch, sequence, isTransactional) = { val first = records.batches.asScala.head (first.producerId, first.producerEpoch, first.baseSequence, first.isTransactional) } val newBuffer = ByteBuffer.allocate(sizeInBytesAfterConversion) val builder = MemoryRecords.builder(newBuffer, toMagicValue, CompressionType.NONE, timestampType, offsetCounter.value, now, producerId, producerEpoch, sequence, isTransactional, partitionLeaderEpoch) val firstBatch = getFirstBatchAndMaybeValidateNoMoreBatches(records, NoCompressionCodec) for (batch <- records.batches.asScala) { validateBatch(topicPartition, firstBatch, batch, isFromClient, toMagicValue, brokerTopicStats) for (record <- batch.asScala) { validateRecord(batch, topicPartition, record, now, timestampType, timestampDiffMaxMs, compactedTopic, brokerTopicStats) 
builder.appendWithOffset(offsetCounter.getAndIncrement(), record) } } val convertedRecords = builder.build() val info = builder.info val recordConversionStats = new RecordConversionStats(builder.uncompressedBytesWritten, builder.numRecords, time.nanoseconds - startNanos) ValidationAndOffsetAssignResult( validatedRecords = convertedRecords, maxTimestamp = info.maxTimestamp, shallowOffsetOfMaxTimestamp = info.shallowOffsetOfMaxTimestamp, messageSizeMaybeChanged = true, recordConversionStats = recordConversionStats) } private def assignOffsetsNonCompressed(records: MemoryRecords, topicPartition: TopicPartition, offsetCounter: LongRef, now: Long, compactedTopic: Boolean, timestampType: TimestampType, timestampDiffMaxMs: Long, partitionLeaderEpoch: Int, isFromClient: Boolean, magic: Byte, brokerTopicStats: BrokerTopicStats): ValidationAndOffsetAssignResult = { var maxTimestamp = RecordBatch.NO_TIMESTAMP var offsetOfMaxTimestamp = -1L val initialOffset = offsetCounter.value val firstBatch = getFirstBatchAndMaybeValidateNoMoreBatches(records, NoCompressionCodec) for (batch <- records.batches.asScala) { validateBatch(topicPartition, firstBatch, batch, isFromClient, magic, brokerTopicStats) var maxBatchTimestamp = RecordBatch.NO_TIMESTAMP var offsetOfMaxBatchTimestamp = -1L for (record <- batch.asScala) { validateRecord(batch, topicPartition, record, now, timestampType, timestampDiffMaxMs, compactedTopic, brokerTopicStats) val offset = offsetCounter.getAndIncrement() if (batch.magic > RecordBatch.MAGIC_VALUE_V0 && record.timestamp > maxBatchTimestamp) { maxBatchTimestamp = record.timestamp offsetOfMaxBatchTimestamp = offset } } if (batch.magic > RecordBatch.MAGIC_VALUE_V0 && maxBatchTimestamp > maxTimestamp) { maxTimestamp = maxBatchTimestamp offsetOfMaxTimestamp = offsetOfMaxBatchTimestamp } batch.setLastOffset(offsetCounter.value - 1) if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) batch.setPartitionLeaderEpoch(partitionLeaderEpoch) if (batch.magic > RecordBatch.MAGIC_VALUE_V0) { if (timestampType == TimestampType.LOG_APPEND_TIME) batch.setMaxTimestamp(TimestampType.LOG_APPEND_TIME, now) else batch.setMaxTimestamp(timestampType, maxBatchTimestamp) } } if (timestampType == TimestampType.LOG_APPEND_TIME) { maxTimestamp = now if (magic >= RecordBatch.MAGIC_VALUE_V2) offsetOfMaxTimestamp = offsetCounter.value - 1 else offsetOfMaxTimestamp = initialOffset } ValidationAndOffsetAssignResult( validatedRecords = records, maxTimestamp = maxTimestamp, shallowOffsetOfMaxTimestamp = offsetOfMaxTimestamp, messageSizeMaybeChanged = false, recordConversionStats = RecordConversionStats.EMPTY) } /** * We cannot do in place assignment in one of the following situations: * 1. Source and target compression codec are different * 2. When the target magic is not equal to batches' magic, meaning format conversion is needed. * 3. When the target magic is equal to V0, meaning absolute offsets need to be re-assigned. 
*/ def validateMessagesAndAssignOffsetsCompressed(records: MemoryRecords, topicPartition: TopicPartition, offsetCounter: LongRef, time: Time, now: Long, sourceCodec: CompressionCodec, targetCodec: CompressionCodec, compactedTopic: Boolean, toMagic: Byte, timestampType: TimestampType, timestampDiffMaxMs: Long, partitionLeaderEpoch: Int, isFromClient: Boolean, interBrokerProtocolVersion: ApiVersion, brokerTopicStats: BrokerTopicStats): ValidationAndOffsetAssignResult = { if (targetCodec == ZStdCompressionCodec && interBrokerProtocolVersion < KAFKA_2_1_IV0) throw new UnsupportedCompressionTypeException("Produce requests to inter.broker.protocol.version < 2.1 broker " + "are not allowed to use ZStandard compression") // No in place assignment situation 1 var inPlaceAssignment = sourceCodec == targetCodec var maxTimestamp = RecordBatch.NO_TIMESTAMP val expectedInnerOffset = new LongRef(0) val validatedRecords = new mutable.ArrayBuffer[Record] var uncompressedSizeInBytes = 0 // Assume there's only one batch with compressed memory records; otherwise, return InvalidRecordException // One exception though is that with format smaller than v2, if sourceCodec is noCompression, then each batch is actually // a single record so we'd need to special handle it by creating a single wrapper batch that includes all the records val firstBatch = getFirstBatchAndMaybeValidateNoMoreBatches(records, sourceCodec) // No in place assignment situation 2 and 3: we only need to check for the first batch because: // 1. For most cases (compressed records, v2, for example), there's only one batch anyways. // 2. For cases that there may be multiple batches, all batches' magic should be the same. if (firstBatch.magic != toMagic || toMagic == RecordBatch.MAGIC_VALUE_V0) inPlaceAssignment = false // Do not compress control records unless they are written compressed if (sourceCodec == NoCompressionCodec && firstBatch.isControlBatch) inPlaceAssignment = true val batches = records.batches.asScala for (batch <- batches) { validateBatch(topicPartition, firstBatch, batch, isFromClient, toMagic, brokerTopicStats) uncompressedSizeInBytes += AbstractRecords.recordBatchHeaderSizeInBytes(toMagic, batch.compressionType()) // if we are on version 2 and beyond, and we know we are going for in place assignment, // then we can optimize the iterator to skip key / value / headers since they would not be used at all val recordsIterator = if (inPlaceAssignment && firstBatch.magic >= RecordBatch.MAGIC_VALUE_V2) batch.skipKeyValueIterator(BufferSupplier.NO_CACHING) else batch.streamingIterator(BufferSupplier.NO_CACHING) try { for (record <- batch.asScala) { if (sourceCodec != NoCompressionCodec && record.isCompressed) throw new InvalidRecordException("Compressed outer record should not have an inner record with a " + s"compression attribute set: $record") validateRecord(batch, topicPartition, record, now, timestampType, timestampDiffMaxMs, compactedTopic, brokerTopicStats) uncompressedSizeInBytes += record.sizeInBytes() if (batch.magic > RecordBatch.MAGIC_VALUE_V0 && toMagic > RecordBatch.MAGIC_VALUE_V0) { // inner records offset should always be continuous val expectedOffset = expectedInnerOffset.getAndIncrement() if (record.offset != expectedOffset) { brokerTopicStats.allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() throw new InvalidRecordException(s"Inner record $record inside the compressed record batch does not have incremental offsets, expected offset is $expectedOffset in topic partition $topicPartition.") } if (record.timestamp > 
maxTimestamp) maxTimestamp = record.timestamp } validatedRecords += record } } finally { recordsIterator.close() } } if (!inPlaceAssignment) { val (producerId, producerEpoch, sequence, isTransactional) = { // note that we only reassign offsets for requests coming straight from a producer. For records with magic V2, // there should be exactly one RecordBatch per request, so the following is all we need to do. For Records // with older magic versions, there will never be a producer id, etc. val first = records.batches.asScala.head (first.producerId, first.producerEpoch, first.baseSequence, first.isTransactional) } buildRecordsAndAssignOffsets(toMagic, offsetCounter, time, timestampType, CompressionType.forId(targetCodec.codec), now, validatedRecords, producerId, producerEpoch, sequence, isTransactional, partitionLeaderEpoch, isFromClient, uncompressedSizeInBytes) } else { // we can update the batch only and write the compressed payload as is; // again we assume only one record batch within the compressed set val batch = records.batches.iterator.next() val lastOffset = offsetCounter.addAndGet(validatedRecords.size) - 1 batch.setLastOffset(lastOffset) if (timestampType == TimestampType.LOG_APPEND_TIME) maxTimestamp = now if (toMagic >= RecordBatch.MAGIC_VALUE_V1) batch.setMaxTimestamp(timestampType, maxTimestamp) if (toMagic >= RecordBatch.MAGIC_VALUE_V2) batch.setPartitionLeaderEpoch(partitionLeaderEpoch) val recordConversionStats = new RecordConversionStats(uncompressedSizeInBytes, 0, 0) ValidationAndOffsetAssignResult(validatedRecords = records, maxTimestamp = maxTimestamp, shallowOffsetOfMaxTimestamp = lastOffset, messageSizeMaybeChanged = false, recordConversionStats = recordConversionStats) } } private def buildRecordsAndAssignOffsets(magic: Byte, offsetCounter: LongRef, time: Time, timestampType: TimestampType, compressionType: CompressionType, logAppendTime: Long, validatedRecords: Seq[Record], producerId: Long, producerEpoch: Short, baseSequence: Int, isTransactional: Boolean, partitionLeaderEpoch: Int, isFromClient: Boolean, uncompressedSizeInBytes: Int): ValidationAndOffsetAssignResult = { val startNanos = time.nanoseconds val estimatedSize = AbstractRecords.estimateSizeInBytes(magic, offsetCounter.value, compressionType, validatedRecords.asJava) val buffer = ByteBuffer.allocate(estimatedSize) val builder = MemoryRecords.builder(buffer, magic, compressionType, timestampType, offsetCounter.value, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, partitionLeaderEpoch) validatedRecords.foreach { record => builder.appendWithOffset(offsetCounter.getAndIncrement(), record) } val records = builder.build() val info = builder.info // This is not strictly correct, it represents the number of records where in-place assignment is not possible // instead of the number of records that were converted. It will over-count cases where the source and target are // message format V0 or if the inner offsets are not consecutive. This is OK since the impact is the same: we have // to rebuild the records (including recompression if enabled). 
val conversionCount = builder.numRecords val recordConversionStats = new RecordConversionStats(uncompressedSizeInBytes + builder.uncompressedBytesWritten, conversionCount, time.nanoseconds - startNanos) ValidationAndOffsetAssignResult( validatedRecords = records, maxTimestamp = info.maxTimestamp, shallowOffsetOfMaxTimestamp = info.shallowOffsetOfMaxTimestamp, messageSizeMaybeChanged = true, recordConversionStats = recordConversionStats) } private def validateKey(record: Record, topicPartition: TopicPartition, compactedTopic: Boolean, brokerTopicStats: BrokerTopicStats) { if (compactedTopic && !record.hasKey) { brokerTopicStats.allTopicsStats.noKeyCompactedTopicRecordsPerSec.mark() throw new InvalidRecordException(s"Compacted topic cannot accept message without key in topic partition $topicPartition.") } } /** * This method validates the timestamps of a message. * If the message is using create time, this method checks if it is within acceptable range. */ private def validateTimestamp(batch: RecordBatch, record: Record, now: Long, timestampType: TimestampType, timestampDiffMaxMs: Long): Unit = { if (timestampType == TimestampType.CREATE_TIME && record.timestamp != RecordBatch.NO_TIMESTAMP && math.abs(record.timestamp - now) > timestampDiffMaxMs) throw new InvalidTimestampException(s"Timestamp ${record.timestamp} of message with offset ${record.offset} is " + s"out of range. The timestamp should be within [${now - timestampDiffMaxMs}, ${now + timestampDiffMaxMs}]") if (batch.timestampType == TimestampType.LOG_APPEND_TIME) throw new InvalidTimestampException(s"Invalid timestamp type in message $record. Producer should not set " + s"timestamp type to LogAppendTime.") } case class ValidationAndOffsetAssignResult(validatedRecords: MemoryRecords, maxTimestamp: Long, shallowOffsetOfMaxTimestamp: Long, messageSizeMaybeChanged: Boolean, recordConversionStats: RecordConversionStats) }
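// A standalone sketch (not part of Kafka) of the batch-level consistency rule enforced
// in validateBatch above for client batches with magic >= 2: the record count implied by
// the offset range (lastOffset - baseOffset + 1) must be positive and must match the
// batch's declared record count.
object BatchCountCheckSketch {
  def isConsistent(baseOffset: Long, lastOffset: Long, declaredCount: Int): Boolean = {
    val countFromOffsets = lastOffset - baseOffset + 1
    countFromOffsets > 0 && declaredCount > 0 && countFromOffsets == declaredCount
  }

  def main(args: Array[String]): Unit = {
    println(isConsistent(baseOffset = 100, lastOffset = 104, declaredCount = 5)) // true
    println(isConsistent(baseOffset = 100, lastOffset = 104, declaredCount = 3)) // false
  }
}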
objektwerks/spark.area.of.interest
src/main/scala/aoi/AreaOfInterestJob.scala
<reponame>objektwerks/spark.area.of.interest<gh_stars>0 package aoi import com.typesafe.config.{Config, ConfigFactory} import org.apache.log4j.Logger import org.apache.spark.SparkConf import org.apache.spark.sql.SparkSession import scala.util.Try object AreaOfInterestJob { import AreaOfInterest._ def main(args: Array[String]): Unit = { val logger = Logger.getLogger(getClass.getSimpleName) val conf = ConfigFactory.load("job.conf").getConfig("job") val areaOfInterestRadiusInKilometers = Try(args(0).toDouble).getOrElse(25.0) val hitDaysHence = Try(daysToEpochMillis(args(1).toLong)).getOrElse(daysToEpochMillis(730)) logger.info(s"*** areaOfInterestRadiusInKilometers: $areaOfInterestRadiusInKilometers") logger.info(s"*** hitDaysHence: $hitDaysHence") makeSparkEventLogDir(conf.getString("spark.eventLog.dir")) runJob(logger, conf, areaOfInterestRadiusInKilometers, hitDaysHence) } def runJob(logger: Logger, conf: Config, areaOfInterestRadiusInKilometers: Double, hitDaysHence: Long): Unit = { val sparkConf = new SparkConf() .setMaster(conf.getString("master")) .setAppName(conf.getString("name")) .set("spark.eventLog.enabled", conf.getBoolean("spark.eventLog.enabled").toString) .set("spark.eventLog.dir", conf.getString("spark.eventLog.dir")) .set("spark.serializer", conf.getString("spark.serializer")) .set("spark.kryo.registrationRequired", "true") .registerKryoClasses( Array( classOf[AreaOfInterest], classOf[Array[AreaOfInterest]], classOf[Hit], classOf[Array[Hit]], classOf[HitToAreaOfInterests], classOf[Array[HitToAreaOfInterests]] )) val sparkSession = SparkSession .builder .config(sparkConf) .getOrCreate() logger.info("*** AreaOfInterestApp Spark session built. Press Ctrl C to stop.") sys.addShutdownHook { sparkSession.stop logger.info("*** AreaOfInterestApp Spark session stopped.") } import sparkSession.implicits._ val broadcastAreasOfInterest = sparkSession.sparkContext.broadcast( sparkSession .read .format("csv") .option("header", true) .option("delimiter", ",") .schema(areaOfInterestStructType) .load(conf.getString("aoi")) .as[AreaOfInterest] .collect ) val hits = sparkSession .readStream .option("basePath", conf.getString("hits")) .option("header", true) .option("delimiter", ",") .schema(hitStructType) .csv(conf.getString("hits")) .as[Hit] .filter(hit => hit.utc > hitDaysHence) .map(hit => mapHitToAreaOfInterests(broadcastAreasOfInterest.value, areaOfInterestRadiusInKilometers, hit)) .as[HitToAreaOfInterests] .writeStream .format("console") .option("truncate", "false") .start hits.awaitTermination () } }
objektwerks/spark.area.of.interest
build.sbt
name := "spark.area.of.interest" organization := "objektwerks" version := "0.1-SNAPSHOT" scalaVersion := "2.13.7" libraryDependencies ++= { val sparkVersion = "3.2.0" Seq( "org.apache.spark" %% "spark-core" % sparkVersion, "org.apache.spark" %% "spark-streaming" % sparkVersion, "org.apache.spark" %% "spark-sql" % sparkVersion, "com.typesafe" % "config" % "1.4.1" ) }
objektwerks/spark.area.of.interest
src/main/scala/aoi/AreaOfInterest.scala
package aoi import java.lang.Math.{atan2, cos, sin, sqrt} import java.time.{Duration, Instant} import org.apache.log4j.Logger import org.apache.spark.sql.Encoders import scala.collection.mutable case class AreaOfInterest(id: String, latitude: Double, longitude: Double) case class Hit(id: String, utc: Long, latitude: Double, longitude: Double) case class HitToAreaOfInterests(hitId: String, aoiIds: Array[String]) object AreaOfInterest { private val logger = Logger.getLogger(getClass.getSimpleName) private val earthRadiusInKilometers = 6371 val areaOfInterestStructType = Encoders.product[AreaOfInterest].schema val hitStructType = Encoders.product[Hit].schema val hitToAreaOfInterests = Encoders.product[HitToAreaOfInterests] def daysToEpochMillis(days: Long): Long = Instant.now.minus(Duration.ofDays(days)).toEpochMilli def mapHitToAreaOfInterests(areaOfInterests: Array[AreaOfInterest], areaOfInterestRadiusInKilometers: Double, hit: Hit): HitToAreaOfInterests = { val buffer = new mutable.ArrayBuffer[String] areaOfInterests.foreach { areaOfInterest => if (isHitWithinAreaOfInterest(hit, areaOfInterest, areaOfInterestRadiusInKilometers)) buffer += areaOfInterest.id () } HitToAreaOfInterests(hit.id, buffer.toArray) } /** * Haversine Algo */ private def isHitWithinAreaOfInterest(hit: Hit, areaOfInterest: AreaOfInterest, areaOfInterestRadiusInKilometers: Double): Boolean = { val deltaLatitude = (hit.latitude - areaOfInterest.latitude).toRadians val deltaLongitude = (hit.longitude - areaOfInterest.longitude).toRadians val areaOfInterestLatitudeInRadians = areaOfInterest.latitude.toRadians val locationLatitudeInRadians = hit.latitude.toRadians val a = { sin(deltaLatitude / 2) * sin(deltaLatitude / 2) + sin(deltaLongitude / 2) * sin(deltaLongitude / 2) * cos(areaOfInterestLatitudeInRadians) * cos(locationLatitudeInRadians) } val c = 2 * atan2(sqrt(a), sqrt(1 - a)) val distanceBetweenHitAndAreaOfInterest = earthRadiusInKilometers * c val isHit = if (distanceBetweenHitAndAreaOfInterest < areaOfInterestRadiusInKilometers) true else false logger.info("--------------------------------------------------") logger.info(s"Hit = $isHit") logger.info(s"$hit") logger.info(s"$areaOfInterest") logger.info(s"Delta: $distanceBetweenHitAndAreaOfInterest") logger.info(s"Radius: $areaOfInterestRadiusInKilometers") logger.info("--------------------------------------------------") isHit } }
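// A standalone sketch (not part of the original file) of the same haversine distance
// used by isHitWithinAreaOfInterest above, checked against a known pair of points:
// London (51.5074, -0.1278) to Paris (48.8566, 2.3522) is roughly 344 km great-circle.
object HaversineSketch {
  import java.lang.Math.{atan2, cos, sin, sqrt}

  private val earthRadiusInKilometers = 6371

  def distanceKm(lat1: Double, lon1: Double, lat2: Double, lon2: Double): Double = {
    val dLat = (lat2 - lat1).toRadians
    val dLon = (lon2 - lon1).toRadians
    val a = sin(dLat / 2) * sin(dLat / 2) +
      sin(dLon / 2) * sin(dLon / 2) * cos(lat1.toRadians) * cos(lat2.toRadians)
    val c = 2 * atan2(sqrt(a), sqrt(1 - a))
    earthRadiusInKilometers * c
  }

  def main(args: Array[String]): Unit =
    println(distanceKm(51.5074, -0.1278, 48.8566, 2.3522)) // ≈ 343.5
}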
danischroeter/kamon-prometheus
src/main/scala/kamon/prometheus/MetricOverrideReporter.scala
<gh_stars>10-100 /* ========================================================================================= * Copyright © 2013-2018 the kamon project <http://kamon.io/> * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language governing permissions * and limitations under the License. * ========================================================================================= */ package kamon package prometheus import com.typesafe.config.Config import kamon.metric.{Metric, MetricSnapshot, PeriodSnapshot} import kamon.module.MetricReporter import kamon.tag.{Tag, TagSet} import scala.collection.JavaConverters.{asScalaBufferConverter, mapAsScalaMapConverter} class MetricOverrideReporter(wrappedReporter: MetricReporter, config: Config = Kamon.config) extends MetricReporter { private var metricsMap: Map[String, MetricMapping] = getMetricMapping(config) override def reportPeriodSnapshot(snapshot: PeriodSnapshot): Unit = { val updatedSnapshot = snapshot.copy( histograms = snapshot.histograms.map(updateDistribution), timers = snapshot.timers.map(updateDistribution), rangeSamplers = snapshot.rangeSamplers.map(updateDistribution), gauges = snapshot.gauges.map(updateDistribution), counters = snapshot.counters.map(updateDistribution)) wrappedReporter.reportPeriodSnapshot(updatedSnapshot) } override def stop(): Unit = wrappedReporter.stop() override def reconfigure(config: Config): Unit = { metricsMap = getMetricMapping(config) wrappedReporter.reconfigure(config) } private def remapTags(tags: TagSet, mapping: MetricMapping): TagSet = { val remappedTags = TagSet.builder() tags.iterator().foreach(tag => { if(!mapping.tagsToDelete.contains(tag.key)) { remappedTags.add(mapping.tagsToRename.getOrElse(tag.key, tag.key), Tag.unwrapValue(tag).toString) } }) remappedTags.build() } private def updateDistribution[T <: Metric.Settings, U](metric: MetricSnapshot[T, U]): MetricSnapshot[T, U] = { metricsMap.get(metric.name).map(mapping => { val mappedInstruments = if(mapping.tagsToRename.isEmpty && mapping.tagsToDelete.isEmpty) metric.instruments else { metric.instruments.map(inst => { inst.copy(tags = remapTags(inst.tags, mapping)) }) } metric.copy( name = mapping.newName.getOrElse(metric.name), instruments = mappedInstruments ) }).getOrElse(metric) } // // private def updateValue(metricValue: MetricValue): MetricValue = { // val mappingForValue = metricsMap.get(metricValue.name) // // metricValue.copy( // name = mappingForValue.flatMap(_.newName).getOrElse(metricValue.name), // tags = mappingForValue.map( // mapping => remapTags(metricValue.tags, mapping) // ).getOrElse(metricValue.tags) // ) // } private def getMetricMapping(config: Config): Map[String, MetricMapping] = { val mappingConfig = config.getConfig("kamon.prometheus.metric-overrides") mappingConfig.configurations.map { case (name, config) => (name, MetricMapping( if (config.hasPath("name")) Some(config.getString("name")) else None, if (config.hasPath("delete-tags")) config.getStringList("delete-tags").asScala.toSet else Set.empty, if (config.hasPath("rename-tags")) config.getObject("rename-tags").unwrapped().asScala.toMap .map { case 
(tagName, value) => (tagName, value.toString) } else Map.empty )) } } private case class MetricMapping( newName: Option[String], tagsToDelete: Set[String], tagsToRename: Map[String, String] ) }
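A minimal configuration sketch (not part of the original source) showing how this reporter could be wired up. The metric name, the tag names, and the use of PrometheusReporter as the wrapped reporter are illustrative assumptions only.

import com.typesafe.config.ConfigFactory
import kamon.prometheus.{MetricOverrideReporter, PrometheusReporter}

object OverrideExample {
  // Hypothetical override block: rename a metric, drop one tag, rename another.
  val overrides = ConfigFactory.parseString(
    """
      |kamon.prometheus.metric-overrides {
      |  "span.processing-time" {
      |    name = "span_processing_time_seconds"
      |    delete-tags = [ "host" ]
      |    rename-tags { "operation" = "span_name" }
      |  }
      |}
    """.stripMargin).withFallback(ConfigFactory.load())

  // Wrap any MetricReporter; here a PrometheusReporter is assumed to be available.
  val reporter = new MetricOverrideReporter(new PrometheusReporter(), overrides)
}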
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/kernels/AnovaKernel.scala
package io.github.mandar2812.dynaml.kernels

import breeze.linalg.{DenseMatrix, DenseVector}

/**
  * @author mandar2812
  * Anova Kernel
  *
  * Hyper-parameters: "sigma", "k" (the exponent applied to each input component)
  * and "degree" (the multiplier of the squared difference inside the exponential).
  */
class AnovaKernel(si: Double = 1.0, exp: Double = 4.0, d: Double = 2.0)
  extends SVMKernel[DenseMatrix[Double]]
    with LocalSVMKernel[DenseVector[Double]]
    with Serializable {

  override val hyper_parameters = List("sigma", "k", "degree")

  state = Map("sigma" -> si, "k" -> exp, "degree" -> d)

  private var sigma: Double = si

  private var k = exp

  private var degree = d

  def setsigma(b: Double): Unit = {
    this.sigma = b
    state += ("sigma" -> b)
  }

  def setk(kl: Double) = {
    this.k = kl
    state += ("k" -> kl)
  }

  override def evaluateAt(
    config: Map[String, Double])(
    x: DenseVector[Double],
    y: DenseVector[Double]): Double = {
    x.toArray
      .zip(y.toArray)
      .map{couple =>
        math.exp(-1.0*config("degree")*config("sigma")*math.pow(
          math.pow(couple._1, config("k")) -
            math.pow(couple._2, config("k")), 2))
      }.sum
  }

}

/**
  * Scalar (one dimensional input) version of the Anova kernel defined above.
  */
class AnovaCovFunc(si: Double = 1.0, exp: Double = 2.0, d: Double = 2.0)
  extends LocalSVMKernel[Double] {

  override val hyper_parameters = List("sigma", "k", "degree")

  state = Map("sigma" -> si, "k" -> exp, "degree" -> d)

  private var sigma: Double = si

  private var k = exp

  private var degree = d

  def setsigma(b: Double): Unit = {
    this.sigma = b
    state += ("sigma" -> b)
  }

  def setk(kl: Double) = {
    this.k = kl
    state += ("k" -> kl)
  }

  // Note: the configuration key is "degree", matching hyper_parameters and state above.
  override def evaluateAt(config: Map[String, Double])(x: Double, y: Double): Double =
    math.exp(-1.0*config("degree")*config("sigma")*math.pow(
      math.pow(x, config("k")) -
        math.pow(y, config("k")), 2))
}
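A short usage sketch (added here for illustration): evaluating the ANOVA kernel on two breeze vectors with an explicitly supplied hyper-parameter configuration.

import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.kernels.AnovaKernel

object AnovaKernelExample {
  val kernel = new AnovaKernel(si = 0.5, exp = 2.0, d = 2.0)

  // evaluateAt expects the hyper-parameter values explicitly;
  // the keys match the kernel's hyper_parameters list.
  val config = Map("sigma" -> 0.5, "k" -> 2.0, "degree" -> 2.0)

  val x = DenseVector(1.0, 2.0, 3.0)
  val y = DenseVector(1.1, 1.9, 3.2)

  val k_xy: Double = kernel.evaluateAt(config)(x, y)
}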
amitkumarj441/DynaML
dynaml-notebook/src/main/scala-2.11/io/github/mandar2812/dynaml/zeppelin/DynaMLZeppelinInterpreter.scala
<gh_stars>0 package io.github.mandar2812.dynaml.zeppelin import java.nio.charset.Charset import java.util.Properties import ammonite.interp.{Parsers, Preprocessor} import ammonite.repl.Repl import ammonite.runtime.{History, Storage} import ammonite.util._ import ammonite.util.Util.{CodeSource, VersionedWrapperId} import fastparse.core.Parsed import io.github.mandar2812.dynaml.DynaZeppelin import io.github.mandar2812.dynaml.repl.{Defaults, DynaMLInterpreter} import org.apache.commons.io.output.ByteArrayOutputStream import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion import org.apache.zeppelin.interpreter.{Interpreter, InterpreterContext, InterpreterResult} import scala.collection.JavaConversions._ class DynaMLZeppelinInterpreter(properties: Properties) extends Interpreter(properties) { protected val outputBuffer = new ByteArrayOutputStream() protected val errorBuffer = new ByteArrayOutputStream() protected val dynaml_instance = new DynaZeppelin(outputStream = outputBuffer, errorStream = errorBuffer) protected var CURRENT_LINE : Int = 0 var lastException: Throwable = null var history = new History(Vector()) val maybe_interp = dynaml_instance.instantiateDynaZepplinInterpreter() var dynaml_interp: DynaMLInterpreter = null val storageBackend: Storage = new Storage.Folder(Defaults.ammoniteHome) override def cancel(interpreterContext: InterpreterContext) = { dynaml_interp.mainThread.interrupt() } override def getFormType = Interpreter.FormType.NATIVE override def getProgress(interpreterContext: InterpreterContext) = 0 override def open() = { if (maybe_interp.isRight) dynaml_interp = maybe_interp.right.get } override def close() = { } override def completion(buf: String, cursor: Int) = { val comp = dynaml_interp.compilerManager.complete(cursor, dynaml_interp.predefImports.toString(), buf) comp._2.zip(comp._3).map(s => new InterpreterCompletion(s._1, s._2)) } def addHistory(code: String) = if (code != "") { storageBackend.fullHistory() = storageBackend.fullHistory() :+ code history = history :+ code } override def interpret(s: String, interpreterContext: InterpreterContext) = { addHistory(s) /*val wrapperName = Name("cmd" + CURRENT_LINE) val fileName = wrapperName.encoded + ".sc" val result = for { blocks <- Preprocessor.splitScript(ammonite.interp.Interpreter.skipSheBangLine(s), fileName) codeSource = CodeSource( wrapperName, Seq(), Seq(Name("ammonite"), Name("$sess")), Some(dynaml_interp.wd/"(console)") ) metadata <- dynaml_interp.processAllScriptBlocks( blocks.map(_ => None), Res.Success(blocks), dynaml_interp.predefImports ++ dynaml_interp.frameImports, codeSource, (processed, indexedWrapperName) => dynaml_interp.evaluateLine( processed, dynaml_interp.printer, fileName, indexedWrapperName, silent = false, () => CURRENT_LINE += 1), autoImport = true, "" ) } yield { metadata } if(result.isSuccess) { val output = outputBuffer.toString(Charset.defaultCharset()) outputBuffer.reset() val resStr = result.flatMap(d => { Res(Some(d.blockInfo.map(blockm => blockm.finalImports.value.map(d => d.fromName.raw).mkString("\n")).mkString("\n")), "") }) new InterpreterResult(InterpreterResult.Code.SUCCESS, output) } else { new InterpreterResult(InterpreterResult.Code.ERROR, result.toString) }*/ Parsers.Splitter.parse(s) match { case Parsed.Success(value, idx) => val computation_output = dynaml_interp.processLine(s, value, CURRENT_LINE, false, () => CURRENT_LINE += 1) val output = outputBuffer.toString(Charset.defaultCharset()) val error = errorBuffer.toString(Charset.defaultCharset()) 
if(computation_output.isSuccess) { outputBuffer.reset() new InterpreterResult(InterpreterResult.Code.SUCCESS, output) } else { errorBuffer.reset() new InterpreterResult(InterpreterResult.Code.ERROR, "Syntax Error Mofo!") } case Parsed.Failure(_, index, extra) => new InterpreterResult(InterpreterResult.Code.ERROR, fastparse.core.ParseError.msg(extra.input, extra.traced.expected, index)) } } def evaluate(s: String) = { addHistory(s) val wrapperName = Name("cmd" + CURRENT_LINE) val fileName = wrapperName.encoded + ".sc" def compileRunBlock( leadingSpaces: String, hookInfo: ImportHookInfo, codeSource: CodeSource, eval: (Preprocessor.Output, Name) => Res[(Evaluated, Tag)], indexedWrapperName: Name, wrapperIndex: Int = 1) = { val printSuffix = if (wrapperIndex == 1) "" else " #" + wrapperIndex dynaml_interp.printer.info("Compiling " + codeSource.printablePath + printSuffix) for{ processed <- dynaml_interp.compilerManager.preprocess(codeSource.fileName).transform( hookInfo.stmts, "", leadingSpaces, codeSource.pkgName, indexedWrapperName, dynaml_interp.predefImports ++ dynaml_interp.frameImports ++ hookInfo.imports, _ => "scala.Iterator[String]()", extraCode = "", skipEmpty = false ) (ev, tag) <- eval(processed, indexedWrapperName) } yield ScriptOutput.BlockMetadata( VersionedWrapperId(ev.wrapper.map(_.encoded).mkString("."), tag), leadingSpaces, hookInfo, ev.imports ) } val result = for { blocks <- Preprocessor.splitScript(ammonite.interp.Interpreter.skipSheBangLine(s), fileName) codeSource = CodeSource( wrapperName, Seq(), Seq(Name("ammonite"), Name("$sess")), Some(dynaml_interp.wd/"(console)") ) indexedWrapperName = ammonite.interp.Interpreter.indexWrapperName(codeSource.wrapperName, 1) allSplittedChunks <- Res.Success(blocks) (leadingSpaces, stmts) = allSplittedChunks(1 - 1) (hookStmts, importTrees) = dynaml_interp.parseImportHooks(codeSource, stmts) hookInfo <- dynaml_interp.resolveImportHooks(importTrees, hookStmts, codeSource) res <- compileRunBlock(leadingSpaces, hookInfo, codeSource, (processed, indexedWrapperName) => dynaml_interp.evaluateLine( processed, dynaml_interp.printer, fileName, indexedWrapperName, silent = false, () => CURRENT_LINE += 1), indexedWrapperName ) /*metadata <- dynaml_interp.processAllScriptBlocks( blocks.map(_ => None), Res.Success(blocks), dynaml_interp.predefImports ++ dynaml_interp.frameImports, codeSource, (processed, indexedWrapperName) => dynaml_interp.evaluateLine( processed, dynaml_interp.printer, fileName, indexedWrapperName, silent = false, () => CURRENT_LINE += 1), autoImport = true, "" )*/ } yield { res } if(result.isSuccess) { val output = outputBuffer.toString(Charset.defaultCharset()) outputBuffer.reset() /*val resStr = result.flatMap(d => { Res(Some(d.blockInfo.map(blockm => blockm.finalImports.value.map(d => d.fromName.raw).mkString("\n")).mkString("\n")), "") })*/ output } else { result.toString } } }
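A minimal construction sketch (illustrative only); in practice the interpreter is instantiated and driven by the Zeppelin runtime, which also supplies the InterpreterContext required by interpret.

import java.util.Properties
import io.github.mandar2812.dynaml.zeppelin.DynaMLZeppelinInterpreter

object ZeppelinInterpreterSketch {
  val interp = new DynaMLZeppelinInterpreter(new Properties())
  interp.open()  // binds the underlying DynaML/Ammonite interpreter if it could be instantiated
}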
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/utils/Utils.scala
<gh_stars>0 package io.github.mandar2812.dynaml.tensorflow.utils import io.github.mandar2812.dynaml.pipes.{DataPipe, MetaPipe12} import io.github.mandar2812.dynaml.tensorflow.data.AbstractDataSet import org.platanios.tensorflow.api.core.client.Fetchable import org.platanios.tensorflow.api.learn.estimators.Estimator import org.platanios.tensorflow.api.ops.Output import org.platanios.tensorflow.api.ops.io.data.Dataset import org.platanios.tensorflow.api.{Shape, Tensor} object Utils { /** * Convert a float tensor to a Sequence. * */ def toDoubleSeq(t: Tensor): Iterator[Double] = { val datatype = t.dataType.toString() t.entriesIterator.map(x => if(datatype == "FLOAT64") x.asInstanceOf[Double] else x.asInstanceOf[Float].toDouble) } def get_ffstack_properties( neuron_counts: Seq[Int], ff_index: Int, data_type: String): (Seq[Shape], Seq[String], Seq[String]) = { val layer_parameter_names = (ff_index until ff_index + neuron_counts.length - 1).map(i => "Linear_"+i+"/Weights") val layer_shapes = neuron_counts.sliding(2).toSeq.map(c => Shape(c.head, c.last)) val layer_datatypes = Seq.fill(layer_shapes.length)(data_type) (layer_shapes, layer_parameter_names, layer_datatypes) } /** * Calculate the Kullback Leibler divergence of * a probability density from a prior density. * */ def kl(prior: Output, p: Output): Output = prior.divide(p).log.multiply(prior).sum(axes = 1).mean() def kl(prior: Tensor, p: Tensor): Output = prior.divide(p).log.multiply(prior).sum(axes = 1).mean() /** * Calculate the Jensen Shannon divergence * between a probability and a target probability. * */ def js(target_prob: Output, prob: Output) = { val m = target_prob.add(prob).divide(2.0) kl(target_prob, m).add(kl(prob, m)).multiply(0.5) } def js(target_prob: Tensor, prob: Tensor) = { val m = target_prob.add(prob).divide(2.0) kl(target_prob, m).add(kl(prob, m)).multiply(0.5) } /** * Calculate the Hellinger distance between two * probability distributions. * */ def hellinger(target_prob: Output, prob: Output) = target_prob.sqrt.subtract(prob.sqrt).square.sum().sqrt.divide(math.sqrt(2.0)) def hellinger(target_prob: Tensor, prob: Tensor) = target_prob.sqrt.subtract(prob.sqrt).square.sum().sqrt.divide(math.sqrt(2.0)) def cross_entropy(target_prob: Output, prob: Output) = target_prob.multiply(prob.log).sum(axes = 1).multiply(-1.0).mean() /** * Calculate the cross-entropy of two * probability distributions. 
* */ def cross_entropy(target_prob: Tensor, prob: Tensor) = target_prob.multiply(prob.log).sum(axes = 1).multiply(-1.0).mean() def buffered_preds[ IT, IO, ID, IS, I, TT, TO, TD, TS, EI, InferInput, InferOutput, ModelInferenceOutput]( predictiveModel: Estimator[IT, IO, ID, IS, I, (IT, TT), (IO, TO), (ID, TD), (IS, TS), (I, EI)], workingData: InferInput, buffer: Int, dataSize: Int)( implicit getSplitByIndex: MetaPipe12[InferInput, Int, Int, InferInput], concatenateSplits: DataPipe[Iterable[InferOutput], InferOutput], evFetchableIO: Fetchable.Aux[IO, IT], evFetchableI: Fetchable.Aux[I, ModelInferenceOutput], evFetchableIIO: Fetchable.Aux[(IO, I), (IT, ModelInferenceOutput)], ev: Estimator.SupportedInferInput[InferInput, InferOutput, IT, IO, ID, IS, ModelInferenceOutput] ): InferOutput = { val get_data_split = getSplitByIndex(workingData) val preds_splits: Iterable[InferOutput] = (0 until dataSize) .grouped(buffer) .map(indices => { val progress = math.round(10*indices.head*buffer*100.0/dataSize)/10d print("Progress %:\t") pprint.pprintln(progress) predictiveModel.infer[InferInput, InferOutput, ModelInferenceOutput]( () => get_data_split(indices.head, indices.last)) }).toIterable concatenateSplits(preds_splits) } def predict_data[ IT, IO, ID, IS, I, TT, TO, TD, TS, EI, InferOutput, ModelInferenceOutput]( predictiveModel: Estimator[IT, IO, ID, IS, I, (IT, TT), (IO, TO), (ID, TD), (IS, TS), (I, EI)], data: AbstractDataSet[IT, TT], pred_flags: (Boolean, Boolean) = (false, true), buff_size: Int = 400)( implicit getSplitByIndex: MetaPipe12[IT, Int, Int, IT], concatenateSplits: DataPipe[Iterable[InferOutput], InferOutput], evFetchableIO: Fetchable.Aux[IO, IT], evFetchableI: Fetchable.Aux[I, ModelInferenceOutput], evFetchableIIO: Fetchable.Aux[(IO, I), (IT, ModelInferenceOutput)], ev: Estimator.SupportedInferInput[IT, InferOutput, IT, IO, ID, IS, ModelInferenceOutput] ): (Option[InferOutput], Option[InferOutput]) = { val train_preds: Option[InferOutput] = if (pred_flags._1) { println("\nGenerating predictions for training data.\n") val predictions = buffered_preds[ IT, IO, ID, IS, I, TT, TO, TD, TS, EI, IT, InferOutput, ModelInferenceOutput]( predictiveModel, data.trainData, buff_size, data.nTrain) Some(predictions) } else None val test_preds: Option[InferOutput] = if (pred_flags._2) { println("\nGenerating predictions for test data.\n") val predictions = buffered_preds[ IT, IO, ID, IS, I, TT, TO, TD, TS, EI, IT, InferOutput, ModelInferenceOutput]( predictiveModel, data.testData, buff_size, data.nTest) Some(predictions) } else None (train_preds, test_preds) } /*def predict[ IT, IO, ID, IS, I, TT, TO, TD, TS, EI, ModelInferenceOutput]( predictiveModel: Estimator[IT, IO, ID, IS, I, (IT, TT), (IO, TO), (ID, TD), (IS, TS), (I, EI)], data: DataSet[IT, IO, ID, IS])( implicit evFetchableIO: Fetchable.Aux[IO, IT], evFetchableI: Fetchable.Aux[I, ModelInferenceOutput], evFetchableIIO: Fetchable.Aux[(IO, I), (IT, ModelInferenceOutput)], ev: Estimator.SupportedInferInput[ Dataset[IT,IO,ID,IS], Iterator[(IT, ModelInferenceOutput)], IT, IO, ID, IS, ModelInferenceOutput] ): Iterator[(IT, ModelInferenceOutput)] = predictiveModel.infer(() => data.get)*/ }
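A small sketch (added for illustration) of the shape and name bookkeeping performed by get_ffstack_properties.

import io.github.mandar2812.dynaml.tensorflow.utils.Utils

object FFStackPropertiesExample {
  // For a 10 -> 8 -> 4 stack starting at layer index 1 this yields
  // shapes Shape(10, 8) and Shape(8, 4), parameter names "Linear_1/Weights"
  // and "Linear_2/Weights", and a matching sequence of data type strings.
  val (layerShapes, layerNames, layerDataTypes) =
    Utils.get_ffstack_properties(neuron_counts = Seq(10, 8, 4), ff_index = 1, data_type = "FLOAT64")
}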
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/StateSpaceModel.scala
package io.github.mandar2812.dynaml.models.statespace import java.io.Serializable import POMP._ import breeze.linalg.DenseVector import breeze.stats.distributions.Rand trait StateSpaceModel extends Serializable { // The observation model def observation: Eta => Rand[Observation] // the link function def link(x: Gamma): Eta = Vector(x) // deterministic transformation, such as seasonality def f(s: State, t: Time): Gamma // initialise the SDE state def x0: Rand[State] // Step the SDE def stepFunction: (State, TimeIncrement) => Rand[State] // calculate the likelihood of the observation given the state def dataLikelihood: (Eta, Observation) => LogLikelihood } object StateSpaceModel { def op(mod1: Parameters => StateSpaceModel, mod2: Parameters => StateSpaceModel): Parameters => StateSpaceModel = p => new StateSpaceModel { def observation = x => p match { case BranchParameter(lp,_) => mod1(lp).observation(x) case param: LeafParameter => mod1(param).observation(x) } override def link(x: Double) = mod1(p).link(x) def f(s: State, t: Time) = s match { case BranchState(ls, rs) => mod1(p).f(ls, t) + mod2(p).f(rs, t) case x: LeafState => mod1(p).f(x, t) } def x0 = p match { case BranchParameter(lp, rp) => for { l <- mod1(lp).x0 r <- mod2(rp).x0 } yield l |+| r case param: LeafParameter => for { l <- mod1(param).x0 r <- mod2(param).x0 } yield l |+| r } def stepFunction = (s, dt) => (s, p) match { case (BranchState(ls, rs), BranchParameter(lp, rp)) => for { l <- mod1(lp).stepFunction(ls, dt) r <- mod2(rp).stepFunction(rs, dt) } yield BranchState(l, r) case (x: LeafState, param: LeafParameter) => // Null model case, non-null must be on left mod1(param).stepFunction(x, dt) } def dataLikelihood = (s, y) => p match { case param: LeafParameter => mod1(param).dataLikelihood(s, y) case BranchParameter(lp, _) => mod1(lp).dataLikelihood(s, y) } } def zeroModel(stepFun: SdeParameter => (State, TimeIncrement) => Rand[State]): Parameters => StateSpaceModel = p => new StateSpaceModel { def observation = x => new Rand[Observation] { def draw = x.head } def f(s: State, t: Time) = s.head def x0 = new Rand[State] { def draw = LeafState(DenseVector[Double]()) } def stepFunction = p match { case LeafParameter(_,_,sdeparam @unchecked) => stepFun(sdeparam) } def dataLikelihood = (s, y) => 0.0 } }
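A composition sketch (illustrative; `seasonalModel` and `trendModel` are hypothetical factories of type Parameters => StateSpaceModel): `op` combines two parameterised models into one, routing the left and right branches of the parameter and state trees to the respective components.

import io.github.mandar2812.dynaml.models.statespace._
import io.github.mandar2812.dynaml.models.statespace.POMP._

object ComposedStateSpaceModel {
  def compose(
    seasonalModel: Parameters => StateSpaceModel,
    trendModel: Parameters => StateSpaceModel): Parameters => StateSpaceModel =
    StateSpaceModel.op(seasonalModel, trendModel)
}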
amitkumarj441/DynaML
dynaml-pipes/src/main/scala-2.11/io/github/mandar2812/dynaml/pipes/DataPipe4.scala
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.pipes

/**
  * @author mandar date: 17/02/2017.
  *
  * Data Pipes representing functions of 4 arguments
  */
trait DataPipe4[-Source1, -Source2, -Source3, -Source4, +Result]
  extends DataPipeConvertible[(Source1, Source2, Source3, Source4), Result]
    with Serializable {
  self =>

  def run(data1: Source1, data2: Source2, data3: Source3, data4: Source4): Result

  def apply(data1: Source1, data2: Source2, data3: Source3, data4: Source4): Result =
    run(data1, data2, data3, data4)

  def >[Result2](otherPipe: DataPipe[Result, Result2])
  : DataPipe4[Source1, Source2, Source3, Source4, Result2] =
    DataPipe4((d1: Source1, d2: Source2, d3: Source3, d4: Source4) =>
      otherPipe.run(self.run(d1, d2, d3, d4)))

  override def toPipe: ((Source1, Source2, Source3, Source4)) => Result =
    (x: (Source1, Source2, Source3, Source4)) => self.run(x._1, x._2, x._3, x._4)
}

object DataPipe4 {

  def apply[Source1, Source2, Source3, Source4, Result](
    func4: (Source1, Source2, Source3, Source4) => Result)
  : DataPipe4[Source1, Source2, Source3, Source4, Result] =
    new DataPipe4[Source1, Source2, Source3, Source4, Result] {
      override def run(data1: Source1, data2: Source2, data3: Source3, data4: Source4) =
        func4(data1, data2, data3, data4)
    }
}
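A usage sketch (added for illustration): a four-argument pipe composed with a unary DataPipe via `>`.

import io.github.mandar2812.dynaml.pipes.{DataPipe, DataPipe4}

object DataPipe4Example {
  // Combine four inputs, then post-process the result with a unary DataPipe.
  val weightedSum = DataPipe4(
    (a: Double, b: Double, c: Double, d: Double) => a + 2*b + 3*c + 4*d)

  val describe = weightedSum > DataPipe((s: Double) => s"sum = $s")

  val result: String = describe(1.0, 2.0, 3.0, 4.0)  // "sum = 30.0"
}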
amitkumarj441/DynaML
dynaml-repl/src/main/scala-2.11/io/github/mandar2812/dynaml/repl/DynaMLEvaluator.scala
package io.github.mandar2812.dynaml.repl import java.nio.charset.Charset import ammonite.runtime.Evaluator.{evaluatorRunPrinter, userCodeExceptionHandler} import ammonite.runtime.{Evaluator, Frame} import ammonite.util.Util.ClassFiles import ammonite.util._ import org.apache.commons.io.output.ByteArrayOutputStream import scala.util.Try abstract class DynaMLEvaluator extends Evaluator { def processCell(classFiles: ClassFiles, newImports: Imports, usedEarlierDefinitions: Seq[String], printer: Printer, indexedWrapperName: Name, wrapperPath: Seq[Name], silent: Boolean, contextClassLoader: ClassLoader): Res[Evaluated] } object DynaMLEvaluator { def apply(headFrame: => Frame): DynaMLEvaluator = new DynaMLEvaluator { eval => def processCell(classFiles: ClassFiles, newImports: Imports, usedEarlierDefinitions: Seq[String], printer: Printer, indexedWrapperName: Name, wrapperPath: Seq[Name], silent: Boolean, contextClassLoader: ClassLoader) = { for { cls <- loadClass("ammonite.$sess." + indexedWrapperName.backticked, classFiles) _ <- Catching{userCodeExceptionHandler} } yield { headFrame.usedEarlierDefinitions = usedEarlierDefinitions // Exhaust the printer iterator now, before exiting the `Catching` // block, so any exceptions thrown get properly caught and handled val iter = evalMain(cls, contextClassLoader).asInstanceOf[Iterator[String]] if (!silent) evaluatorRunPrinter(iter.foreach(printer.resultStream.print)) else evaluatorRunPrinter(iter.foreach(_ => ())) // "" Empty string as cache tag of repl code evaluationResult( Seq(Name("ammonite"), Name("$sess"), indexedWrapperName), wrapperPath, newImports ) } } def loadClass(fullName: String, classFiles: ClassFiles): Res[Class[_]] = { Res[Class[_]]( Try { for ((name, bytes) <- classFiles.sortBy(_._1)) { headFrame.classloader.addClassFile(name, bytes) } headFrame.classloader.findClass(fullName) }, e =>"Failed to load compiled class " + e ) } def evalMain(cls: Class[_], contextClassloader: ClassLoader) = Util.withContextClassloader(contextClassloader){ val (method, instance) = try { (cls.getDeclaredMethod("$main"), null) } catch { case e: NoSuchMethodException => // Wrapper with very long names seem to require this try { val cls0 = contextClassloader.loadClass(cls.getName + "$") val inst = cls0.getDeclaredField("MODULE$").get(null) (cls0.getDeclaredMethod("$main"), inst) } catch { case _: ClassNotFoundException | _: NoSuchMethodException => throw e } } method.invoke(instance) } def processLine(classFiles: Util.ClassFiles, newImports: Imports, usedEarlierDefinitions: Seq[String], printer: Printer, indexedWrapperName: Name, wrapperPath: Seq[Name], silent: Boolean, contextClassLoader: ClassLoader) = { for { cls <- loadClass("ammonite.$sess." 
+ indexedWrapperName.backticked, classFiles) _ <- Catching{userCodeExceptionHandler} } yield { headFrame.usedEarlierDefinitions = usedEarlierDefinitions // Exhaust the printer iterator now, before exiting the `Catching` // block, so any exceptions thrown get properly caught and handled val iter = evalMain(cls, contextClassLoader).asInstanceOf[Iterator[String]] if (!silent) evaluatorRunPrinter(iter.foreach(printer.resultStream.print)) else evaluatorRunPrinter(iter.foreach(_ => ())) // "" Empty string as cache tag of repl code evaluationResult( Seq(Name("ammonite"), Name("$sess"), indexedWrapperName), wrapperPath, newImports ) } } def processScriptBlock(cls: Class[_], newImports: Imports, usedEarlierDefinitions: Seq[String], wrapperName: Name, wrapperPath: Seq[Name], pkgName: Seq[Name], contextClassLoader: ClassLoader) = { for { _ <- Catching{userCodeExceptionHandler} } yield { headFrame.usedEarlierDefinitions = usedEarlierDefinitions evalMain(cls, contextClassLoader) val res = evaluationResult(pkgName :+ wrapperName, wrapperPath, newImports) res } } def evaluationResult(wrapperName: Seq[Name], internalWrapperPath: Seq[Name], imports: Imports) = { Evaluated( wrapperName, Imports( for(id <- imports.value) yield { val filledPrefix = if (internalWrapperPath.isEmpty) { val filledPrefix = if (id.prefix.isEmpty) { // For some reason, for things not-in-packages you can't access // them off of `_root_` wrapperName } else { id.prefix } if (filledPrefix.headOption.exists(_.backticked == "_root_")) filledPrefix else Seq(Name("_root_")) ++ filledPrefix } else if (id.prefix.isEmpty) // For some reason, for things not-in-packages you can't access // them off of `_root_` Seq(Name("_root_")) ++ wrapperName ++ internalWrapperPath else if (id.prefix.startsWith(wrapperName)) Seq(Name("_root_")) ++ wrapperName.init ++ Seq(id.prefix.apply(wrapperName.length)) ++ internalWrapperPath ++ id.prefix.drop(wrapperName.length + 1) else if (id.prefix.headOption.exists(_.backticked == "_root_")) id.prefix else Seq(Name("_root_")) ++ id.prefix id.copy(prefix = filledPrefix) } ) ) } } }
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/Learn.scala
package io.github.mandar2812.dynaml.tensorflow import io.github.mandar2812.dynaml.tensorflow.layers.{DynamicTimeStepCTRNN, FiniteHorizonCTRNN, FiniteHorizonLinear} import org.platanios.tensorflow.api.learn.StopCriteria import org.platanios.tensorflow.api.learn.layers.{Activation, Input, Layer} import org.platanios.tensorflow.api.ops.NN.SameConvPadding import org.platanios.tensorflow.api.ops.Output import org.platanios.tensorflow.api.ops.io.data.Dataset import org.platanios.tensorflow.api.ops.training.optimizers.Optimizer import org.platanios.tensorflow.api.types.DataType import org.platanios.tensorflow.api.{FLOAT32, Graph, Shape, Tensor, tf, _} private[tensorflow] object Learn { type TFDATA = Dataset[(Tensor, Tensor), (Output, Output), (DataType, DataType), (Shape, Shape)] val Phi: layers.Phi.type = layers.Phi val Tanh: layers.Tanh.type = layers.Tanh val GeneralizedLogistic : layers.GeneralizedLogistic.type = layers.GeneralizedLogistic val batch_norm: layers.BatchNormalisation.type = layers.BatchNormalisation val ctrnn: layers.FiniteHorizonCTRNN.type = layers.FiniteHorizonCTRNN val dctrnn: layers.DynamicTimeStepCTRNN.type = layers.DynamicTimeStepCTRNN val ts_linear: layers.FiniteHorizonLinear.type = layers.FiniteHorizonLinear val rbf_layer: layers.RBFLayer.type = layers.RBFLayer val stack_outputs: layers.StackOutputs.type = layers.StackOutputs val concat_outputs: layers.ConcatenateOutputs.type = layers.ConcatenateOutputs val seq_layer: layers.SeqLayer.type = layers.SeqLayer val combined_layer: layers.CombinedLayer.type = layers.CombinedLayer val unstack: layers.Unstack.type = layers.Unstack val identity: layers.IdentityLayer.type = layers.IdentityLayer val tuple2_layer: layers.Tuple2Layer.type = layers.Tuple2Layer val stack_tuple2: layers.StackTuple2.type = layers.StackTuple2 val concat_tuple2: layers.ConcatenateTuple2.type = layers.ConcatenateTuple2 /** * Stop after a specified maximum number of iterations has been reached. * */ val max_iter_stop: Long => StopCriteria = (n: Long) => tf.learn.StopCriteria(maxSteps = Some(n)) /** * Stop after the change in the loss function falls below a specified threshold. * */ val abs_loss_change_stop: (Double, Long) => StopCriteria = (d: Double, max_iter: Long) => tf.learn.StopCriteria( absLossChangeTol = Some(d), maxSteps = Some(max_iter)) /** * Stop after the relative change in the loss function falls below a specified threshold. * */ val rel_loss_change_stop: (Double, Long) => StopCriteria = (d: Double, max_iter: Long) => tf.learn.StopCriteria( relLossChangeTol = Some(d), maxSteps = Some(max_iter)) /** * Constructs a feed-forward layer. * * @param num_units The number of neurons in the layer * @param id A unique integer id for constructing the layer name. * * */ def feedforward(num_units: Int, useBias: Boolean = true)(id: Int) = tf.learn.Linear("Linear_"+id, num_units, useBias) /** * Constructs a simple feed-forward stack of layers. * * @param get_act A function which given a layer index number, * returns an activation function. * * @param dataType The data type of the layer weights/biases. * * @param layer_sizes A Sequence of layer sizes/dimensions/neuron counts. * * @param starting_index Specify which layer number should the indexing of * the layers start with, defaults to 1. 
* */ def feedforward_stack( get_act: Int => Activation, dataType: DataType)( layer_sizes: Seq[Int], starting_index: Int = 1): Layer[Output, Output] = { def stack_ff_layers_rec( ls: Seq[Int], layer_acc: Layer[Output, Output], layer_index: Int): Layer[Output, Output] = ls match { case Seq() => layer_acc case Seq(num_output_units) => layer_acc >> dtflearn.feedforward(num_output_units)(layer_index) case _ => stack_ff_layers_rec( ls.tail, layer_acc >> dtflearn.feedforward(ls.head)(layer_index) >> get_act(layer_index), layer_index + 1) } stack_ff_layers_rec( layer_sizes, tf.learn.Cast(s"Cast_$starting_index", dataType), starting_index) } /** * Constructs a symmetric (square) convolutional layer from the provided dimensions. * * [[org.platanios.tensorflow.api.ops.NN.SameConvPadding]] is used as the padding mode. * * @param size The size of each square filter e.g. 2*2, 3*3 etc * @param num_channels_input The number of channels in the input * @param num_filters The number of channels in the layer output * @param strides A [[Tuple2]] with strides, for each direction i.e. breadth and height. * @param index The layer id or index, helps in creating a unique layer name * */ def conv2d(size: Int, num_channels_input: Int, num_filters: Int, strides: (Int, Int))(index: Int) = tf.learn.Conv2D( "Conv2D_"+index, Shape(size, size, num_channels_input, num_filters), strides._1, strides._2, SameConvPadding) /** * Constructs a convolutional layer activated by a ReLU, with * an option of appending a dropout layer. * * */ def conv2d_unit( shape: Shape, stride: (Int, Int) = (1, 1), relu_param: Float = 0.1f, dropout: Boolean = true, keep_prob: Float = 0.6f)(i: Int) = if(dropout) { tf.learn.Conv2D("Conv2D_"+i, shape, stride._1, stride._2, SameConvPadding) >> tf.learn.AddBias(name = "Bias_"+i) >> tf.learn.ReLU("ReLU_"+i, relu_param) >> tf.learn.Dropout("Dropout_"+i, keep_prob) } else { tf.learn.Conv2D("Conv2D_"+i, shape, stride._1, stride._2, SameConvPadding) >> batch_norm(name = "BatchNorm_"+i) >> tf.learn.ReLU("ReLU_"+i, relu_param) >> tf.learn.Cast("Cast_"+i, FLOAT32) } /** * Constructs an inverted convolutional pyramid, consisting of * stacked versions of [Conv2d --> ReLU --> Dropout] layers. * * The number of filters learned in each Conv2d layer are * arranged in decreasing exponents of 2. They are costructed * using calls to [[conv2d_unit()]] * * ... Conv_unit(128) --> Conv_unit(64) --> Conv_unit(32) --> Conv_unit(16) ... * * @param size The size of the square convolutional filter to be applied * in each segment. * @param num_channels_input The number of channels in the input. * @param start_num_bits The exponent of 2 which determines size/depth of the starting layer * e.g. set to 4 for a depth of 16. * * @param end_num_bits The exponent of 2 which determines the size/depth of the end layer. * * @param relu_param The activation barrier of the ReLU activation. * * @param dropout Set to true, if dropout layers should be placed in each convolutional unit. * Set to false, and batch normalisation layers shall be placed after each convolutional unit. * * @param keep_prob If dropout is enabled, then this determines the retain probability. * */ def conv2d_pyramid( size: Int, num_channels_input: Int)( start_num_bits: Int, end_num_bits: Int)( relu_param: Float = 0.1f, dropout: Boolean = true, keep_prob: Float = 0.6f, starting_index: Int = 0) = { require( start_num_bits > end_num_bits, "To construct a 2d-convolutional pyramid, you need to start_num_bits > end_num_bits") //Create the first layer segment. 
val head_segment = conv2d_unit( Shape(size, size, num_channels_input, math.pow(2, start_num_bits).toInt), stride = (1, 1), relu_param, dropout, keep_prob)(starting_index) //Create the rest of the pyramid val tail_segments = (end_num_bits until start_num_bits).reverse.zipWithIndex.map(bitsAndIndices => { val (bits, index) = bitsAndIndices conv2d_unit( Shape(size, size, math.pow(2, bits+1).toInt, math.pow(2, bits).toInt), stride = (math.pow(2, index+1).toInt, math.pow(2, index+1).toInt), relu_param, dropout, keep_prob)(index+1+starting_index) }).reduceLeft((a,b) => a >> b) //Join head to tail. head_segment >> tail_segments } /** * <h4>Inception Module</h4> * * Constructs an Inception v2 computational unit, * optionally with batch normalisation. * * Assumes input to be of shape Shape(?, height, width, channels) * * <b>Architecture Details</b> * * An Inception module consists of the following branches. * * <ol> * <li>Convolution (1 &times; 1)</li> * <li>Convolution (1 &times; 1) -> Convolution (3 &times; 3)</li> * <li>Convolution (1 &times; 1) -> Convolution (5 &times; 5)</li> * <li>Max Pooling (1 &times; 1) -> Convolution (1 &times; 1)</li> * </ol> * * After performing the operations above, the module performs depth-wise * concatenation of the results. * * <b>Implementation Notes</b> * * Each convolution is followed by a batch normalisation layer (if applicable) * followed by a Rectified Linear activation. * * * @param channels The depth of the input. * @param num_filters The number of filters to learn in each branch of * the module, supplied as a sequence of integers. * @param use_batch_norm If true, apply batch normalisation at the end * of each convolution. * @param relu_param The parameter to be fed to each ReLU activation. * */ def inception_unit( channels: Int, num_filters: Seq[Int], relu_param: Float = 0.01f, use_batch_norm: Boolean = true)( layer_index: Int): Layer[Output, Output] = { require(num_filters.length == 4, s"Inception module has only 4 branches, but ${num_filters.length}" + s" were assigned while setting num_filters variable") val name = s"Inception_$layer_index" def get_post_conv_layer(b_index: Int, l_index: Int) = if(use_batch_norm) { batch_norm (s"$name/B$b_index/BatchNorm_$l_index") >> tf.learn.ReLU(s"$name/B$b_index/ReLU_$l_index", relu_param) } else { tf.learn.ReLU(s"$name/B$b_index/ReLU_$l_index", relu_param) } val branch1 = tf.learn.Conv2D( s"$name/B1/Conv2D_1x1", Shape(1, 1, channels, num_filters.head), 1, 1, SameConvPadding) >> get_post_conv_layer(1, 1) val branch2 = tf.learn.Conv2D(s"$name/B2/Conv2D_1x1", Shape(1, 1, channels, num_filters(1)), 1, 1, SameConvPadding) >> get_post_conv_layer(2, 1) >> tf.learn.Conv2D(s"$name/B2/Conv2D_1x3", Shape(1, 3, num_filters(1), num_filters(1)), 1, 1, SameConvPadding) >> tf.learn.Conv2D(s"$name/B2/Conv2D_3x1", Shape(3, 1, num_filters(1), num_filters(1)), 1, 1, SameConvPadding) >> get_post_conv_layer(2, 2) val branch3 = tf.learn.Conv2D(s"$name/B3/Conv2D_1x1", Shape(1, 1, channels, num_filters(2)), 1, 1, SameConvPadding) >> get_post_conv_layer(3, 1) >> tf.learn.Conv2D(s"$name/B3/Conv2D_1x3_1", Shape(1, 3, num_filters(2), num_filters(2)), 1, 1, SameConvPadding) >> tf.learn.Conv2D(s"$name/B3/Conv2D_3x1_1", Shape(3, 1, num_filters(2), num_filters(2)), 1, 1, SameConvPadding) >> tf.learn.Conv2D(s"$name/B3/Conv2D_1x3_2", Shape(1, 3, num_filters(2), num_filters(2)), 1, 1, SameConvPadding) >> tf.learn.Conv2D(s"$name/B3/Conv2D_3x1_2", Shape(3, 1, num_filters(2), num_filters(2)), 1, 1, SameConvPadding) >> get_post_conv_layer(3, 2) val 
branch4 = tf.learn.MaxPool(s"$name/B4/MaxPool", Seq(1, 3, 3, 1), 1, 1, SameConvPadding) >> tf.learn.Conv2D(s"$name/B4/Conv2D_1x1", Shape(1, 1, channels, num_filters(3)), 1, 1, SameConvPadding) >> get_post_conv_layer(4, 1) val layers = Seq( branch1, branch2, branch3, branch4 ) combined_layer(name, layers) >> concat_outputs(name+"/DepthConcat", -1) } /** * Create a stack of Inception modules (See [[inception_unit()]] for more details). * * @param num_channels_image The depth, or number of colour channels in the image. * @param num_filters Specifies the number of filters for each branch of every inception module. * @param starting_index The starting index of the stack. The stack is named in a consecutive manner, * i.e. Inception_i, Inception_i+1, ... * */ def inception_stack( num_channels_image: Int, num_filters: Seq[Seq[Int]])( starting_index: Int): Layer[Output, Output] = { val head = inception_unit(num_channels_image, num_filters.head)(starting_index) val tail = num_filters.sliding(2) .map(pair => inception_unit(pair.head.sum, pair.last) _) .zipWithIndex .map(layer_fn_index_pair => { val (create_inception_layer, index) = layer_fn_index_pair create_inception_layer(index + starting_index + 1) }).reduceLeft((l1, l2) => l1 >> l2) head >> tail } /** * Constructs a Continuous Time Recurrent Neural Network (CTRNN) Layer, consisting * of some latent states, composed with a linear projection into the space of observables. * * @param observables The dimensionality of the output space. * @param timestep The integration time step, if set to 0 or a negative * value, create a [[DynamicTimeStepCTRNN]]. * @param horizon The number of steps in time to simulate the dynamical system * @param index The layer index, should be unique. * */ def ctrnn_block( observables: Int, horizon: Int, timestep: Double = -1d)(index: Int) = if (timestep <= 0d) { DynamicTimeStepCTRNN(s"FHctrnn_$index", horizon) >> FiniteHorizonLinear(s"FHlinear_$index", observables) } else { FiniteHorizonCTRNN(s"FHctrnn_$index", horizon, timestep) >> FiniteHorizonLinear(s"FHlinear_$index", observables) } /** * <h4>Supervised Learning</h4> * * Trains a supervised tensorflow model/estimator. * * @tparam IT The type representing input tensors, * e.g. `Tensor`, `(Tensor, Tensor)`, `Seq[Tensor]` etc. * * @tparam IO The type representing symbolic tensors of the input patterns, * e.g. `Output`, `(Output, Output)`, `Seq[Output]` etc. * * @tparam IDA The underlying (scalar) data types of the input, * e.g. `DataType.Aux[Double]`, `(DataType.Aux[Double], DataType.Aux[Double])` etc. * * @tparam ID The input pattern's tensorflow data type, * e.g. `FLOAT64`, `(FLOAT64, FLOAT64)`, etc. * * @tparam IS The type of the input pattern's shape, * e.g. `Shape`, `(Shape, Shape)`, `Seq[Shape]` * * @tparam I The type of the symbolic tensor returned by the neural architecture, * e.g. `Output`, `(Output, Output)`, `Seq[Output]` * * @tparam TT The type representing target/label tensors, * e.g. `Tensor`, `(Tensor, Tensor)`, `Seq[Tensor]` etc. * @tparam TO The type representing symbolic tensors of the target patterns, * e.g. `Output`, `(Output, Output)`, `Seq[Output]` etc. * @tparam TDA The underlying (scalar) data types of the targets, * e.g. `DataType.Aux[Double]`, `(DataType.Aux[Double], DataType.Aux[Double])` etc. * * @tparam TD The target pattern's tensorflow data type, * e.g. `FLOAT64`, `(FLOAT64, FLOAT64)`, etc. * * @tparam TS The type of the target pattern's shape, * e.g. 
`Shape`, `(Shape, Shape)`, `Seq[Shape]` * * @tparam T The type of the symbolic tensor of the processed targets, this is the type * of the tensorflow symbol which is used to compute the loss. * e.g. `Output`, `(Output, Output)`, `Seq[Output]` * * @param architecture The network architecture, * takes a value of type [[IO]] and returns * a value of type [[I]]. * @param input The input meta data. * @param target The output label meta data * @param processTarget A computation layer which converts * the original target of type [[TO]] * into a type [[T]], usable by the Estimator API * @param loss The loss function to be optimized during training. * @param optimizer The optimization algorithm implementation. * @param summariesDir A filesystem path of type [[java.nio.file.Path]], which * determines where the intermediate model parameters/checkpoints * will be written. * @param stopCriteria The stopping criteria for training, for examples see * [[max_iter_stop]], [[abs_loss_change_stop]] and [[rel_loss_change_stop]] * * @param stepRateFreq The frequency at which to log the step rate (expressed as number of iterations/sec). * @param summarySaveFreq The frequency at which to log the loss summary. * @param checkPointFreq The frequency at which to log the model parameters. * @param training_data A training data set, as an instance of [[Dataset]]. * @param inMemory Set to true if the estimator should be in-memory. * * @return A [[Tuple2]] containing the model and estimator. * * @author mandar2812 * */ def build_tf_model[ IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T]( architecture: Layer[IO, I], input: Input[IT, IO, IDA, ID, IS], target: Input[TT, TO, TDA, TD, TS], processTarget: Layer[TO, T], loss: Layer[(I, T), Output], optimizer: Optimizer, summariesDir: java.nio.file.Path, stopCriteria: StopCriteria, stepRateFreq: Int = 5000, summarySaveFreq: Int = 5000, checkPointFreq: Int = 5000)( training_data: Dataset[ (IT, TT), (IO, TO), (ID, TD), (IS, TS)], inMemory: Boolean = false ) = { val (model, estimator) = tf.createWith(graph = Graph()) { val model = tf.learn.Model.supervised( input, architecture, target, processTarget, loss, optimizer) println("\nTraining model.\n") val estimator = if(inMemory) { tf.learn.InMemoryEstimator( model, tf.learn.Configuration(Some(summariesDir)), stopCriteria, Set( tf.learn.StepRateLogger( log = false, summaryDir = summariesDir, trigger = tf.learn.StepHookTrigger(stepRateFreq)), tf.learn.SummarySaver(summariesDir, tf.learn.StepHookTrigger(summarySaveFreq)), tf.learn.CheckpointSaver(summariesDir, tf.learn.StepHookTrigger(checkPointFreq))), tensorBoardConfig = tf.learn.TensorBoardConfig(summariesDir, reloadInterval = checkPointFreq)) } else { tf.learn.FileBasedEstimator( model, tf.learn.Configuration(Some(summariesDir)), stopCriteria, Set( tf.learn.StepRateLogger( log = false, summaryDir = summariesDir, trigger = tf.learn.StepHookTrigger(stepRateFreq)), tf.learn.SummarySaver(summariesDir, tf.learn.StepHookTrigger(summarySaveFreq)), tf.learn.CheckpointSaver(summariesDir, tf.learn.StepHookTrigger(checkPointFreq))), tensorBoardConfig = tf.learn.TensorBoardConfig(summariesDir, reloadInterval = checkPointFreq)) } estimator.train(() => training_data) (model, estimator) } (model, estimator) } /** * <h4>Unsupervised Learning</h4> * * Trains an unsupervised tensorflow model/estimator. * * @tparam IT The type representing input tensors, * e.g. `Tensor`, `(Tensor, Tensor)`, `Seq[Tensor]` etc. * * @tparam IO The type representing symbolic tensors of the input patterns, * e.g. 
`Output`, `(Output, Output)`, `Seq[Output]` etc. * * @tparam IDA The underlying (scalar) data types of the input, * e.g. `DataType.Aux[Double]`, `(DataType.Aux[Double], DataType.Aux[Double])` etc. * * @tparam ID The input pattern's tensorflow data type, * e.g. `FLOAT64`, `(FLOAT64, FLOAT64)`, etc. * * @tparam IS The type of the input pattern's shape, * e.g. `Shape`, `(Shape, Shape)`, `Seq[Shape]` * * @tparam I The type of the symbolic tensor returned by the neural architecture, * e.g. `Output`, `(Output, Output)`, `Seq[Output]` * * @param architecture The network architecture, * takes a value of type [[IO]] and returns * a value of type [[I]]. * @param input The input meta data. * @param loss The loss function to be optimized during training. * @param optimizer The optimization algorithm implementation. * @param summariesDir A filesystem path of type [[java.nio.file.Path]], which * determines where the intermediate model parameters/checkpoints * will be written. * @param stopCriteria The stopping criteria for training, for examples see * [[max_iter_stop]], [[abs_loss_change_stop]] and [[rel_loss_change_stop]] * * @param stepRateFreq The frequency at which to log the step rate (expressed as number of iterations/sec). * @param summarySaveFreq The frequency at which to log the loss summary. * @param checkPointFreq The frequency at which to log the model parameters. * @param training_data A training data set, as an instance of [[Dataset]]. * @param inMemory Set to true if the estimator should be in-memory. * * @return A [[Tuple2]] containing the model and estimator. * * @author mandar2812 * */ def build_tf_model[IT, IO, IDA, ID, IS, I]( architecture: Layer[IO, I], input: Input[IT, IO, IDA, ID, IS], loss: Layer[(IO, I), Output], optimizer: Optimizer, summariesDir: java.nio.file.Path, stopCriteria: StopCriteria, stepRateFreq: Int, summarySaveFreq: Int, checkPointFreq: Int)( training_data: Dataset[IT, IO, ID, IS], inMemory: Boolean ) = { val (model, estimator) = tf.createWith(graph = Graph()) { val model = tf.learn.Model.unsupervised(input, architecture, loss, optimizer) println("\nTraining model.\n") val estimator = if(inMemory) { tf.learn.InMemoryEstimator( model, tf.learn.Configuration(Some(summariesDir)), stopCriteria, Set( tf.learn.StepRateLogger( log = false, summaryDir = summariesDir, trigger = tf.learn.StepHookTrigger(stepRateFreq)), tf.learn.SummarySaver(summariesDir, tf.learn.StepHookTrigger(summarySaveFreq)), tf.learn.CheckpointSaver(summariesDir, tf.learn.StepHookTrigger(checkPointFreq))), tensorBoardConfig = tf.learn.TensorBoardConfig(summariesDir, reloadInterval = checkPointFreq)) } else { tf.learn.FileBasedEstimator( model, tf.learn.Configuration(Some(summariesDir)), stopCriteria, Set( tf.learn.StepRateLogger( log = false, summaryDir = summariesDir, trigger = tf.learn.StepHookTrigger(stepRateFreq)), tf.learn.SummarySaver(summariesDir, tf.learn.StepHookTrigger(summarySaveFreq)), tf.learn.CheckpointSaver(summariesDir, tf.learn.StepHookTrigger(checkPointFreq))), tensorBoardConfig = tf.learn.TensorBoardConfig(summariesDir, reloadInterval = checkPointFreq)) } estimator.train(() => training_data) (model, estimator) } (model, estimator) } }
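A usage sketch (not part of the original file), assuming `dtflearn` is the package alias for this object (it is used that way internally above) and that `tf.learn.ReLU` qualifies as an `Activation` layer.

import org.platanios.tensorflow.api._
import io.github.mandar2812.dynaml.tensorflow._

object FeedForwardStackExample {
  // Three linear layers with 16, 8 and 1 output units respectively,
  // with a leaky ReLU after each hidden layer.
  val net = dtflearn.feedforward_stack(
    (i: Int) => tf.learn.ReLU(s"ReLU_$i", 0.01f),
    FLOAT64)(
    layer_sizes = Seq(16, 8, 1))
}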
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/Model.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.models import breeze.linalg._ import io.github.mandar2812.dynaml.kernels.SVMKernel import io.github.mandar2812.dynaml.optimization.GloballyOptimizable import io.github.mandar2812.dynaml.evaluation.Metrics import io.github.mandar2812.dynaml.kernels._ import io.github.mandar2812.dynaml.optimization._ import scala.util.Random /** * Basic Higher Level abstraction * for Machine Learning models. * * @tparam T The type of the training & test data * * @tparam Q The type of a single input pattern * * @tparam R The type of a single output pattern * * */ trait Model[T, Q, R] { /** * The training data * */ protected val g: T def data = g /** * Predict the value of the * target variable given a * point. * * */ def predict(point: Q): R } /** * Skeleton of Parameterized Model * @tparam G The type of the underlying data. * @tparam T The type of the parameters * @tparam Q A Vector/Matrix representing the features of a point * @tparam R The type of the output of the predictive model * i.e. A Real Number or a Vector of outputs. * @tparam S The type of the edge containing the * features and label. * * */ trait ParameterizedLearner[G, T, Q, R, S] extends Model[G, Q, R] { protected var params: T protected val optimizer: RegularizedOptimizer[T, Q, R, S] /** * Learn the parameters * of the model. * * */ def learn(): Unit /** * Get the value of the parameters * of the model. * */ def parameters() = this.params def updateParameters(param: T): Unit = { this.params = param } def setMaxIterations(i: Int): this.type = { this.optimizer.setNumIterations(i) this } def setBatchFraction(f: Double): this.type = { assert(f >= 0.0 && f <= 1.0, "Mini-Batch Fraction should be between 0.0 and 1.0") this.optimizer.setMiniBatchFraction(f) this } def setLearningRate(alpha: Double): this.type = { this.optimizer.setStepSize(alpha) this } def setRegParam(r: Double): this.type = { this.optimizer.setRegParam(r) this } def initParams(): T } /** * Represents skeleton of a * Linear Model. * * @tparam T The underlying type of the data structure * ex. Gremlin, Neo4j, Spark RDD etc * @tparam P A Vector/Matrix of Doubles * @tparam Q A Vector/Matrix representing the features of a point * @tparam R The type of the output of the predictive model * i.e. A Real Number or a Vector of outputs. * @tparam S The type of the data containing the * features and label. * */ trait LinearModel[T, P, Q , R, S] extends ParameterizedLearner[T, P, Q, R, S] { /** * The non linear feature mapping implicitly * defined by the kernel applied, this is initialized * to an identity map. 
* */ var featureMap: (Q) => Q = identity def clearParameters(): Unit } /** * An evaluable model is on in which * there is a function taking in a csv * reader object pointing to a test csv file * and returns the appropriate [[Metrics]] object * * @tparam P The type of the model's Parameters * @tparam R The type of the output value * */ trait EvaluableModel [P, R]{ def evaluate(config: Map[String, String]): Metrics[R] } abstract class KernelizedModel[G, L, T <: Tensor[K1, Double], Q <: Tensor[K2, Double], R, K1, K2](protected val task: String) extends LinearModel[G, T, Q, R, L] with GloballyOptimizable with EvaluableModel[T, R]{ override protected var hyper_parameters: List[String] = List("RegParam") override protected var current_state: Map[String, Double] = Map("RegParam" -> 1.0) protected val nPoints: Long def npoints = nPoints /** * This variable stores the indexes of the * prototype points of the data set. * */ protected var points: List[Long] = List() def getXYEdges: L /** * Implements the changes in the model * after application of a given kernel. * * It calculates * * 1) Eigen spectrum of the kernel * * 2) Calculates an approximation to the * non linear feature map induced by the * application of the kernel * * @param kernel A kernel object. * @param M The number of prototypes to select * in order to approximate the kernel * matrix. * */ def applyKernel(kernel: SVMKernel[DenseMatrix[Double]], M: Int = math.sqrt(nPoints).toInt): Unit = {} /** * Calculate an approximation to * the subset of size M * with the maximum entropy. * */ def optimumSubset(M: Int): Unit def trainTest(test: List[Long]): (L,L) def crossvalidate(folds: Int, reg: Double, optionalStateFlag: Boolean = false): (Double, Double, Double) = { //Create the folds as lists of integers //which index the data points this.optimizer.setRegParam(reg).setNumIterations(40) .setStepSize(0.001).setMiniBatchFraction(1.0) val shuffle = Random.shuffle((1L to this.npoints).toList) //run batch sgd on each fold //and test val avg_metrics: DenseVector[Double] = (1 to folds).map{a => //For the ath fold //partition the data //ceil(a-1*npoints/folds) -- ceil(a*npoints/folds) //as test and the rest as training val test = shuffle.slice((a-1)*this.nPoints.toInt/folds, a*this.nPoints.toInt/folds) val(training_data, test_data) = this.trainTest(test) val tempparams = this.optimizer.optimize((folds - 1 / folds) * this.npoints, training_data, this.initParams()) val metrics = this.evaluateFold(tempparams)(test_data)(this.task) val res: DenseVector[Double] = metrics.kpi() / folds.toDouble res }.reduce(_+_) (avg_metrics(0), avg_metrics(1), avg_metrics(2)) } def evaluateFold(params: T) (test_data_set: L) (task: String): Metrics[Double] def applyFeatureMap: Unit } object KernelizedModel { def getOptimizedModel[G, H, M <: KernelizedModel[G, H, DenseVector[Double], DenseVector[Double], Double, Int, Int]](model: M, globalOptMethod: String, kernel: String, prototypes: Int, grid: Int, step: Double, logscale: Boolean = true, csaIt: Int = 5) = { val gs = globalOptMethod match { case "gs" => new GridSearch[model.type](model).setGridSize(grid) .setStepSize(step).setLogScale(logscale) case "csa" => new CoupledSimulatedAnnealing[model.type](model).setGridSize(grid) .setStepSize(step).setLogScale(logscale).setMaxIterations(csaIt) } kernel match { case "RBF" => gs.optimize(Map("bandwidth" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "RBF", "subset" -> prototypes.toString)) case "Polynomial" => gs.optimize(Map("degree" -> 1.0, "offset" -> 1.0, "RegParam" -> 0.5), 
Map("kernel" -> "Polynomial", "subset" -> prototypes.toString)) case "Exponential" => gs.optimize(Map("beta" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "Exponential", "subset" -> prototypes.toString)) case "Laplacian" => gs.optimize(Map("beta" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "Laplacian", "subset" -> prototypes.toString)) case "Cauchy" => gs.optimize(Map("sigma" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "Cauchy", "subset" -> prototypes.toString)) case "RationalQuadratic" => gs.optimize(Map("c" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "RationalQuadratic", "subset" -> prototypes.toString)) case "Wave" => gs.optimize(Map("theta" -> 1.0, "RegParam" -> 0.5), Map("kernel" -> "Wave", "subset" -> prototypes.toString)) case "Linear" => gs.optimize(Map("RegParam" -> 0.5)) } } } trait SubsampledDualLSSVM[G, L] extends KernelizedModel[G, L, DenseVector[Double], DenseVector[Double], Double, Int, Int]{ var kernel :SVMKernel[DenseMatrix[Double]] = null var (feature_a, b): (DenseMatrix[Double], DenseVector[Double]) = (null, null) protected var effectivedims:Int override def applyKernel(kern: SVMKernel[DenseMatrix[Double]], M: Int = this.points.length):Unit = { if(M != this.points.length) { this.optimumSubset(M) } this.params = DenseVector.fill(M+1)(1.0) effectivedims = M+1 kernel = kern } override def applyFeatureMap: Unit = {} override def learn(): Unit = { this.params =ConjugateGradient.runCG(feature_a, b, this.initParams(), 0.0001, this.params.length) } }
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/Pipe.scala
<filename>dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/Pipe.scala package io.github.mandar2812.dynaml.tensorflow import ammonite.ops.Path import io.github.mandar2812.dynaml.pipes.{DataPipe, DataPipe2} import io.github.mandar2812.dynaml.tensorflow.utils.{GaussianScalerTF, MinMaxScalerTF} import org.platanios.tensorflow.api._ import org.platanios.tensorflow.api.tf.image.DCTMethod private[tensorflow] object Pipe { /** * Read a file into a String Tensor * */ def read_image( num_channels: Int = 0, ratio: Int = 1, fancyUpscaling: Boolean = true, tryRecoverTruncated: Boolean = false, acceptableFraction: Float = 1, dctMethod: DCTMethod = DCTMethod.SystemDefault, name: String = "DecodeJpeg"): DataPipe[Path, Output] = DataPipe((image: Path) => tf.data.readFile(image.toString())) > DataPipe((content: Output) => tf.image.decodeJpeg( content, num_channels, ratio, fancyUpscaling, tryRecoverTruncated, acceptableFraction, dctMethod, name)) def resize_image( size: (Int, Int), alignCorners: Boolean = false, name: String = "ResizeArea"): DataPipe[Output, Output] = DataPipe((image_content: Output) => tf.image.resizeArea( image_content, Array(size._1, size._2), alignCorners, name) ) def extract_image_patch( heightRange: Range, widthRange: Range): DataPipe[Output, Output] = DataPipe((image: Output) => image( ---, heightRange.min :: heightRange.max + 1, widthRange.min :: widthRange.max + 1, ::) ) def gauss_std(ax: Int = 0): DataPipe[Tensor, (Tensor, GaussianScalerTF)] = DataPipe((labels: Tensor) => { val labels_mean = labels.mean(axes = ax) val n_data = labels.shape(0).scalar.asInstanceOf[Int].toDouble val labels_sd = labels.subtract(labels_mean).square.mean(axes = ax).multiply(n_data/(n_data - 1d)).sqrt val labels_scaler = GaussianScalerTF(labels_mean, labels_sd) val labels_scaled = labels_scaler(labels) (labels_scaled, labels_scaler) }) def minmax_std(ax: Int = 0): DataPipe[Tensor, (Tensor, MinMaxScalerTF)] = DataPipe((labels: Tensor) => { val labels_min = labels.min(axes = ax) val labels_max = labels.max(axes = ax) val labels_scaler = MinMaxScalerTF(labels_min, labels_max) val labels_scaled = labels_scaler(labels) (labels_scaled, labels_scaler) }) val gaussian_standardization: DataPipe2[Tensor, Tensor, ((Tensor, Tensor), (GaussianScalerTF, GaussianScalerTF))] = DataPipe2((features: Tensor, labels: Tensor) => { val (features_mean, labels_mean) = (features.mean(axes = 0), labels.mean(axes = 0)) val n_data = features.shape(0).scalar.asInstanceOf[Int].toDouble val (features_sd, labels_sd) = ( features.subtract(features_mean).square.mean(axes = 0).multiply(n_data/(n_data - 1d)).sqrt, labels.subtract(labels_mean).square.mean(axes = 0).multiply(n_data/(n_data - 1d)).sqrt ) val (features_scaler, labels_scaler) = ( GaussianScalerTF(features_mean, features_sd), GaussianScalerTF(labels_mean, labels_sd) ) val (features_scaled, labels_scaled) = ( features_scaler(features), labels_scaler(labels) ) ((features_scaled, labels_scaled), (features_scaler, labels_scaler)) }) val minmax_standardization: DataPipe2[Tensor, Tensor, ((Tensor, Tensor), (MinMaxScalerTF, MinMaxScalerTF))] = DataPipe2((features: Tensor, labels: Tensor) => { val (features_min, labels_min) = (features.min(axes = 0), labels.min(axes = 0)) val (features_max, labels_max) = ( features.max(axes = 0), labels.max(axes = 0) ) val (features_scaler, labels_scaler) = ( MinMaxScalerTF(features_min, features_max), MinMaxScalerTF(labels_min, labels_max) ) val (features_scaled, labels_scaled) = ( features_scaler(features), labels_scaler(labels) ) 
((features_scaled, labels_scaled), (features_scaler, labels_scaler)) }) val gauss_minmax_standardization: DataPipe2[Tensor, Tensor, ((Tensor, Tensor), (GaussianScalerTF, MinMaxScalerTF))] = DataPipe2((features: Tensor, labels: Tensor) => { val features_mean = features.mean(axes = 0) val (labels_min, labels_max) = (labels.min(axes = 0), labels.max(axes = 0)) val n_data = features.shape(0).scalar.asInstanceOf[Int].toDouble val features_sd = features.subtract(features_mean).square.mean(axes = 0).multiply(n_data/(n_data - 1d)).sqrt val (features_scaler, labels_scaler) = ( GaussianScalerTF(features_mean, features_sd), MinMaxScalerTF(labels_min, labels_max) ) val (features_scaled, labels_scaled) = ( features_scaler(features), labels_scaler(labels) ) ((features_scaled, labels_scaled), (features_scaler, labels_scaler)) }) }
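A pipeline sketch (illustrative), assuming the package object exposes this object as `dtfpipe` (mirroring the `dtflearn` alias used elsewhere); otherwise the composition must live inside the io.github.mandar2812.dynaml.tensorflow package, since Pipe is package-private.

import ammonite.ops.Path
import org.platanios.tensorflow.api.Output
import io.github.mandar2812.dynaml.pipes.DataPipe
import io.github.mandar2812.dynaml.tensorflow._

object ImagePipeExample {
  // Decode a JPEG from disk, resize it to 128x128 and crop a 64x64 patch.
  val load_and_crop: DataPipe[Path, Output] =
    dtfpipe.read_image(num_channels = 3) >
      dtfpipe.resize_image((128, 128)) >
      dtfpipe.extract_image_patch(heightRange = 0 until 64, widthRange = 0 until 64)
}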
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/gp/AbstractGPRegressionModel.scala
/* Copyright 2015 <NAME> Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.models.gp import breeze.linalg.{DenseMatrix, DenseVector, cholesky, trace} import breeze.numerics.{log, sqrt} import io.github.mandar2812.dynaml.algebra._ import io.github.mandar2812.dynaml.algebra.PartitionedMatrixOps._ import io.github.mandar2812.dynaml.algebra.PartitionedMatrixSolvers._ import io.github.mandar2812.dynaml.kernels._ import io.github.mandar2812.dynaml.models.{ContinuousProcessModel, SecondOrderProcessModel} import io.github.mandar2812.dynaml.optimization.GloballyOptWithGrad import io.github.mandar2812.dynaml.pipes.{DataPipe, DataPipe2} import io.github.mandar2812.dynaml.probability.{MultGaussianPRV, MultGaussianRV} import org.apache.log4j.Logger import scala.reflect.ClassTag /** * <h3>Gaussian Process Regression</h3> * * Single-Output Gaussian Process Regression Model * Performs gp/spline smoothing/regression with * vector inputs and a singular scalar output. * * @tparam T The data structure holding the training data. * * @tparam I The index set over which the Gaussian Process * is defined. * e.g: * * <ul> * <li>I = Double when implementing GP time series</li> * <li>I = DenseVector when implementing GP regression</li> * <ul> * * @param cov The covariance function/kernel of the GP model, * expressed as a [[LocalScalarKernel]] instance * * @param n Measurement noise covariance of the GP model. * * @param data Training data set of generic type [[T]] * * @param num The number of training data instances. * * @param meanFunc The mean/trend function of the GP model expressed as * a [[DataPipe]] instance. * */ abstract class AbstractGPRegressionModel[T, I: ClassTag]( cov: LocalScalarKernel[I], n: LocalScalarKernel[I], data: T, num: Int, meanFunc: DataPipe[I, Double] = DataPipe((_:I) => 0.0)) extends ContinuousProcessModel[T, I, Double, MultGaussianPRV] with SecondOrderProcessModel[T, I, Double, Double, DenseMatrix[Double], MultGaussianPRV] with GloballyOptWithGrad { private val logger = Logger.getLogger(this.getClass) /** * The GP is taken to be zero mean, or centered. * This is ensured by standardization of the data * before being used for further processing. * * */ override val mean: DataPipe[I, Double] = meanFunc override val covariance = cov val noiseModel = n override protected val g: T = data val npoints = num protected var blockSize = 1000 def blockSize_(b: Int): Unit = { blockSize = b } def _blockSize: Int = blockSize protected var (caching, kernelMatrixCache) : (Boolean, DenseMatrix[Double]) = (false, null) protected var partitionedKernelMatrixCache: PartitionedPSDMatrix = _ /** * Set the model "state" which * contains values of its hyper-parameters * with respect to the covariance and noise * kernels. 
* */ def setState(s: Map[String, Double]): this.type = { val (covHyp, noiseHyp) = ( s.filterKeys(covariance.hyper_parameters.contains), s.filterKeys(noiseModel.hyper_parameters.contains) ) covariance.setHyperParameters(covHyp) noiseModel.setHyperParameters(noiseHyp) current_state = covariance.state ++ noiseModel.state this } override protected var hyper_parameters: List[String] = covariance.hyper_parameters ++ noiseModel.hyper_parameters override protected var current_state: Map[String, Double] = covariance.state ++ noiseModel.state protected lazy val trainingData: Seq[I] = dataAsIndexSeq(g) protected lazy val trainingDataLabels = PartitionedVector( dataAsSeq(g).toStream.map(_._2), trainingData.length.toLong, _blockSize ) /** * Returns a [[DataPipe2]] which calculates the energy of data: [[T]]. * See: [[energy]] below. * */ def calculateEnergyPipe(h: Map[String, Double], options: Map[String, String]) = DataPipe2((training: Seq[I], trainingLabels: PartitionedVector) => { setState(h) val trainingMean = PartitionedVector( training.toStream.map(mean(_)), training.length.toLong, _blockSize ) //val effectiveTrainingKernel: LocalScalarKernel[I] = this.covariance + this.noiseModel //effectiveTrainingKernel.setBlockSizes((_blockSize, _blockSize)) val kernelTraining: PartitionedPSDMatrix = getTrainKernelMatrix AbstractGPRegressionModel.logLikelihood(trainingLabels - trainingMean, kernelTraining) }) /** * Calculates the energy of the configuration, * in most global optimization algorithms * we aim to find an approximate value of * the hyper-parameters such that this function * is minimized. * * @param h The value of the hyper-parameters in the configuration space * @param options Optional parameters about configuration * @return Configuration Energy E(h) * * In this particular case E(h) = -log p(Y|X,h) * also known as log likelihood. * */ override def energy(h: Map[String, Double], options: Map[String, String]): Double = calculateEnergyPipe(h, options)(trainingData, trainingDataLabels) /** * Returns a [[DataPipe]] which calculates the gradient of the energy, E(.) of data: [[T]] * with respect to the model hyper-parameters. * See: [[gradEnergy]] below. 
* */ def calculateGradEnergyPipe(h: Map[String, Double]) = DataPipe2((training: Seq[I], trainingLabels: PartitionedVector) => { try { covariance.setHyperParameters(h) noiseModel.setHyperParameters(h) val trainingMean = PartitionedVector( training.toStream.map(mean(_)), training.length.toLong, _blockSize ) val effectiveTrainingKernel: LocalScalarKernel[I] = covariance + noiseModel effectiveTrainingKernel.setBlockSizes((blockSize, blockSize)) val hParams = effectiveTrainingKernel.effective_hyper_parameters val gradMatrices = SVMKernel.buildPartitionedKernelGradMatrix( training, training.length, _blockSize, _blockSize, hParams, (x: I, y: I) => effectiveTrainingKernel.evaluate(x,y), (hy: String) => (x: I, y: I) => effectiveTrainingKernel.gradient(x,y)(hy)) val kernelTraining: PartitionedPSDMatrix = gradMatrices("kernel-matrix") val Lmat = bcholesky(kernelTraining) val alpha = Lmat.t \\ (Lmat \\ (trainingLabels-trainingMean)) hParams.map(h => { //build kernel derivative matrix val kernelDerivative: PartitionedMatrix = gradMatrices(h) //Calculate gradient for the hyper parameter h val grad: PartitionedMatrix = alpha*alpha.t*kernelDerivative - (Lmat.t \\ (Lmat \\ kernelDerivative)) (h.split("/").tail.mkString("/"), btrace(grad)) }).toMap } catch { case _: breeze.linalg.NotConvergedException => covariance.effective_hyper_parameters.map(h => (h, Double.NaN)).toMap ++ noiseModel.effective_hyper_parameters.map(h => (h, Double.NaN)).toMap } }) /** * Calculates the gradient energy of the configuration and * subtracts this from the current value of h to yield a new * hyper-parameter configuration. * * Over ride this function if you aim to implement a gradient based * hyper-parameter optimization routine like ML-II * * @param h The value of the hyper-parameters in the configuration space * @return Gradient of the objective function (marginal likelihood) as a Map * */ override def gradEnergy(h: Map[String, Double]): Map[String, Double] = calculateGradEnergyPipe(h)(trainingData, trainingDataLabels) protected def getTrainKernelMatrix[U <: Seq[I]] = { SVMKernel.buildPartitionedKernelMatrix(trainingData, trainingData.length, _blockSize, _blockSize, (x: I, y: I) => {covariance.evaluate(x, y) + noiseModel.evaluate(x, y)}) } protected def getCrossKernelMatrix[U <: Seq[I]](test: U) = SVMKernel.crossPartitonedKernelMatrix( trainingData, test, _blockSize, _blockSize, covariance.evaluate) protected def getTestKernelMatrix[U <: Seq[I]](test: U) = SVMKernel.buildPartitionedKernelMatrix( test, test.length.toLong, _blockSize, _blockSize, covariance.evaluate) /** * Calculates posterior predictive distribution for * a particular set of test data points. * * @param test A Sequence or Sequence like data structure * storing the values of the input patters. 
* */ override def predictiveDistribution[U <: Seq[I]](test: U): MultGaussianPRV = { println("\nGaussian Process Regression") println("Calculating posterior predictive distribution") //Calculate the kernel matrix on the training data val priorMeanTest = PartitionedVector( test.map(mean(_)) .grouped(_blockSize) .zipWithIndex.map(c => (c._2.toLong, DenseVector(c._1.toArray))) .toStream, test.length.toLong) val trainingMean = PartitionedVector( trainingData.map(mean(_)).toStream, trainingData.length.toLong, _blockSize ) val smoothingMat = if(!caching) { println("---------------------------------------------------------------") println("Calculating covariance matrix for training points") getTrainKernelMatrix } else { println("** Using cached training matrix **") partitionedKernelMatrixCache } println("---------------------------------------------------------------") println("Calculating covariance matrix for test points") val kernelTest = getTestKernelMatrix(test) println("---------------------------------------------------------------") println("Calculating covariance matrix between training and test points") val crossKernel = getCrossKernelMatrix(test) //Calculate the predictive mean and co-variance val (postPredictiveMean, postPredictiveCovariance) = AbstractGPRegressionModel.solve( trainingDataLabels, trainingMean, priorMeanTest, smoothingMat, kernelTest, crossKernel) MultGaussianPRV(test.length.toLong, _blockSize)( postPredictiveMean, postPredictiveCovariance) } /** * Draw three predictions from the posterior predictive distribution * * <ol> * <li>Mean or MAP estimate Y</li> * <li>Y- : The lower error bar estimate (mean - sigma*stdDeviation)</li> * <li>Y+ : The upper error bar. (mean + sigma*stdDeviation)</li> * </ol> * **/ def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int): Seq[(I, Double, Double, Double)] = { val posterior = predictiveDistribution(testData) val mean = posterior.mu.toStream val (lower, upper) = posterior.underlyingDist.confidenceInterval(sigma.toDouble) val lowerErrorBars = lower.toStream val upperErrorBars = upper.toStream println("Generating error bars") val preds = mean.zip(lowerErrorBars.zip(upperErrorBars)).map(t => (t._1, t._2._1, t._2._2)) (testData zip preds).map(i => (i._1, i._2._1, i._2._2, i._2._3)) } override def predict(point: I): Double = predictionWithErrorBars(Seq(point), 1).head._2 /** * Cache the training kernel and noise matrices * for fast access in future predictions. * */ override def persist(state: Map[String, Double]): Unit = { setState(state) val effectiveTrainingKernel: LocalScalarKernel[I] = covariance + noiseModel effectiveTrainingKernel.setBlockSizes((blockSize, blockSize)) partitionedKernelMatrixCache = SVMKernel.buildPartitionedKernelMatrix(trainingData, trainingData.length, _blockSize, _blockSize, effectiveTrainingKernel.evaluate) caching = true } /** * Forget the cached kernel & noise matrices. * */ def unpersist(): Unit = { kernelMatrixCache = null partitionedKernelMatrixCache = null caching = false } } object AbstractGPRegressionModel { private val logger = Logger.getLogger(this.getClass) /** * Calculate the parameters of the posterior predictive distribution * for a multivariate gaussian model. 
* */ def solve( trainingLabels: PartitionedVector, trainingMean: PartitionedVector, priorMeanTest: PartitionedVector, smoothingMat: PartitionedPSDMatrix, kernelTest: PartitionedPSDMatrix, crossKernel: PartitionedMatrix): (PartitionedVector, PartitionedPSDMatrix) = { val Lmat: LowerTriPartitionedMatrix = bcholesky(smoothingMat) val alpha: PartitionedVector = Lmat.t \\ (Lmat \\ (trainingLabels-trainingMean)) val v: PartitionedMatrix = Lmat \\ crossKernel val varianceReducer: PartitionedMatrix = v.t * v //Ensure that the variance reduction is symmetric val adjustedVarReducer: PartitionedMatrix = varianceReducer /*(varianceReducer.L + varianceReducer.L.t).map(bm => if(bm._1._1 == bm._1._2) (bm._1, bm._2*(DenseMatrix.eye[Double](bm._2.rows)*0.5)) else bm)*/ val reducedVariance: PartitionedPSDMatrix = new PartitionedPSDMatrix( (kernelTest - adjustedVarReducer).filterBlocks(c => c._1 >= c._2), kernelTest.rows, kernelTest.cols) (priorMeanTest + crossKernel.t * alpha, reducedVariance) } /** * Calculate the marginal log likelihood * of the training data for a pre-initialized * kernel and noise matrices. * * @param trainingData The function values assimilated as a [[DenseVector]] * * @param kernelMatrix The kernel matrix of the training features * * */ def logLikelihood(trainingData: DenseVector[Double], kernelMatrix: DenseMatrix[Double]): Double = { val smoothingMat = kernelMatrix try { val Lmat = cholesky(smoothingMat) val alpha = Lmat.t \ (Lmat \ trainingData) 0.5*((trainingData dot alpha) + trace(log(Lmat)) + trainingData.length*math.log(2*math.Pi)) } catch { case _: breeze.linalg.NotConvergedException => { logger.warn("Cholesky decomposition of kernel matrix not converged ... Returning +Inf") Double.PositiveInfinity } case _: breeze.linalg.MatrixNotSymmetricException => { logger.warn("Kernel matrix not symmetric ... Returning NaN") Double.NaN } } } /** * Calculate the marginal log likelihood * of the training data for a pre-initialized * kernel and noise matrices. 
* * @param trainingData The function values assimilated as a [[DenseVector]] * * @param kernelMatrix The kernel matrix of the training features * * */ def logLikelihood(trainingData: PartitionedVector, kernelMatrix: PartitionedPSDMatrix): Double = { val smoothingMat = kernelMatrix try { val Lmat = bcholesky(smoothingMat) val alpha: PartitionedVector = Lmat.t \\ (Lmat \\ trainingData) val d: Double = trainingData dot alpha 0.5*(d + btrace(blog(Lmat)) + trainingData.rows*math.log(2*math.Pi)) } catch { case _: breeze.linalg.NotConvergedException => Double.PositiveInfinity case _: breeze.linalg.MatrixNotSymmetricException => Double.PositiveInfinity } } def apply[M <: AbstractGPRegressionModel[Seq[(DenseVector[Double], Double)], DenseVector[Double]]](data: Seq[(DenseVector[Double], Double)], cov: LocalScalarKernel[DenseVector[Double]], noise: LocalScalarKernel[DenseVector[Double]] = new DiracKernel(1.0), order: Int = 0, ex: Int = 0, meanFunc: DataPipe[DenseVector[Double], Double] = DataPipe(_ => 0.0)): M = { assert(ex >= 0 && order >= 0, "Non Negative values for order and ex") if(order == 0) new GPRegression(cov, noise, data).asInstanceOf[M] else if(order > 0 && ex == 0) new GPNarModel(order, cov, noise, data).asInstanceOf[M] else new GPNarXModel(order, ex, cov, noise, data).asInstanceOf[M] } /** * Create an instance of [[AbstractGPRegressionModel]] for a * particular data type [[T]] * * @tparam T The type of the training data * @tparam I The type of the input patterns in the data set of type [[T]] * * @param cov The covariance function * @param noise The noise covariance function * @param meanFunc The trend or mean function * @param trainingdata The actual data set of type [[T]] * @param transform An implicit conversion from [[T]] to [[Seq]] represented as a [[DataPipe]] * */ def apply[T, I: ClassTag]( cov: LocalScalarKernel[I], noise: LocalScalarKernel[I], meanFunc: DataPipe[I, Double])( trainingdata: T, num: Int)( implicit transform: DataPipe[T, Seq[(I, Double)]]) = { val num_points = if(num > 0) num else transform(trainingdata).length new AbstractGPRegressionModel[T, I](cov, noise, trainingdata, num_points, meanFunc) { /** * Convert from the underlying data structure to * Seq[(I, Y)] where I is the index set of the GP * and Y is the value/label type. **/ override def dataAsSeq(data: T) = transform(data) } } /** * Create an instance of [[GPBasisFuncRegressionModel]] for a * particular data type [[T]] * * @tparam T The type of the training data * @tparam I The type of the input patterns in the data set of type [[T]] * * @param cov The covariance function * @param noise The noise covariance function * @param basisFunc A [[DataPipe]] transforming the input features to basis function components. 
* @param basis_param_prior A [[MultGaussianRV]] which is the prior * distribution on basis function coefficients * @param trainingdata The actual data set of type [[T]] * @param transform An implicit conversion from [[T]] to [[Seq]] represented as a [[DataPipe]] * */ def apply[T, I: ClassTag]( cov: LocalScalarKernel[I], noise: LocalScalarKernel[I], basisFunc: DataPipe[I, DenseVector[Double]], basis_param_prior: MultGaussianRV)( trainingdata: T, num: Int)( implicit transform: DataPipe[T, Seq[(I, Double)]]) = { val num_points = if(num > 0) num else transform(trainingdata).length new GPBasisFuncRegressionModel[T, I](cov, noise, trainingdata, num_points, basisFunc, basis_param_prior) { /** * Convert from the underlying data structure to * Seq[(I, Y)] where I is the index set of the GP * and Y is the value/label type. **/ override def dataAsSeq(data: T) = transform(data) } } } abstract class KroneckerGPRegressionModel[T, I: ClassTag, J: ClassTag]( cov: KroneckerProductKernel[I, J], n: KroneckerProductKernel[I, J], data: T, num: Int, meanFunc: DataPipe[(I, J), Double] = DataPipe((_:(I, J)) => 0.0)) extends AbstractGPRegressionModel[T, (I,J)](cov, n, data, num, meanFunc)
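// --- Usage sketch (not part of the original file) ---------------------------
// Builds a GP regression model over DenseVector inputs via the companion
// `apply` defined above and queries it with error bars. The RBFKernel and
// DiracKernel constructors are assumed from the dynaml.kernels package; verify
// their signatures against the library version you are using.
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.kernels.{DiracKernel, RBFKernel}
import io.github.mandar2812.dynaml.pipes.DataPipe

object GPRegressionUsageSketch {
  type Pattern = (DenseVector[Double], Double)

  // The training data is already a Seq[(I, Double)], so the transform is identity.
  implicit val transform: DataPipe[Seq[Pattern], Seq[Pattern]] =
    DataPipe((s: Seq[Pattern]) => s)

  def fit(data: Seq[Pattern]): Unit = {
    val model = AbstractGPRegressionModel(
      new RBFKernel(1.5), new DiracKernel(0.1),
      DataPipe((_: DenseVector[Double]) => 0.0))(data, data.length)

    // Posterior mean with +/- 2 sigma error bars at the training inputs.
    val preds = model.predictionWithErrorBars(data.map(_._1), 2)
    preds.foreach { case (x, mean, lower, upper) => println(s"$x -> $mean [$lower, $upper]") }
  }
}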
amitkumarj441/DynaML
build.sbt
import sbt._ import java.io.File import Dependencies._ import sbtbuildinfo.BuildInfoPlugin.autoImport._ maintainer := "<NAME> <<EMAIL>>" packageSummary := "Scala Library/REPL for Machine Learning Research" packageDescription := "DynaML is a Scala environment for conducting research and education in Machine Learning. DynaML comes packaged with a powerful library of classes for various predictive models and a Scala REPL where one can not only build custom models but also play around with data work-flows. It can also be used as an educational/research tool for data analysis." val mainVersion = "v1.5.3-beta.3" val dataDirectory = settingKey[File]("The directory holding the data files for running example scripts") val baseSettings = Seq( organization := "io.github.mandar2812", scalaVersion in ThisBuild := scala, resolvers in ThisBuild ++= Seq( "jzy3d-releases" at "http://maven.jzy3d.org/releases", "Scalaz Bintray Repo" at "http://dl.bintray.com/scalaz/releases", "BeDataDriven" at "https://nexus.bedatadriven.com/content/groups/public", Resolver.sonatypeRepo("public"), Resolver.sonatypeRepo("snapshots")), scalacOptions ++= Seq("-optimise", "-Yclosure-elim", "-Yinline", "-target:jvm-1.8") ) lazy val commonSettings = Seq( libraryDependencies ++= (linearAlgebraDependencies ++ baseDependencies ++ loggingDependency ++ apacheSparkDependency) ) lazy val settingsCore = Seq( libraryDependencies ++= ( chartsDependencies ++ tinkerpopDependency ++ openMLDependency ++ rejinDependency ++ rPackages ++ imageDependencies ++ dataFormatDependencies ++ tensorflowDependency ++ replDependency) ) lazy val pipes = (project in file("dynaml-pipes")).settings(baseSettings:_*) .settings(commonSettings:_*) .settings( name := "dynaml-pipes", version := mainVersion ) lazy val core = (project in file("dynaml-core")).settings(baseSettings) .settings(commonSettings:_*) .settings(settingsCore:_*) .enablePlugins(JavaAppPackaging, BuildInfoPlugin) .dependsOn(pipes) .settings( name := "dynaml-core", version := mainVersion ) lazy val examples = (project in file("dynaml-examples")) .settings(baseSettings:_*) .settings(commonSettings:_*) .settings( name := "dynaml-examples", version := mainVersion ).dependsOn(pipes, core) lazy val repl = (project in file("dynaml-repl")).enablePlugins(BuildInfoPlugin) .settings(baseSettings:_*) .settings( name := "dynaml-repl", version := mainVersion, buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion), buildInfoPackage := "io.github.mandar2812.dynaml.repl", buildInfoUsePackageAsPath := true, libraryDependencies ++= (baseDependencies ++ replDependency ++ commons_io) ) lazy val notebook = (project in file("dynaml-notebook")).enablePlugins(JavaServerAppPackaging) .settings(baseSettings:_*) .settings( name := "dynaml-notebook", version := mainVersion, libraryDependencies ++= notebookInterfaceDependency ).dependsOn(core, examples, pipes, repl) .settings( mappings in Universal ++= Seq({ // we are using the reference.conf as default application.conf // the user can override settings here val init = (resourceDirectory in Compile).value / "DynaMLInit.scala" init -> "conf/DynaMLInit.scala" }, { val banner = (resourceDirectory in Compile).value / "dynamlBanner.txt" banner -> "conf/banner.txt" }, { val zeppelin_env = (resourceDirectory in Compile).value / "zeppelin-site.xml" zeppelin_env -> "conf/zeppelin-site.xml" }, { val zeppelin_shiro = (resourceDirectory in Compile).value / "shiro.ini.template" zeppelin_shiro -> "conf/shiro.ini" }, { val zeppelinConf = (resourceDirectory in 
Compile).value / "interpreter-setting.json" zeppelinConf -> "lib/interpreter-setting.json" }, { val common = (resourceDirectory in Compile).value / "common.sh" common -> "bin/common.sh" }, { val intp = (resourceDirectory in Compile).value / "interpreter.sh" intp -> "bin/interpreter.sh" }) ) lazy val DynaML = (project in file(".")).enablePlugins(JavaAppPackaging, BuildInfoPlugin, sbtdocker.DockerPlugin) .settings(baseSettings:_*) .dependsOn(core, examples, pipes, repl) .settings( name := "DynaML", version := mainVersion, fork in run := true, mainClass in Compile := Some("io.github.mandar2812.dynaml.DynaML"), buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion), buildInfoPackage := "io.github.mandar2812.dynaml.repl", buildInfoUsePackageAsPath := true, mappings in Universal ++= Seq({ // we are using the reference.conf as default application.conf // the user can override settings here val init = (resourceDirectory in Compile).value / "DynaMLInit.scala" init -> "conf/DynaMLInit.scala" }, { val banner = (resourceDirectory in Compile).value / "dynamlBanner.txt" banner -> "conf/banner.txt" }, { val zeppelin_env = (resourceDirectory in Compile).value / "zeppelin-site.xml" zeppelin_env -> "conf/zeppelin-site.xml" }, { val zeppelin_shiro = (resourceDirectory in Compile).value / "shiro.ini.template" zeppelin_shiro -> "conf/shiro.ini" }, { val zeppelinConf = (resourceDirectory in Compile).value / "interpreter-setting.json" zeppelinConf -> "lib/interpreter-setting.json" }, { val common = (resourceDirectory in Compile).value / "common.sh" common -> "bin/common.sh" }, { val intp = (resourceDirectory in Compile).value / "interpreter.sh" intp -> "bin/interpreter.sh" }), javaOptions in Universal ++= Seq( // -J params will be added as jvm parameters "-J-Xmx2048m", "-J-Xms64m" ), dataDirectory := new File("data/"), initialCommands in console := """io.github.mandar2812.dynaml.DynaML.main(Array())""", dockerfile in docker := { val appDir: File = stage.value val targetDir = "/app" new Dockerfile { from("openjdk:8-jre") entryPoint(s"$targetDir/bin/${executableScriptName.value}") copy(appDir, targetDir, chown = "daemon:daemon") } }, imageNames in docker := Seq( // Sets the latest tag ImageName(s"mandar2812/${name.value.toLowerCase}:latest"), // Sets a name with a tag that contains the project version ImageName( namespace = Some("mandar2812"), repository = name.value.toLowerCase, tag = Some(version.value) ) ) ).aggregate(core, pipes, examples, repl).settings( aggregate in publishM2 := true, aggregate in update := false)
amitkumarj441/DynaML
dynaml-pipes/src/main/scala-2.11/io/github/mandar2812/dynaml/pipes/MetaPipe.scala
<gh_stars>0 package io.github.mandar2812.dynaml.pipes /** * Data Pipe wrapper of a curried function of order 1 * @author mandar2812 date: 21/02/2017. * * */ trait MetaPipe[Source, Intermediate, Destination] extends DataPipe[Source, DataPipe[Intermediate, Destination]] { self => /** * This is a modified form of the [[>]] operator in [[DataPipe]] * The pipe composition is applied to the resultant [[DataPipe]] * produced by the [[MetaPipe.run]] and the argument. * * @tparam Further The type of the result of the parameter. * @param next The data pipe to join * */ def >>[Further](next: DataPipe[Destination, Further]) : MetaPipe[Source, Intermediate, Further] = new MetaPipe[Source, Intermediate, Further] { override def run(data: Source) = self.run(data) > next } } object MetaPipe { def apply[Source, Intermediate, Destination]( f: (Source) => (Intermediate) => (Destination)) : MetaPipe[Source, Intermediate, Destination] = new MetaPipe[Source, Intermediate, Destination] { override def run(data: Source) = DataPipe(f(data)) } } /** * Wraps a curried function of order 1 which takes 2 arguments * and returns a [[Function1]]. * @author mandar2812 date: 21/02/2017 * * */ trait MetaPipe21[A, B, C, D] extends DataPipe2[A, B, DataPipe[C, D]] { self => def >>[E](next: DataPipe[D, E]): MetaPipe21[A, B, C, E] = new MetaPipe21[A, B, C, E] { override def run(data1: A, data2: B) = self.run(data1, data2) > next } } object MetaPipe21 { def apply[A, B, C, D](f: (A, B) => (C) => D): MetaPipe21[A, B, C, D] = new MetaPipe21[A, B, C, D]{ override def run(data1: A, data2: B) = DataPipe(f(data1, data2)) } } /** * Wraps a curried function of order 1 which takes 1 arguments * and returns a [[Function2]]. * @author mandar2812 date: 21/02/2017 * * */ trait MetaPipe12[A, B, C, D] extends DataPipe[A, DataPipe2[B, C, D]] { self => def >>[E](next: DataPipe[D, E]): MetaPipe12[A, B, C, E] = new MetaPipe12[A, B, C, E] { override def run(data: A) = self.run(data) > next } } object MetaPipe12 { def apply[A, B, C, D](f: (A) => (B, C) => D): MetaPipe12[A, B, C, D] = new MetaPipe12[A, B, C, D] { override def run(data: A) = DataPipe2(f(data)) } }
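// --- Usage sketch (not part of the original file) ---------------------------
// A MetaPipe wrapping a curried scaling function, composed with a further
// DataPipe through the >> operator defined above.
object MetaPipeUsageSketch {

  // scale(factor) yields a DataPipe that multiplies every element by factor.
  val scale: MetaPipe[Double, Seq[Double], Seq[Double]] =
    MetaPipe((factor: Double) => (xs: Seq[Double]) => xs.map(_ * factor))

  // >> composes the *result* of the meta pipe with a summation step.
  val scaleThenSum: MetaPipe[Double, Seq[Double], Double] =
    scale >> DataPipe((xs: Seq[Double]) => xs.sum)

  // scaleThenSum(2.0) is a DataPipe[Seq[Double], Double]; applying it gives 12.0.
  val result: Double = scaleThenSum(2.0)(Seq(1.0, 2.0, 3.0))
}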
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/SimData.scala
<reponame>amitkumarj441/DynaML package io.github.mandar2812.dynaml.models.statespace import breeze.numerics.{exp, log} import breeze.stats.distributions.{Exponential, Rand, Uniform} import POMP._ import akka.stream.scaladsl.Source import DataTypes._ import scala.Stream._ object SimData { /** * Stream of sde simulation which may make computation faster * @param x0 the starting value of the stream * @param t0 the starting time of the stream * @param precision the step size of the stream 10e(-precision) * @param stepFun the stepping function to use to generate the SDE Stream * @return a lazily evaluated stream from t0 */ def simSdeStream( x0: State, t0: Time, totalIncrement: TimeIncrement, precision: Int, stepFun: (State, TimeIncrement) => Rand[State]): Stream[Sde] = { val deltat: TimeIncrement = Math.pow(10, -precision) // define a recursive stream from t0 to t = t0 + totalIncrement stepping by 10e-precision lazy val stream: Stream[Sde] = Stream.cons(Sde(t0, x0), stream map (x => Sde(x.time + deltat, stepFun(x.state, deltat).draw))). takeWhile (s => s.time <= t0 + totalIncrement) stream } /** * Simulates an SDE at any specified times */ def simSdeIrregular( x0: State, times: List[Time], stepFun: (State, TimeIncrement) => Rand[State]): Vector[Sde] = { val t0 = times.head times.tail.foldLeft(Vector(Sde(t0, x0)))((a, t1) => { val dt = t1 - a.head.time val x1 = stepFun(a.head.state, dt).draw Sde(t1, x1) +: a }).reverse } /** * Specialist function for simulating the log-Gaussian Cox-Process using thinning */ def simLGCP( start: Time, end: Time, mod: StateSpaceModel, precision: Int): Vector[Data] = { // generate an SDE Stream val stateSpace = simSdeStream(mod.x0.draw, start, end - start, precision, mod.stepFunction) // Calculate the upper bound of the stream val upperBound = stateSpace.map(s => mod.f(s.state, s.time)). map(exp(_)).max def loop(lastEvent: Time, eventTimes: Vector[Data]): Vector[Data] = { // sample from an exponential distribution with the upper bound as the parameter val t1 = lastEvent + Exponential(upperBound).draw if (t1 > end) { eventTimes } else { // drop the elements we don't need from the stream, then calculate the hazard near that time val statet1 = stateSpace.takeWhile(s => s.time <= t1) val hazardt1 = statet1.map(s => mod.f(s.state, s.time)).last val stateEnd = statet1.last.state val gamma = mod.f(stateEnd, t1) val eta = mod.link(gamma) if (Uniform(0,1).draw <= exp(hazardt1)/upperBound) { loop(t1, Data(t1, true, Some(eta), Some(gamma), Some(statet1.last.state)) +: eventTimes) } else { loop(t1, eventTimes) } } } loop(start, stateSpace.map{ s => { val gamma = mod.f(s.state, s.time) val eta = mod.link(gamma) Data(s.time, false, Some(eta), Some(gamma), Some(s.state)) }}.toVector ) } /** * Generates a vector of event times from the Log-Gaussian Cox-Process * by thinning an exponential process */ def simLGCPEvents( start: Time, end: Time, mod: StateSpaceModel, precision: Int): Vector[Data] = { // generate an SDE Stream val stateSpace = simSdeStream(mod.x0.draw, start, end - start, precision, mod.stepFunction) // Calculate the upper bound of the stream val upperBound = stateSpace.map(s => mod.f(s.state, s.time)). 
map(exp(_)).max def loop(lastEvent: Time, eventTimes: Vector[Data]): Vector[Data] = { // sample from an exponential distribution with the upper bound as the parameter val t1 = lastEvent + Exponential(upperBound).draw if (t1 > end) { eventTimes.reverse } else { // drop the elements we don't need from the stream, then calculate the hazard near that time val statet1 = stateSpace.takeWhile(s => s.time <= t1) val hazardt1 = statet1.map(s => mod.f(s.state, s.time)).last val stateEnd = statet1.last.state val gamma = mod.f(stateEnd, t1) val eta = mod.link(gamma) if (Uniform(0,1).draw <= exp(hazardt1)/upperBound) { loop(t1, Data(t1, true, Some(eta), Some(gamma), Some(stateEnd)) +: eventTimes) } else { loop(t1, eventTimes) } } } loop(start, Vector()) } def simStep( x0: State, t0: Time, deltat: TimeIncrement, mod: StateSpaceModel): Data = { val x1 = mod.stepFunction(x0, deltat).draw val gamma = mod.f(x1, t0) val eta = mod.link(gamma) val y1 = mod.observation(eta).draw Data(t0, y1, Some(eta), Some(gamma), Some(x1)) } /** * Simulate data from a list of times, allowing for irregular observations */ def simData(times: Seq[Time], mod: StateSpaceModel): Vector[Data] = { val x0 = mod.x0.draw val d0 = simStep(x0, times.head, 0, mod) val data = times.tail.foldLeft(Vector[Data](d0)) { (acc, t) => val deltat = t - acc.head.t val x0 = acc.head.sdeState.get val d = simStep(x0, t, deltat, mod) d +: acc } data.reverse } /** * Simulate data as an Akka Stream, with regular time intervals * @param mod The model to simulate from, can be composed or single * @param precision Used to determine the step length, dt = 10**(-precision) */ def simStream(mod: StateSpaceModel, precision: Int, t0: Time): Source[Data, Any] = { val dt = math.pow(10, -precision) val x0 = mod.x0.draw val initialObservation = simStep(x0, t0, 0, mod) Source.unfold(initialObservation){d => Some((simStep(d.sdeState.get, d.t + dt, dt, mod), d)) } } }
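// --- Usage sketch (not part of the original file) ---------------------------
// Simulating observations from an existing state space model at irregular
// times with simData defined above. The `mod: StateSpaceModel` value is
// assumed to be constructed elsewhere, and Time is assumed to alias Double
// (as suggested by the time arithmetic in this file).
object SimDataUsageSketch {

  def simulate(mod: StateSpaceModel): Unit = {
    // Irregularly usable, here simply regular, observation times on [0, 10).
    val times: Seq[Double] = (0 until 100).map(_ * 0.1)

    // Draw one realisation of latent states and observations at those times.
    val observations = SimData.simData(times, mod)
    observations.take(5).foreach(println)
  }
}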
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/probability/package.scala
<gh_stars>0 /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml import breeze.stats.distributions.ContinuousDistr import io.github.mandar2812.dynaml.pipes.DataPipe import scalaxy.streams.optimize import spire.algebra.InnerProductSpace /** * Contains helper functions pertaining to random variables. * @author mandar2812 date 11/01/2017. * */ package object probability { /** * Number of monte carlo samples to generate. * */ var candidates: Int = 10000 /** * Calculate the monte carlo estimate of the * first moment/expectation of a random variable. * * @tparam I The domain of the random variable. * @param rv The random variable from which to sample * @param f An implicit object representing an inner product on [[I]] * */ def E[@specialized(Double) I](rv: RandomVariable[I])(implicit f: InnerProductSpace[I, Double]): I = optimize { f.divr( rv.iid(candidates) .sample() .reduce( (x, y) => f.plus(x,y) ), candidates.toDouble) } def E(rv: RandomVariable[Double]): Double = optimize { rv.iid(candidates).sample().sum/candidates.toDouble } /** * Calculate the entropy of a random variable * */ def entropy[I, Distr <: ContinuousDistr[I]](rv: ContinuousRVWithDistr[I, Distr]): Double = { val logp_x: RandomVariable[Double] = MeasurableFunction[I, Double, ContinuousRVWithDistr[I, Distr]]( rv, DataPipe((sample: I) => -1d*rv.underlyingDist.logPdf(sample))) E(logp_x) } /** * KL divergence: * @param p The base random variable * @param q The random variable used to approximate p * @return The Kulback Leibler divergence KL(P||Q) * */ def KL[I, Distr <: ContinuousDistr[I]]( p: ContinuousRVWithDistr[I, Distr])( q: ContinuousRVWithDistr[I, Distr]): Double = { val log_q_p: RandomVariable[Double] = MeasurableFunction[I, Double, ContinuousRVWithDistr[I, Distr]]( p, DataPipe((sample: I) => p.underlyingDist.logPdf(sample)-q.underlyingDist.logPdf(sample))) E(log_q_p) } /** * Calculate the (monte carlo approximation to) mean, median, mode, lower and upper confidence interval. * * * * @param r Continuous random variable in question * @param alpha Probability of exclusion, i.e. return 100(1-alpha) % confidence interval. * alpha should be between 0 and 1 * */ def OrderStats(r: ContinuousRVWithDistr[Double, ContinuousDistr[Double]], alpha: Double) : (Double, Double, Double, Double, Double) = { val samples = r.iid(candidates).sample() val mean = samples.sum/candidates.toDouble val median = utils.median(samples) val (lowerbar, higherbar) = ( utils.quickselect(samples, math.ceil(candidates*alpha*0.5).toInt), utils.quickselect(samples, math.ceil(candidates*(1.0 - 0.5*alpha)).toInt)) val uDist = r.underlyingDist val mode = samples.map(s => (uDist.logPdf(s), s)).max._2 (mean, median, mode, lowerbar, higherbar) } }
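// --- Usage sketch (not part of the original file) ---------------------------
// Monte Carlo estimates using the helpers above. GaussianRV is assumed to be
// the univariate Gaussian random variable provided elsewhere in the
// dynaml.probability package; its constructor signature is an assumption.
import io.github.mandar2812.dynaml.probability._

object ProbabilityUsageSketch {
  // More samples give a tighter Monte Carlo estimate.
  candidates = 50000

  val p = GaussianRV(0.0, 1.0)
  val q = GaussianRV(0.5, 1.5)

  val meanEstimate = E(p)       // should be close to 0.0
  val entropyOfP   = entropy(p) // close to 0.5*log(2*Pi*e) for a standard normal
  val klDivergence = KL(p)(q)   // KL(P || Q) is always >= 0
}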
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/graphics/plot3d/package.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.graphics import org.jzy3d.analysis.{AnalysisLauncher, IAnalysis} import org.jzy3d.colors.colormaps.{ColorMapRainbow, IColorMap} import org.jzy3d.maths.Coord2d /** * <h3>DynaML 3d Plotting API</h3> * * Contains the user API for rendering 3d surface plots * in a convenient fashion. * * The [[plot3d.draw()]] methods provide for generating * 3d surfaces. * * To render the image on the system GUI, call the * [[plot3d.show()]] method using the result returned by [[plot3d.draw()]] * * @author mandar2812 date: 2018/05/11 * */ package object plot3d { /** * Generate a 3 dimensional surface from a function. * */ def draw( function: (Double, Double) => Double, xAxisLimits: (Float, Float) = (-3.0f, 3.0f), yAxisLimits: (Float, Float) = (-3.0f, 3.0f), xAxisBins : Int = 100, yAxisBins : Int = 100, displayWireFrame: Boolean = true, colorMap: IColorMap = new ColorMapRainbow): Surface = new Surface( function, xAxisLimits, yAxisLimits, xAxisBins, yAxisBins, displayWireFrame, colorMap) /** * Generated a tessellated surface from a [[Stream]] * of x, y & z coordinates. * */ def draw( points: Traversable[((Double, Double), Double)], colorMap: IColorMap): DelauneySurface = new DelauneySurface( points.map(p => ((p._1._1.toFloat, p._1._2.toFloat), p._2.toFloat)), colorMap) def draw(points: Traversable[((Double, Double), Double)]): DelauneySurface = new DelauneySurface(points.map(p => ((p._1._1.toFloat, p._1._2.toFloat), p._2.toFloat))) def draw(data: Traversable[(Double, Double)], numBins: Int): Histogram3D = new Histogram3D(data.map(d => new Coord2d(d._1.toFloat, d._2.toFloat)).toIterable, numBins) /** * Render a 3d surface on the system GUI. * */ def show(chart: IAnalysis): Unit = AnalysisLauncher.open(chart) }
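// --- Usage sketch (not part of the original file) ---------------------------
// Rendering a 3d surface of sin(x)*cos(y) with the draw/show helpers above;
// show() opens a jzy3d window on the system GUI.
import io.github.mandar2812.dynaml.graphics.plot3d

object Plot3dUsageSketch {
  def main(args: Array[String]): Unit = {
    val surface = plot3d.draw(
      (x: Double, y: Double) => math.sin(x) * math.cos(y),
      xAxisLimits = (-3.0f, 3.0f),
      yAxisLimits = (-3.0f, 3.0f))

    plot3d.show(surface)
  }
}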
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/evaluation/RegressionMetricsSpark.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.evaluation import breeze.linalg.DenseVector import com.quantifind.charts.Highcharts._ import org.apache.log4j.{Priority, Logger} import org.apache.spark.Accumulator import org.apache.spark.broadcast.Broadcast import org.apache.spark.rdd.RDD import scalax.chart.module.ChartFactories.{XYBarChart, XYLineChart, XYAreaChart} /** * Class implementing the calculation * of regression performance evaluation * metrics * * */ class RegressionMetricsSpark(protected val scores: RDD[(Double, Double)], val len: Long) extends Metrics[Double] { override protected val scoresAndLabels = List() private val logger = Logger.getLogger(this.getClass) val length = len val (mae, rmse, rsq, rmsle):(Double, Double, Double, Double) = RegressionMetricsSpark.computeKPIs(scores, length) def residuals() = this.scores.map((s) => (s._1 - s._2, s._2)) def scores_and_labels() = this.scoresAndLabels override def print(): Unit = { println("Regression Model Performance: "+name) println("============================") scala.Predef.print("MAE = ") pprint.pprintln(mae) scala.Predef.print("RMSE = ") pprint.pprintln(rmse) scala.Predef.print("RMSLE = ") pprint.pprintln(rmsle) /* scala.Predef.print("R^2 = ") pprint.pprintln(Rsq) scala.Predef.print("Corr. Coefficient = ") pprint.pprintln(corr) scala.Predef.print("Model Yield = ") pprint.pprintln(modelYield) scala.Predef.print("Std Dev of Residuals = ") pprint.pprintln(sigma) */ } override def kpi() = DenseVector(mae, rmse, rsq) override def generatePlots(): Unit = { implicit val theme = org.jfree.chart.StandardChartTheme.createDarknessTheme val residuals = this.residuals().map(_._1).collect().toList println("Generating Plot of Residuals") /*val chart1 = XYBarChart(roccurve, title = "Residuals", legend = true) chart1.show()*/ histogram(residuals, numBins = 20) title("Histogram of Regression Residuals") } } object RegressionMetricsSpark { def computeKPIs(scoresAndLabels: RDD[(Double, Double)], size: Long) : (Double, Double, Double, Double) = { val mean: Accumulator[Double] = scoresAndLabels.context.accumulator(0.0, "mean") val err:DenseVector[Double] = scoresAndLabels.map((sc) => { val diff = sc._1 - sc._2 mean += sc._2 val difflog = math.pow(math.log(1 + math.abs(sc._1)) - math.log(math.abs(sc._2) + 1), 2) DenseVector(math.abs(diff), math.pow(diff, 2.0), difflog) }).reduce((a,b) => a+b) val SS_res = err(1) val mu: Broadcast[Double] = scoresAndLabels.context.broadcast(mean.value/size.toDouble) val SS_tot = scoresAndLabels.map((sc) => math.pow(sc._2 - mu.value, 2.0)).sum() val rmse = math.sqrt(SS_res/size.toDouble) val mae = err(0)/size.toDouble val rsq = if(1/SS_tot != Double.NaN) 1 - (SS_res/SS_tot) else 0.0 val rmsle = err(2)/size.toDouble (mae, rmse, rsq, rmsle) } }
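// --- Usage sketch (not part of the original file) ---------------------------
// Evaluating (prediction, target) pairs held in a Spark RDD with the class
// above. A live SparkContext is assumed to be available, e.g. inside a job.
import org.apache.spark.SparkContext

object RegressionMetricsSparkUsageSketch {
  def evaluate(sc: SparkContext): Unit = {
    // (prediction, target) pairs.
    val scoresAndLabels = sc.parallelize(Seq((1.1, 1.0), (1.9, 2.0), (3.2, 3.0)))

    val metrics = new RegressionMetricsSpark(scoresAndLabels, scoresAndLabels.count())
    metrics.print()                 // prints MAE, RMSE and RMSLE
    println(metrics.kpi())          // DenseVector(mae, rmse, rsq)
  }
}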
amitkumarj441/DynaML
project/plugins.sbt
logLevel := Level.Info

addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4")

addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0")

addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.7.0")

//addSbtPlugin("ch.epfl.scala.index" % "sbt-scaladex" % "0.1.3")
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/svm/KernelSparkModel.scala
package io.github.mandar2812.dynaml.models.svm import breeze.linalg.{DenseMatrix, DenseVector, norm} import breeze.numerics.sqrt import io.github.mandar2812.dynaml.analysis.VectorField import org.apache.log4j.Logger import org.apache.spark.mllib.linalg.Vectors import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.mllib.stat.Statistics import org.apache.spark.rdd.RDD import io.github.mandar2812.dynaml.models.KernelizedModel import io.github.mandar2812.dynaml.kernels._ import io.github.mandar2812.dynaml.prototype.{GreedyEntropySelector, QuadraticRenyiEntropy} import org.apache.spark.storage.StorageLevel import scala.util.Random /** * Implementation of the Fixed Size * Kernel based LS SVM * * Fixed Size implies that the model * chooses a subset of the original * data to calculate a low rank approximation * to the kernel matrix. * * Feature Extraction is done in the primal * space using the Nystrom approximation. * * @author mandar2812 */ abstract class KernelSparkModel(data: RDD[LabeledPoint], task: String) extends KernelizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint], DenseVector[Double], DenseVector[Double], Double, Int, Int](task) with Serializable { override protected val g = LSSVMSparkModel.indexedRDD(data) protected var processed_g = g val colStats = Statistics.colStats(g.map(_._2.features)) override protected val nPoints: Long = colStats.count protected var featuredims: Int = g.first()._2.features.size protected var effectivedims: Int = featuredims + 1 protected var prototypes: List[DenseVector[Double]] = List() implicit val vecField = VectorField(featuredims) val logger = Logger.getLogger(this.getClass) override def getXYEdges: RDD[LabeledPoint] = data def getRegParam: Double def setRegParam(l: Double): this.type def setMiniBatchFraction(f: Double): this.type = { assert(f <= 1.0 && f >= 0.0, "Mini Batch Fraction must be between 0 and 1.0") this.optimizer.setMiniBatchFraction(f) this } override def initParams() = DenseVector.ones[Double](effectivedims) override def optimumSubset(M: Int): Unit = { points = (0L to this.npoints - 1).toList if (M < this.npoints) { logger.info("Calculating sample variance of the data set") //Get the original features of the data //Calculate the column means and variances val (mean, variance) = (DenseVector(colStats.mean.toArray), DenseVector(colStats.variance.toArray)) //Use the adjusted value of the variance val adjvarance: DenseVector[Double] = variance :/= (npoints.toDouble - 1) val density = new GaussianDensityKernel logger.info("Using Silvermans rule of thumb to set bandwidth of density kernel") logger.info("Std Deviation of the data: " + adjvarance.toString()) logger.info("norm: " + norm(adjvarance)) density.setBandwidth(DenseVector.tabulate[Double](featuredims) { i => 1.06 * math.sqrt(adjvarance(i)) / math.pow(npoints, 0.2) }) logger.info("Building low rank approximation to kernel matrix") prototypes = GreedyEntropySelector.subsetSelectionQRE(this.g, new QuadraticRenyiEntropy(density), M, 25, 0.00001) } } override def applyKernel(kernel: SVMKernel[DenseMatrix[Double]], M: Int = math.sqrt(npoints).toInt): Unit = { if(M != this.prototypes.length) { this.optimumSubset(M) } if(this.processed_g.first()._2.features.size > featuredims) { this.processed_g.unpersist() } val (mean, variance) = (DenseVector(colStats.mean.toArray), DenseVector(colStats.variance.toArray)) val scalingFunc = KernelSparkModel.scalePrototype(mean, variance) _ val scaledPrototypes = prototypes map scalingFunc val kernelMatrix = 
kernel.buildKernelMatrix(scaledPrototypes, M) val decomposition = kernelMatrix.eigenDecomposition(M) var selectedEigenVectors: List[DenseMatrix[Double]] = List() var selectedEigenvalues: List[Double] = List() (0 until M).foreach((p) => { //Check the Girolami criterion // (1.u)^2 >= 2M/(1+M) //This increases parsimony val u = decomposition._2(::, p) if(math.pow(norm(u,1), 2.0) >= 2.0*M/(1.0+M.toDouble)) { selectedEigenvalues :+= decomposition._1(p) selectedEigenVectors :+= u.toDenseMatrix } }) logger.info("Selected Components: "+selectedEigenvalues.length) effectivedims = selectedEigenvalues.length + 1 val decomp = (DenseVector(selectedEigenvalues.toArray), DenseMatrix.vertcat(selectedEigenVectors:_*).t) this.featureMap = kernel.featureMapping(decomp)(scaledPrototypes) this.params = DenseVector.ones[Double](effectivedims) } override def applyFeatureMap: Unit = { val meanb = g.context.broadcast(DenseVector(colStats.mean.toArray)) val varianceb = g.context.broadcast(DenseVector(colStats.variance.toArray)) val featureMapb = g.context.broadcast(featureMap) this.processed_g = g.map((point) => { val vec = DenseVector(point._2.features.toArray) val ans = vec - meanb.value ans :/= sqrt(varianceb.value) (point._1, new LabeledPoint( point._2.label, Vectors.dense(DenseVector.vertcat( featureMapb.value(ans), DenseVector(1.0)) .toArray) )) }).persist(StorageLevel.MEMORY_ONLY_SER) } override def trainTest(test: List[Long]) = { val training_data = this.processed_g.filter((keyValue) => !test.contains(keyValue._1)).map(_._2) val test_data = this.processed_g.filter((keyValue) => test.contains(keyValue._1)).map(_._2) training_data.persist(StorageLevel.MEMORY_AND_DISK) test_data.persist(StorageLevel.MEMORY_AND_DISK) (training_data, test_data) } /** * Calculates the energy of the configuration, * in most global optimization algorithms * we aim to find an approximate value of * the hyper-parameters such that this function * is minimized. 
* * @param h The value of the hyper-parameters in the configuration space * @param options Optional parameters about configuration * @return Configuration Energy E(h) **/ override def energy(h: Map[String, Double], options: Map[String, String]): Double = { //set the kernel paramters if options is defined //then set model parameters and cross validate var kernelflag = true if(options.contains("kernel")) { val kern = options("kernel") match { case "RBF" => new RBFKernel().setHyperParameters(h) case "Polynomial" => new PolynomialKernel().setHyperParameters(h) case "Exponential" => new ExponentialKernel().setHyperParameters(h) case "Laplacian" => new LaplacianKernel().setHyperParameters(h) case "Cauchy" => new CauchyKernel().setHyperParameters(h) case "RationalQuadratic" => new RationalQuadraticKernel().setHyperParameters(h) case "Wave" => new WaveKernel().setHyperParameters(h) } //check if h and this.current_state have the same kernel params //calculate kernParam(h) //calculate kernParam(current_state) //if both differ in any way then apply //the kernel val nprototypes = if(options.contains("subset")) options("subset").toInt else math.sqrt(this.npoints).toInt val kernh = h.filter((couple) => kern.hyper_parameters.contains(couple._1)) val kerncs = current_state.filter((couple) => kern.hyper_parameters.contains(couple._1)) if(!(kernh sameElements kerncs)) { this.applyKernel(kern, nprototypes) kernelflag = false } } this.applyFeatureMap val (_,_,e) = this.crossvalidate(4, h("RegParam"), optionalStateFlag = kernelflag) current_state = h 1.0-e } } object KernelSparkModel { def scalePrototype(mean: DenseVector[Double], variance: DenseVector[Double]) (prototype: DenseVector[Double]): DenseVector[Double] = (prototype - mean)/sqrt(variance) }
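// --- Usage sketch (not part of the original file) ---------------------------
// The companion's scalePrototype standardizes a prototype vector using
// pre-computed column means and variances (component-wise z-scoring).
import breeze.linalg.DenseVector

object ScalePrototypeUsageSketch {
  val mean     = DenseVector(1.0, 2.0)
  val variance = DenseVector(4.0, 9.0)

  // ((3 - 1)/sqrt(4), (5 - 2)/sqrt(9)) = (1.0, 1.0)
  val scaled: DenseVector[Double] =
    KernelSparkModel.scalePrototype(mean, variance)(DenseVector(3.0, 5.0))
}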
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/utils/package.scala
<filename>dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/utils/package.scala /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml import java.io.{BufferedWriter, File, FileWriter} import breeze.linalg.{DenseMatrix, DenseVector, Matrix, MatrixNotSquareException, MatrixNotSymmetricException, kron} import com.github.tototoshi.csv.{CSVReader, DefaultCSVFormat, QUOTE_NONNUMERIC} import org.renjin.script.{RenjinScriptEngine, RenjinScriptEngineFactory} import org.renjin.sexp.SEXP import spire.algebra.InnerProductSpace import scala.io.Source import scala.reflect.runtime.{universe => ru} import scala.annotation.tailrec import scala.util.matching.Regex import sys.process._ import java.net.URL import breeze.stats.distributions.ContinuousDistr import io.github.mandar2812.dynaml.algebra.PartitionedMatrix import scalaxy.streams.optimize import spire.algebra.{Eq, Field} import scala.util.Random /** * A set of pre-processing utilities * and library functions. * */ package object utils { val log1pExp: (Double) => Double = (x) => {x + math.log1p(math.exp(-x))} /** * Get a [[CSVReader]] object from a file name and delimiter * character. * * @param file The file pathname as a String * @param delim The delimiter character used * to split the csv file. * @return A [[CSVReader]] object which can be * iterated for each line. * */ def getCSVReader(file: String, delim: Char): CSVReader = { implicit object MyFormat extends DefaultCSVFormat { override val delimiter = delim override val quoting = QUOTE_NONNUMERIC } CSVReader.open(new File(file)) } /** * Extract the diagonal elements of a breeze [[DenseMatrix]] * */ def diagonal(m: DenseMatrix[Double]): DenseMatrix[Double] = { require(m.rows == m.cols, "Matrix must be square to extract diagonal") m.mapPairs((index, value) => if(index._1 == index._2) value else 0.0) } /** * Extract the diagonal elements of a [[PartitionedMatrix]] * */ def diagonal[M <: PartitionedMatrix](pm: M): PartitionedMatrix = { require(pm.rows == pm.cols, "Blocked matrix must be square to extract diagonal") pm.map(pairs => { if(pairs._1._1 == pairs._1._2) (pairs._1, diagonal(pairs._2)) else (pairs._1, DenseMatrix.zeros[Double](pairs._2.rows, pairs._2.cols)) }) } /** * Get the mean and variance of a data set * which is a [[List]] of [[DenseVector]]. * * @param data The data set. * @return A [[Tuple2]] containing the mean * and variance * n-1. 
* * */ def getStats(data: List[DenseVector[Double]]): (DenseVector[Double], DenseVector[Double]) = { @tailrec def getStatsRec(d: List[DenseVector[Double]], m: DenseVector[Double], s: DenseVector[Double], i: Int): (DenseVector[Double], DenseVector[Double]) = d match { case Nil => (m, s) case x :: rest => val mnew = m + (x - m)/(i+1).toDouble getStatsRec(rest, mnew, s + (m*:*m) - (mnew*:*mnew) + ((x*:*x) - s - (m*:*m))/(i+1).toDouble, i + 1) } val n = data.length require(n > 1, "To calculate stats size of data must be > 1") val adjustment = n.toDouble/(n-1) val (mean, biasedSigmaSq) = getStatsRec( data.tail, data.head, DenseVector.zeros[Double](data.head.length), 1) (mean, biasedSigmaSq*adjustment) } /** * Get the mean and variance of a data set * which is a [[List]] of [[DenseVector]]. * * @param data The data set. * @return A [[Tuple2]] containing the mean * and variance. * * */ def getStatsMult(data: List[DenseVector[Double]]): (DenseVector[Double], DenseMatrix[Double]) = { def getStatsRec(d: List[DenseVector[Double]], m: DenseVector[Double], s: DenseMatrix[Double], i: Int): (DenseVector[Double], DenseMatrix[Double]) = d match { case Nil => (m,s) case x :: rest => val mnew = m + (x - m)/(i+1).toDouble getStatsRec(rest, mnew, s + (m*m.t) - (mnew*mnew.t) + ((x*x.t) - s - (m*m.t))/(i+1).toDouble, i + 1) } val n = data.length require(n > 1, "To calculate stats size of data must be > 1") val adjustment = n.toDouble/(n-1) val (mean, biasedSigmaSq) = getStatsRec( data.tail, data.head, data.head * data.head.t, 1) (mean, biasedSigmaSq*adjustment) } def getMinMax(data: List[DenseVector[Double]]): (DenseVector[Double], DenseVector[Double]) = { @tailrec def getMinMaxRec(d: List[DenseVector[Double]], m: DenseVector[Double], s: DenseVector[Double], i: Int): (DenseVector[Double], DenseVector[Double]) = d match { case Nil => (m, s) case x :: rest => getMinMaxRec(rest, DenseVector((x.toArray zip m.toArray).map(c => math.min(c._1, c._2))), DenseVector((x.toArray zip s.toArray).map(c => math.max(c._1, c._2))), i - 1) } getMinMaxRec( data.tail, data.head, data.head, data.length) } /** * Implementation of the quick-select algorithm. 
* */ def quickselect(list: Stream[Double], k: Int): Double = { require(k <= list.length && k > 0, "In quick-select, the search index must be between 1 and length of list") val random: (Int) => Int = Random.nextInt def quickSelectRec(list_sample: Seq[Double], k: Int, pivot: Double): Double = { val split_list = list_sample.partition(_ < pivot) val s = split_list._1.length if(s == k) { pivot } else if (s == 0 && list_sample.sum == pivot * list_sample.length) { pivot } else if(s < k) { quickSelectRec(split_list._2, k - s, split_list._2(random(split_list._2.length))) } else { quickSelectRec(split_list._1, k, split_list._1(random(split_list._1.length))) } } val arrayStream = list.toArray quickSelectRec(arrayStream, k-1, arrayStream(Random.nextInt(arrayStream.length))) } def median(list: Stream[Double]): Double = { val random: (Int) => Int = Random.nextInt def medianK(list_sample: Seq[Double], k: Int, pivot: Double): Double = { val split_list = list_sample.partition(_ < pivot) val s = split_list._1.length if(s == k) { pivot } else if (s == 0 && list_sample.sum == pivot * list_sample.length) { pivot } else if(s < k) { medianK(split_list._2, k - s, split_list._2(random(split_list._2.length))) } else { medianK(split_list._1, k, split_list._1(random(split_list._1.length))) } } if(list.length % 2 == 0) { val medA = medianK(list, list.length/2, list(random(list.length))) val medB = medianK(list, list.length/2 - 1, list(random(list.length))) (medA + medB)/2.0 } else { medianK(list, list.length/2, list(random(list.length))) } } /** * Convert a hyper-prior specification to a continuous distribution * over [[Map]] * */ def getPriorMapDistr(d: Map[String, ContinuousDistr[Double]]) = { new ContinuousDistr[Map[String, Double]] { override def unnormalizedLogPdf(x: Map[String, Double]) = { x.map(c => d(c._1).unnormalizedLogPdf(c._2)).sum } override def logNormalizer = d.values.map(_.logNormalizer).sum override def draw() = d.mapValues(_.draw()) } } /** * Calculates the Chebyshev polynomials of the first and second kind, * in a tail recursive manner, using their recurrence relations. * */ def chebyshev(n: Int, x: Double, kind: Int = 1): Double = { require( kind >= 1 && kind <= 2, "Chebyshev function can only be of the first or second kind") def chebyshev_T(k: Int, arg: Double, a: Double, b: Double): Double = k match { case 0 => a case 1 => b case _ => chebyshev_T(k-1, arg, b, 2*arg*b - a) } val c1 = if(kind == 1) x else 2*x chebyshev_T(n, x, 1, c1) } /** * Calculate the value of the hermite polynomials * tail recursively. This is needed to calculate * the Gaussian derivatives at a point x. * */ def hermite(n: Int, x: Double): Double = { @tailrec def hermiteHelper(k: Int, x: Double, a: Double, b: Double): Double = k match { case 0 => a case 1 => b case _ => hermiteHelper(k-1, x, b, x*b - (k-1)*a) } hermiteHelper(n, x, 1, x) } /** * Calculate the value of the Legendre polynomials * tail recursively. * */ def legendre(n: Int, x: Double): Double = { @tailrec def legendreHelper(k: Int, x: Double, a: Double, b: Double): Double = k match { case 0 => a case 1 => b case _ => legendreHelper(k-1, x, b, ((2*k - 1)*x*b - (k-1)*a)/k) } legendreHelper(n, x, 1, x) } /** * Calculates the Harmonic number function * for positive real arguments. 
* */ def H(x: Double): Double = { assert(x >= 0, "Harmonic number function in DynaML takes only non-negative arguments") def hRec(arg: Double, acc: Double): Double = math.floor(arg) match { case 0 => acc case n => hRec(arg-1, acc + (1d/n)) } hRec(x, 0d) } @tailrec def factorial(n: Int, accumulator: Long = 1): Long = { if(n == 0) accumulator else factorial(n - 1, accumulator*n) } def getTypeTag[T: ru.TypeTag](obj: T) = ru.typeTag[T] def combine[A](xs: Traversable[Traversable[A]]): Seq[Seq[A]] = xs.foldLeft(Seq(Seq.empty[A])) { (x, y) => optimize { for (a <- x.view; b <- y) yield a :+ b } } def range[I](min: I, max: I, steps: Int)(implicit field: InnerProductSpace[I, Double]): Stream[I] = { val step_size = field.divr(field.minus(max, min), steps) (0 until steps).toStream.map(i => field.plus(min, field.timesr(step_size, i))) } def downloadURL(url: String, saveAs: String): Unit = new URL(url) #> new File(saveAs) !! def replace(find: String)(replace: String)(input: String): String = { val pattern = new Regex(find) pattern.replaceAllIn(input, replace) } def textFileToStream(fileName: String): Stream[String] = Source.fromFile(new File(fileName)).getLines().toStream def strReplace(fileName: String)( findStringRegex: String, replaceString: String): Stream[String] = optimize { textFileToStream(fileName).map( replace(findStringRegex)(replaceString)) } def writeToFile(destination: String)(lines: Stream[String]): Unit = { val writer = new BufferedWriter(new FileWriter(new File(destination))) lines.foreach(line => { writer.write(line+"\n") }) writer.close() } def transformData(transform: (String) => String)(lines: Stream[String]): Stream[String] = optimize { lines.map(transform) } def extractColumns( lines: Stream[String], sep: String, columns: List[Int], naStrings:Map[Int, String]): Stream[String] = { val tFunc = (line: String) => { val fields = line.split(sep) optimize { val newFields:List[String] = columns.map(col => { if (!naStrings.contains(col) || fields(col) != naStrings(col)) fields(col) else "<NA>" }) val newLine = newFields.foldLeft("")( (str1, str2) => str1+sep+str2 ) newLine.tail } } transformData(tFunc)(lines) } /** * Construct a Haar transform matrix of size n * * NOTE: n must be a power of 2. 
* * */ def haarMatrix(n: Int) = { val pos = DenseMatrix(Array(1.0, 1.0)) val neg = DenseMatrix(Array(-1.0, 1.0)) val hMat = DenseMatrix(Array(1.0, 1.0), Array(-1.0, 1.0)) def haarMatrixAcc(i: Int, hMatAcc: DenseMatrix[Double]): DenseMatrix[Double] = i match { case `n` => hMatAcc case index => haarMatrixAcc(i*2, DenseMatrix.vertcat[Double]( kron(hMatAcc, pos), kron(DenseMatrix.eye[Double](i), neg))) } haarMatrixAcc(2, hMat) } def productField[Domain, Domain1](ev: Field[Domain], ev1: Field[Domain1])( implicit eqq: Eq[Domain], eqq1: Eq[Domain1]): Field[(Domain, Domain1)] = new Field[(Domain, Domain1)] { /*override def gcd(a: (Domain, Domain1), b: (Domain, Domain1)): (Domain, Domain1) = (ev.gcd(a._1, b._1), ev1.gcd(a._2, b._2))*/ override def gcd(a: (Domain, Domain1), b: (Domain, Domain1))(implicit eqq3: Eq[(Domain, Domain1)]) = (ev.gcd(a._1, b._1), ev1.gcd(a._2, b._2)) override def lcm(a: (Domain, Domain1), b: (Domain, Domain1))(implicit eqq3: Eq[(Domain, Domain1)]) = (ev.lcm(a._1, b._1), ev1.lcm(a._2, b._2)) override def quot(a: (Domain, Domain1), b: (Domain, Domain1)): (Domain, Domain1) = (ev.quot(a._1, b._1), ev1.quot(a._2, b._2)) override def mod(a: (Domain, Domain1), b: (Domain, Domain1)): (Domain, Domain1) = (ev.mod(a._1, b._1), ev1.mod(a._2, b._2)) override def negate(x: (Domain, Domain1)): (Domain, Domain1) = (ev.negate(x._1), ev1.negate(x._2)) override def zero: (Domain, Domain1) = (ev.zero, ev1.zero) override def one: (Domain, Domain1) = (ev.one, ev1.one) override def plus(x: (Domain, Domain1), y: (Domain, Domain1)): (Domain, Domain1) = (ev.plus(x._1, y._1), ev1.plus(x._2, y._2)) override def div(x: (Domain, Domain1), y: (Domain, Domain1)): (Domain, Domain1) = (ev.div(x._1, y._1), ev1.div(x._2, y._2)) override def times(x: (Domain, Domain1), y: (Domain, Domain1)): (Domain, Domain1) = (ev.times(x._1, y._1), ev1.times(x._2, y._2)) } def isSquareMatrix[V](mat: Matrix[V]): Unit = if (mat.rows != mat.cols) throw new MatrixNotSquareException def isSymmetricMatrix[V](mat: Matrix[V]): Unit = { isSquareMatrix(mat) optimize { for (i <- 0 until mat.rows; j <- 0 until i) if (mat(i,j) != mat(j,i)) throw new MatrixNotSymmetricException } } /** * Encapsulates renjin script engine and its capabilities. * */ object Renjin { private val r_engine_factory = new RenjinScriptEngineFactory() val renjin: RenjinScriptEngine = r_engine_factory.getScriptEngine() val r: String => SEXP = (s: String) => renjin.eval(s).asInstanceOf[SEXP] val R: java.io.File => Unit = (f: java.io.File) => renjin.eval(f) } }
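/*
 * A minimal REPL-style usage sketch of the statistics and special-function helpers defined above;
 * the sample vectors and evaluation points are made up purely for illustration.
 */
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.utils

val samples = List(
  DenseVector(1.0, 2.0),
  DenseVector(3.0, 5.0),
  DenseVector(5.0, 8.0))

//sample mean and (bias corrected) variance of each dimension
val (sampleMean, sampleVariance) = utils.getStats(samples)

//median via the randomised selection routine
val med = utils.median(Stream(3.0, 1.0, 4.0, 1.0, 5.0))

//Chebyshev polynomial of the first kind T_3 and the Hermite polynomial, both at x = 0.5
val t3  = utils.chebyshev(3, 0.5)
val he3 = utils.hermite(3, 0.5)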
amitkumarj441/DynaML
dynaml-notebook/src/main/scala-2.11/io/github/mandar2812/dynaml/DynaZeppelin.scala
<gh_stars>0 package io.github.mandar2812.dynaml import java.io.{InputStream, OutputStream} import java.nio.file.NoSuchFileException import ammonite.interp.Interpreter import ammonite.ops.{Path, read} import ammonite.repl.{RemoteLogger, Repl} import ammonite.runtime.{Frame, Storage} import ammonite.util.Util.newLine import ammonite.util._ import io.github.mandar2812.dynaml.repl.{Defaults, DynaMLInterpreter, DynaMLRepl, Scripts} import org.apache.commons.io.output.ByteArrayOutputStream import org.apache.zeppelin.server.ZeppelinServer /** * Contains the various entry points to the DynaML/Ammonite REPL. * * Configuration of the basic REPL is done by passing in arguments when * constructing the [[DynaZeppelin]] instance, and the various entrypoints such * as [[run]] [[runScript]] and so on are methods on that instance. * * It is more or less equivalent to the [[ammonite.repl.Repl]] object itself, and has * a similar set of parameters, but does not have any of the [[ammonite.repl.Repl]]'s * implementation-related code and provides a more convenient set of * entry-points that a user can call. * * Note that the [[instantiateRepl]] function generates a new [[Repl]] * every time it is called! * * @param predefCode Any additional code you want to run before the REPL session * starts. Can contain multiple blocks separated by `@`s * @param defaultPredef Do you want to include the "standard" predef imports * provided by Ammonite? These include tools like `time`, * `grep`, the `|` or `|?` pipes from ammonite-ops, and * other helpers. Can be disabled to give a clean * namespace for you to fill using your own predef. * @param storageBackend Where will all of Ammonite's persistent data get * stored? Things like any `predef.sc` file, * compilation/ivy caches, etc.. Defaults include * [[Storage.Folder]] and [[Storage.InMemory]], though * you can create your own. * @param wd The working directory of the REPL; when it load scripts, where * the scripts will be considered relative to when assigning them * packages * * @param inputStream Where input to the Repl is coming from, typically System.in, * but it could come from somewhere else e.g. 
across the * network in the case of the SshdRepl * @param outputStream Primary output of code run using Ammonite * @param errorStream Error output when things go bad, typically System.err; also * gets sent miscellaneous info messages that aren't strictly * part of the REPL or script's output */ case class DynaZeppelin( predefCode: String = "", predefFile: Option[Path] = None, defaultPredef: Boolean = true, storageBackend: Storage = new Storage.Folder(Defaults.ammoniteHome), wd: Path = ammonite.ops.pwd, welcomeBanner: Option[String] = Some(Defaults.welcomeBanner), inputStream: InputStream = System.in, outputStream: ByteArrayOutputStream = new ByteArrayOutputStream(), errorStream: OutputStream = System.err, verboseOutput: Boolean = true, remoteLogging: Boolean = true, colors: Colors = Colors.Default){ def loadedPredefFile = predefFile match{ case Some(path) => try Right(Some(PredefInfo(Name("FilePredef"), read(path), false, Some(path)))) catch{case e: NoSuchFileException => Left((Res.Failure("Unable to load predef file " + path), Seq(path -> 0L))) } case None => Right(None) } /** * Instantiates an ammonite.Repl using the configuration */ def instantiateRepl(replArgs: IndexedSeq[Bind[_]] = Vector.empty, remoteLogger: Option[RemoteLogger]) = { loadedPredefFile.right.map{ predefFileInfoOpt => val augmentedPredef = DynaZeppelin.maybeDefaultPredef( defaultPredef, Defaults.replPredef + Defaults.predefString + Defaults.dynaMlPredef ) val argString = replArgs.zipWithIndex.map{ case (b, idx) => s""" val ${b.name} = ammonite .repl .ReplBridge .value .Internal .replArgs($idx) .value .asInstanceOf[${b.typeTag.tpe}] """ }.mkString(newLine) new DynaMLRepl( inputStream, outputStream, errorStream, storage = storageBackend, basePredefs = Seq( PredefInfo(Name("DefaultPredef"), augmentedPredef, true, None), PredefInfo(Name("ArgsPredef"), argString, false, None) ), customPredefs = predefFileInfoOpt.toSeq ++ Seq( PredefInfo(Name("CodePredef"), predefCode, false, None) ), wd = wd, welcomeBanner = welcomeBanner, replArgs = replArgs, remoteLogger = remoteLogger, initialColors = colors ) } } def instantiateInterpreter() = { loadedPredefFile.right.flatMap { predefFileInfoOpt => val augmentedPredef = DynaZeppelin.maybeDefaultPredef( defaultPredef, Defaults.predefString + DynaZeppelin.extraPredefString ) val (colorsRef, printer) = Interpreter.initPrinters( colors, outputStream, errorStream, verboseOutput ) val frame = Frame.createInitial() val interp: Interpreter = new Interpreter( printer, storageBackend, basePredefs = Seq( PredefInfo(Name("DefaultPredef"), augmentedPredef, false, None) ), predefFileInfoOpt.toSeq ++ Seq( PredefInfo(Name("CodePredef"), predefCode, false, None) ), Vector.empty, wd, colorsRef, verboseOutput, () => frame ) interp.initializePredef() match{ case None => Right(interp) case Some(problems) => Left(problems) } } } def instantiateDynaZepplinInterpreter() = { loadedPredefFile.right.flatMap { predefFileInfoOpt => val augmentedPredef = DynaZeppelin.maybeDefaultPredef( defaultPredef, Defaults.predefString + DynaZeppelin.extraPredefString ) val (colorsRef, printer) = DynaMLInterpreter.initPrinters( colors, outputStream, errorStream, verboseOutput, autoFlush = false ) val frame = Frame.createInitial() val interp: DynaMLInterpreter = new DynaMLInterpreter( printer, storageBackend, basePredefs = Seq( PredefInfo(Name("DefaultPredef"), augmentedPredef, false, None) ), predefFileInfoOpt.toSeq ++ Seq( PredefInfo(Name("CodePredef"), predefCode, false, None) ), Vector.empty, wd, colorsRef, verboseOutput, () 
=> frame ) interp.initializePredef() match{ case None => Right(interp) case Some(problems) => Left(problems) } } } /** * Run the REPL, with any additional bindings you wish to provide. * * Returns an `Any` representing any value that the user passed into the * `exit` call when closing the REPL (defaults to `(): Unit`). Also returns * a sequence of paths that were watched as a result of this REPL run, in * case you wish to re-start the REPL when any of them change. */ def run(replArgs: Bind[_]*): (Res[Any], Seq[(Path, Long)]) = { val remoteLogger = if (!remoteLogging) None else Some(new ammonite.repl.RemoteLogger(storageBackend.getSessionId)) remoteLogger.foreach(_.apply("Boot")) instantiateRepl(replArgs.toIndexedSeq, remoteLogger) match{ case Left(missingPredefInfo) => missingPredefInfo case Right(repl) => repl.initializePredef().getOrElse{ // Warm up the compilation logic in the background, hopefully while the // user is typing their first command, so by the time the command is // submitted it can be processed by a warm compiler val warmupThread = new Thread(new Runnable{ def run() = repl.warmup() }) // This thread will terminal eventually on its own, but if the // JVM wants to exit earlier this thread shouldn't stop it warmupThread.setDaemon(true) warmupThread.start() try { val exitValue = Res.Success(repl.run()) (exitValue.map(repl.beforeExit), repl.interp.watchedFiles) } finally { remoteLogger.foreach(_.close()) } } } } /** * Run a Scala script file! takes the path to the file as well as an array * of `args` and a map of keyword `kwargs` to pass to that file. */ def runScript(path: Path, scriptArgs: Seq[(String, Option[String])]) : (Res[Any], Seq[(Path, Long)]) = { instantiateInterpreter() match{ case Right(interp) => val result = Scripts.runScript(wd, path, interp, scriptArgs) (result, interp.watchedFiles) case Left(problems) => problems } } /** * Run a snippet of code */ def runCode(code: String) = { instantiateInterpreter() match{ case Right(interp) => val res = interp.processExec(code, 0, () => ()) (res, interp.watchedFiles) case Left(problems) => problems } } } object DynaZeppelin extends App { ZeppelinServer.main(this.args) def maybeDefaultPredef(enabled: Boolean, predef: String) = if (enabled) predef else "" /** * Detects if the console is interactive; lets us make console-friendly output * (e.g. ansi color codes) if it is, and script-friendly output (no ansi codes) * if it's not * * https://stackoverflow.com/a/1403817/871202 */ def isInteractive() = System.console() != null val extraPredefString = s""" |import ammonite.main.Router.{doc, main} |import ammonite.main.Scripts.pathScoptRead |""".stripMargin }
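/*
 * A minimal sketch of driving the entry points above programmatically; the snippet string and
 * the choice of configuration flags are illustrative assumptions, not project defaults.
 */
import io.github.mandar2812.dynaml.DynaZeppelin

val zeppelinShell = DynaZeppelin(verboseOutput = false)

//evaluate a code snippet through the embedded interpreter and inspect the outcome
val outcome = zeppelinShell.runCode("""val x = 2 + 2""")
println(outcome)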
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/modelpipe/DLSSVMPipe.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.modelpipe import breeze.linalg.DenseVector import io.github.mandar2812.dynaml.kernels.LocalScalarKernel import io.github.mandar2812.dynaml.models.svm.DLSSVM /** * Created by mandar on 15/6/16. */ class DLSSVMPipe[Source](pre: (Source) => Stream[(DenseVector[Double], Double)], cov: LocalScalarKernel[DenseVector[Double]], task: String = "regression") extends ModelPipe[Source, Stream[(DenseVector[Double], Double)], DenseVector[Double], Double, DLSSVM] { override val preProcess = pre override def run(data: Source) = { val training = preProcess(data) new DLSSVM(training, training.length, cov, task) } }
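/*
 * A sketch of wiring a DLSSVMPipe for a CSV-like source; the parsing function and the
 * RBFKernel bandwidth value are illustrative assumptions.
 */
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.kernels.RBFKernel
import io.github.mandar2812.dynaml.modelpipe.DLSSVMPipe

//turn raw comma separated lines into (feature vector, target) pairs
val toPatterns = (lines: Stream[String]) => lines.map(line => {
  val cols = line.split(",").map(_.toDouble)
  (DenseVector(cols.init), cols.last)
})

//a DLSSVM regression pipe over such a source
val svmPipe = new DLSSVMPipe[Stream[String]](toPatterns, new RBFKernel(1.5))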
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/evaluation/RegressionMetricsTF.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.evaluation import com.quantifind.charts.Highcharts.{regression, title, xAxis, yAxis} import io.github.mandar2812.dynaml.tensorflow.{dtf, dtfutils} import org.platanios.tensorflow.api.{::, Tensor} /** * Implements a common use for Regression Task Evaluators. * */ class RegressionMetricsTF(preds: Tensor, targets: Tensor) extends MetricsTF( Seq("RMSE", "MAE", "Pearson Corr.", "Spearman Corr.", "Yield"), preds, targets) { private val num_outputs = if (preds.shape.toTensor().size == 1) 1 else preds.shape(1) private lazy val (_ , rmse , mae, corr, spearman_corr) = RegressionMetricsTF.calculate(preds, targets) private lazy val modelyield = (preds.max(axes = 0) - preds.min(axes = 0)).divide(targets.max(axes = 0) - targets.min(axes = 0)) override protected def run(): Tensor = dtf.stack(Seq(rmse, mae, corr, spearman_corr, modelyield)) override def generatePlots(): Unit = { println("Generating Plot of Fit for each target") if(num_outputs == 1) { val (pr, tar) = ( dtfutils.toDoubleSeq(scoresAndLabels._1), dtfutils.toDoubleSeq(scoresAndLabels._2)) regression(pr.zip(tar).toSeq) title("Goodness of fit: "+name) xAxis("Predicted "+name) yAxis("Actual "+name) } else { (0 until num_outputs).foreach(output => { val (pr, tar) = ( dtfutils.toDoubleSeq(scoresAndLabels._1(::, output)), dtfutils.toDoubleSeq(scoresAndLabels._2(::, output))) regression(pr.zip(tar).toSeq) }) } } } /** * Implements core logic of [[RegressionMetricsTF]] * */ object RegressionMetricsTF { protected def calculate(preds: Tensor, targets: Tensor): (Tensor, Tensor, Tensor, Tensor, Tensor) = { val error = targets.subtract(preds) println("Shape of error tensor: "+error.shape.toString()+"\n") val num_instances = error.shape(0) val rmse = error.square.mean(axes = 0).sqrt val mae = error.abs.mean(axes = 0) val corr = { val mean_preds = preds.mean(axes = 0) val mean_targets = targets.mean(axes = 0) val preds_c = preds.subtract(dtf.stack(Seq.fill(num_instances)(mean_preds))) val targets_c = targets.subtract(dtf.stack(Seq.fill(num_instances)(mean_targets))) val (sigma_t, sigma_p) = (targets_c.square.mean(axes = 0).sqrt, preds_c.square.mean(axes = 0).sqrt) preds_c.multiply(targets_c).mean(axes = 0).divide(sigma_t.multiply(sigma_p)) } val sp_corr = { val (ranks_preds, ranks_targets) = ( preds.topK(num_instances)._2.cast(preds.dataType), targets.topK(num_instances)._2.cast(targets.dataType)) val mean_rank_preds = ranks_preds.mean(axes = 0) val mean_rank_targets = ranks_targets.mean(axes = 0) val rank_preds_c = preds.subtract(dtf.stack(Seq.fill(num_instances)(mean_rank_preds))) val rank_targets_c = targets.subtract(dtf.stack(Seq.fill(num_instances)(mean_rank_targets))) val (sigma_t, sigma_p) = (rank_targets_c.square.mean(axes = 0).sqrt, rank_preds_c.square.mean(axes = 0).sqrt) 
rank_preds_c.multiply(rank_targets_c).mean(axes = 0).divide(sigma_t.multiply(sigma_p)) } (error, rmse, mae, corr, sp_corr) } }
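/*
 * A minimal sketch of evaluating a fitted model with RegressionMetricsTF; the prediction and
 * target tensors are left as placeholders since they would come from a trained model.
 */
import org.platanios.tensorflow.api.Tensor
import io.github.mandar2812.dynaml.evaluation.RegressionMetricsTF

val predictions: Tensor = ???   //placeholder: model outputs, shape (num_instances, num_outputs)
val groundTruth: Tensor = ???   //placeholder: targets of the same shape

val regMetrics = new RegressionMetricsTF(predictions, groundTruth)
regMetrics.print()
regMetrics.generatePlots()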
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/kernels/CoRegGraphKernel.scala
package io.github.mandar2812.dynaml.kernels import breeze.linalg.{DenseMatrix, pinv} import io.github.mandar2812.dynaml.utils /** * @author mandar2812 date: 30/08/16. * * In co-regionalization models for multi-output gaussian processes, * one comes across the graph regularizer. the class below is an implementation * of such. * * @param m The symmetric adjacency matrix of the graph generated by the nodes indexed * by integers. */ class CoRegGraphKernel(m: DenseMatrix[Double]) extends LocalSVMKernel[Int] { utils.isSquareMatrix(m) utils.isSymmetricMatrix(m) val dimensions = m.rows state = { for(i <- 0 until dimensions; j <- 0 until dimensions) yield (i,j) }.filter((coup) => coup._1 <= coup._2) .map(c => "M_"+c._1+"_"+c._2 -> m(c._1, c._2)) .toMap override val hyper_parameters: List[String] = state.keys.toList def adjecancyMatrix(config: Map[String, Double]) = DenseMatrix.tabulate[Double](dimensions, dimensions){(i, j) => if(i <= j) config("M_"+i+"_"+j) else config("M_"+j+"_"+i) } def degreeMatrix(config: Map[String, Double]) = DenseMatrix.eye[Double](dimensions) :* ((adjecancyMatrix(config) * DenseMatrix.ones[Double](dimensions, dimensions)) + adjecancyMatrix(config)) def l(config: Map[String, Double]): DenseMatrix[Double] = pinv(degreeMatrix(config) - adjecancyMatrix(config)) override def gradientAt(config: Map[String, Double])(x: Int, y: Int): Map[String, Double] = hyper_parameters.map(_ -> 1.0).toMap override def evaluateAt(config: Map[String, Double])(x: Int, y: Int): Double = l(config)(x,y) override def setHyperParameters(h: Map[String, Double]): CoRegGraphKernel.this.type = { super.setHyperParameters(h) this } }
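/*
 * A small sketch of the co-regionalization kernel over a three node chain graph;
 * the adjacency values are illustrative.
 */
import breeze.linalg.DenseMatrix
import io.github.mandar2812.dynaml.kernels.CoRegGraphKernel

//symmetric adjacency matrix of the chain 0 - 1 - 2
val adjacency = DenseMatrix(
  (0.0, 1.0, 0.0),
  (1.0, 0.0, 1.0),
  (0.0, 1.0, 0.0))

val coRegKernel = new CoRegGraphKernel(adjacency)

//kernel value between output indices 0 and 1 under the current hyper-parameter state
val k01 = coRegKernel.evaluateAt(coRegKernel.state)(0, 1)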
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/data/DataSet.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.tensorflow.data import io.github.mandar2812.dynaml.pipes._ import org.platanios.tensorflow.api._ import org.platanios.tensorflow.api.implicits.helpers.{DataTypeAuxToDataType, OutputToTensor} import org.platanios.tensorflow.api.ops.Function import org.platanios.tensorflow.api.ops.io.data.{Data, Dataset, OutputDataset, OutputSlicesDataset} /** * <h3>DynaML Data Set</h3> * * The data set class, wraps an underlying * collection of elements of type [[X]]. * * It can be used to create an object which * can access a potentially large number * of data patterns. * * It is also possible to transform the * data collection in the classical functional * paradigm of [[map()]], [[flatMap()]]. * * @tparam X The type of each data pattern. * * @param data The underlying data collection, * represented as an [[Iterable]] * of elements, each of type [[X]]. * * @author mandar2812 date 2018/07/17 * */ class DataSet[X](val data: Iterable[X]) { self => lazy val size: Int = data.toSeq.length /** * Filter elements of this data set which satisfy * a predicate. * */ def filter(pipe: DataPipe[X, Boolean]): DataSet[X] = DataSet[X](self.data.filter(pipe(_))) /** * Filter elements of this data set which does not * satisfy a predicate. * */ def filterNot(pipe: DataPipe[X, Boolean]): DataSet[X] = DataSet[X](self.data.filterNot(pipe(_))) /** * Creates a new data set of type [[Y]] * */ def map[Y](func: X => Y): DataSet[Y] = DataSet[Y](data.map(func)) /** * Creates a new data set of type [[Y]] * */ def map[Y](pipe: DataPipe[X, Y]): DataSet[Y] = map(pipe.run _) def map(func: X => Output): OutputDataSet = OutputDataSet(data.map(func)) def map(pipe: DataPipe[X, Output]): OutputDataSet = OutputDataSet(data.map(pipe.run)) /** * Maps each element into a collection of elements of type [[Y]], * and then concatenates each resulting collection into a single * data set. * */ def flatMap[Y](func: X => Iterable[Y]): DataSet[Y] = DataSet[Y](data.flatMap(func)) /** * Maps each element into a collection of elements of type [[Y]], * and then concatenates each resulting collection into a single * data set. * */ def flatMap[Y](pipe: DataPipe[X, Iterable[Y]]): DataSet[Y] = flatMap(pipe.run _) /** * Create a data set consisting of ([[X]], [[Y]]) pairs. * */ def zip[Y](other: DataSet[Y]): ZipDataSet[X, Y] = ZipDataSet[X, Y](self, other) /** * Join the current data collection with another collection * */ def concatenate(other: DataSet[X]): DataSet[X] = DataSet[X](self.data ++ other.data) /** * Transform the underlying collection in a way that uses potentially all of its elements. 
* */ def transform[Y](transformation: DataPipe[Iterable[X], Iterable[Y]]): DataSet[Y] = DataSet[Y](transformation(data)) def grouped(num: Int): DataSet[Seq[X]] = transform( DataPipe((d: Iterable[X]) => d.grouped(num).toIterable.map(_.toSeq)) ) def reduce[Y](transformation: DataPipe[Iterable[X], Y]): Y = transformation(data) def reduce[Y >: X](reducePipe: DataPipe2[Y, Y, Y]): Y = data.reduce[Y](reducePipe(_, _)) def reduceLeft[Y >: X](reducePipe: DataPipe2[Y, X, Y]): Y = data.reduceLeft[Y](reducePipe(_, _)) def scanLeft[Y](z: Y)(scanPipe: DataPipe2[Y, X, Y]): DataSet[Y] = DataSet(data.scanLeft(z)(scanPipe(_, _))) def scan[Y >: X](z: Y)(scanPipe: DataPipe2[Y, Y, Y]): DataSet[Y] = DataSet(data.scan(z)(scanPipe(_, _))) /** * Split the data collection into a train-test split. * * @return A result of type [[TFDataSet]], containing * both the training and test splits. * */ def partition(f: DataPipe[X, Boolean]): TFDataSet[X] = { val data_split = data.partition(f(_)) TFDataSet(DataSet(data_split._1), DataSet(data_split._2)) } def to_zip[Y, Z](f: DataPipe[X, (Y, Z)]): ZipDataSet[Y, Z] = { val data_split = data.map(f(_)).unzip ZipDataSet[Y, Z](DataSet[Y](data_split._1), DataSet[Z](data_split._2)) } /** * Convert the current collection into an instance * of [[SupervisedDataSet]]. * */ def to_supervised[Y, Z](f: DataPipe[X, (Y, Z)]): SupervisedDataSet[Y, Z] = { val data_split = data.map(f(_)).unzip SupervisedDataSet[Y, Z](DataSet[Y](data_split._1), DataSet[Z](data_split._2)) } /** * Construct a TensorFlow data set, from * the current data collection * * @tparam T The tensor type. * @tparam O Symbolic tensor (output) type. * @tparam DA The type of the auxiliary data structure * @tparam D The type of the data type objects for each data element. * @tparam S The type of the object representing the shape of the data tensors. * * @param transformation Either a data pipe from [[X]] to [[T]] or from [[X]] to [[O]] * @param dataType The data type of the underlying patterns. * @param shape The shape of the data patterns, defaults to null, i.e. is * inferred during run time. * * @return A TensorFlow data set handle. 
* */ def build[T, O, DA, D, S]( transformation: Either[DataPipe[X, T], DataPipe[X, O]], dataType: DA, shape: S)( implicit evDAToD: DataTypeAuxToDataType.Aux[DA, D], evData: Data.Aux[T, O, D, S], evOToT: OutputToTensor.Aux[O, T], evFunctionOutput: Function.ArgType[O] ): Dataset[T, O, D, S] = transformation match { case Left(pipe) => tf.data.fromGenerator( () => self.data.map(pipe(_)), dataType, shape) case Right(pipe) => self.data .map(x => tf.data.OutputDataset(pipe(x))) .reduceLeft[Dataset[T, O, D, S]]( (a, b) => a.concatenate(b) ) } protected def build[T, O, DA, D, S]( transformation: DataPipe[Iterable[X], Iterable[O]], dataType: DA, shape: S)( implicit evDAToD: DataTypeAuxToDataType.Aux[DA, D], evData: Data.Aux[T, O, D, S], evOToT: OutputToTensor.Aux[O, T], evFunctionOutput: Function.ArgType[O]): Dataset[T, O, D, S] = self .transform(transformation) .map(DataPipe((batch: O) => tf.data.OutputSlicesDataset[T, O, D, S](batch))) .reduceLeft(DataPipe2((l: Dataset[T, O, D, S], r: OutputSlicesDataset[T, O, D, S]) => l.concatenate(r))) def build_buffered[T, O, DA, D, S]( buffer_size: Int, stackOp: DataPipe[Iterable[O], O], dataType: DA, shape: S = null)( implicit convertToOutput: DataPipe[X, O], evDAToD: DataTypeAuxToDataType.Aux[DA, D], evData: Data.Aux[T, O, D, S], evOToT: OutputToTensor.Aux[O, T], evFunctionOutput: Function.ArgType[O]): Dataset[T, O, D, S] = { val buffer_and_stack = DataPipe((d: Iterable[X]) => d.grouped(buffer_size).toIterable) > IterableDataPipe(IterableDataPipe(convertToOutput)) > IterableDataPipe(stackOp) build(buffer_and_stack, dataType, shape) } } object DataSet { def apply[X](data: Iterable[X]): DataSet[X] = new DataSet(data) } case class OutputDataSet(override val data: Iterable[Output]) extends DataSet[Output](data) { self => def build[T, DA, D, S](dataType: DA, shape: S)( implicit evDAToD: DataTypeAuxToDataType.Aux[DA, D], evData: Data.Aux[T, Output, D, S], evOToT: OutputToTensor.Aux[Output, T], evFunctionOutput: Function.ArgType[Output]): Dataset[T, Output, D, S] = tf.data.OutputSlicesDataset(tf.concatenate(self.data.toSeq)) } /** * A data collection consisting of ([[X]], [[Y]]) pairs. * */ class ZipDataSet[X, Y]( val dataset1: DataSet[X], val dataset2: DataSet[Y]) extends DataSet[(X, Y)](dataset1.data.zip(dataset2.data)) { self => def unzip: (DataSet[X], DataSet[Y]) = (dataset1, dataset2) def join[Z](other: ZipDataSet[X, Z]): ZipDataSet[X, (Y, Z)] = { val otherMap = other.data.toMap val joined_data = self.data.map(pattern => { (pattern._1, (pattern._2, otherMap.get(pattern._1))) }).filter(_._2._2.isDefined) .map(p => (p._1, (p._2._1, p._2._2.get))).unzip ZipDataSet(joined_data._1, joined_data._2) } } object ZipDataSet { def apply[X, Y]( dataset1: DataSet[X], dataset2: DataSet[Y]): ZipDataSet[X, Y] = new ZipDataSet(dataset1, dataset2) def apply[X, Y]( dataset1: Iterable[X], dataset2: Iterable[Y]): ZipDataSet[X, Y] = new ZipDataSet( DataSet(dataset1), DataSet(dataset2) ) } /** * <h3>Supervised Data Set</h3> * * A data collection with features of type [[X]] and * targets of type [[Y]], suitable for supervised learning * tasks. 
* * */ case class SupervisedDataSet[X, Y]( features: DataSet[X], targets: DataSet[Y]) extends ZipDataSet[X, Y](features, targets) { self => override def partition(f: DataPipe[(X, Y), Boolean]): TFDataSet[(X, Y)] = { val data_split = data.partition(f(_)) val (features_train, targets_train) = data_split._1.unzip val (features_test, targets_test) = data_split._2.unzip TFDataSet( SupervisedDataSet(new DataSet[X](features_train), new DataSet[Y](targets_train)), SupervisedDataSet(new DataSet[X](features_test), new DataSet[Y](targets_test))) } } case class TFDataSet[T]( training_dataset: DataSet[T], test_dataset: DataSet[T])
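/*
 * A REPL-style sketch of the collection API above, using a plain range of integers as the
 * underlying data; the predicates are arbitrary illustrative choices.
 */
import io.github.mandar2812.dynaml.pipes.DataPipe
import io.github.mandar2812.dynaml.tensorflow.data.DataSet

val numbers = DataSet(1 to 100)

//keep even elements, square them, then split into a train/test partition
val evens   = numbers.filter(DataPipe((i: Int) => i % 2 == 0))
val squares = evens.map((i: Int) => i * i)
val split   = squares.partition(DataPipe((i: Int) => i <= 2500))

//split.training_dataset and split.test_dataset are themselves DataSet instances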
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/utils/MinMaxScalerTF.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.tensorflow.utils import org.platanios.tensorflow.api._ import _root_.io.github.mandar2812.dynaml.pipes._ /** * Scales attributes of a vector pattern using the sample minimum and maximum of * each dimension. * * @param min Sample minimum of the data * @param max Sample maximum of each data dimension * @author mandar2812 date: 07/03/2018. * * */ case class MinMaxScalerTF(min: Tensor, max: Tensor) extends TFScaler { val delta: Tensor = max.subtract(min) override val i: Scaler[Tensor] = Scaler((xc: Tensor) => xc.multiply(delta).add(min)) override def run(data: Tensor): Tensor = data.subtract(min).divide(delta) def apply(indexers: Indexer*): MinMaxScalerTF = this.copy(min(indexers:_*), max(indexers:_*)) } case class MinMaxScalerTO(min: Output, max: Output) extends TOScaler { val delta: Output = max.subtract(min) override val i: Scaler[Output] = Scaler((xc: Output) => xc.multiply(delta).add(min)) override def run(data: Output): Output = data.subtract(min).divide(delta) def apply(indexers: Indexer*): MinMaxScalerTO = this.copy(min(indexers:_*), max(indexers:_*)) }
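/*
 * A minimal sketch of min-max rescaling of tensor valued data; the tensors are left as
 * placeholders since they would normally be computed from the training data.
 */
import org.platanios.tensorflow.api.Tensor
import io.github.mandar2812.dynaml.tensorflow.utils.MinMaxScalerTF

val featureMin: Tensor = ???   //placeholder: per-dimension minima
val featureMax: Tensor = ???   //placeholder: per-dimension maxima
val batch: Tensor      = ???   //placeholder: data batch to rescale

val minMaxScaler = MinMaxScalerTF(featureMin, featureMax)
val rescaled     = minMaxScaler.run(batch)         //each dimension mapped onto [0, 1]
val recovered    = minMaxScaler.i.run(rescaled)    //inverse transform restores the original scale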
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/DataTypes.scala
<reponame>amitkumarj441/DynaML<gh_stars>0 package io.github.mandar2812.dynaml.models.statespace import POMP._ object DataTypes { /** * A description containing the modelled quantities and observations * sdeState = x_t = p(x_t | x_t-1) * gamma = f(x_t) * eta = g(gamma) * observation = pi(eta) */ case class Data( t: Time, observation: Observation, eta: Option[Eta], gamma: Option[Gamma], sdeState: Option[State]) { override def toString = { if (!sdeState.isEmpty) { s"$t, $observation, ${eta.get.head}, ${gamma.get}, " + sdeState.get.flatten.mkString(", ") } else { t + ", " + observation } } } /** * Given a sorted set of data, removes duplicate sequential entries * by time, even if they have different observations */ def removeDupTimes(data: Vector[Data]): Vector[Data] = { val sortedData = data.tail.foldLeft(Vector(data.head))((acc, a) => if (a.t == acc.head.t) acc else a +: acc) sortedData.reverse } /** * Representing intervals sampled from the empirical filtering distribution p(x_t | y_t) * @param lower the lower interval * @param upper the upper interval */ case class CredibleInterval(lower: Double, upper: Double) { override def toString = lower + ", " + upper } /** * The structure which the particle filter returns, * @param time the time of the process * @param observation an optional observation, note discretely observed processes cannot be seen at all time points continuously * @param state the mean of the empirical filtering distribution at time 'time' * @param stateIntervals the credible intervals of the filtering distribution */ case class PfOut( time: Time, observation: Option[Observation], eta: Double, etaIntervals: CredibleInterval, state: State, stateIntervals: IndexedSeq[CredibleInterval]) { override def toString = { observation match { case Some(x) => s"$time, $x, $eta, ${etaIntervals.toString}, ${state.flatten.mkString(", ")}, ${stateIntervals.mkString(", ")}" case None => s"$time, NA, $eta, ${etaIntervals.toString}, ${state.flatten.mkString(", ")}, ${stateIntervals.mkString(", ")}" } } } /** * A representation of a simulated Diffusion process */ case class Sde(time: Time, state: State) { override def toString = time + "," + state.toString } }
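/*
 * A sketch of de-duplicating a simulated path by time stamp; the concrete Data values are left
 * as a placeholder because they depend on the POMP type aliases and a fitted state space model.
 */
import io.github.mandar2812.dynaml.models.statespace.DataTypes

val observedPath: Vector[DataTypes.Data] = ???   //placeholder: time-sorted simulation output
val dedupedPath = DataTypes.removeDupTimes(observedPath)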
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/analysis/package.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml import breeze.linalg._ import io.github.mandar2812.dynaml.pipes._ import io.github.mandar2812.dynaml.utils._ /** * Utilities for computational real analysis * * @author mandar2812 date 2017/08/15 * */ package object analysis { /** * Generates a fourier series feature mapping. * */ object FourierBasisGenerator extends DataPipe2[Double, Int, Basis[Double]] { override def run(omega: Double, components: Int): Basis[Double] = Basis((x: Double) => { if(components % 2 == 0) { DenseVector( (Seq(1d) ++ (-components/2 to components/2) .filterNot(_ == 0) .map(i => if(i < 0) math.cos(i*omega*x) else math.sin(i*omega*x)) ).toArray ) } else { DenseVector( (Seq(1d) ++ (-(components/2).toInt - 1 to components/2) .filterNot(_ == 0) .map(i => if(i < 0) math.cos(i*omega*x) else math.sin(i*omega*x)) ).toArray ) } }) } /** * Generates a polynomial feature mapping upto a specified degree. * */ object PolynomialBasisGenerator extends DataPipe[Int, Basis[Double]] { override def run(degree: Int): Basis[Double] = { Basis((x: Double) => DenseVector((0 to degree).map(d => math.pow(x, d)).toArray)) } } /** * Generate a basis of Cardinal Cubic B-Splines * */ object CubicSplineGenerator extends DataPipe[Seq[Int], Basis[Double]] { override def run(knotIndices: Seq[Int]): Basis[Double] = Basis((x: Double) => DenseVector(Array(1d) ++ knotIndices.toArray.map(k => CardinalBSplineGenerator(k, 3)(x)))) } /** * Generate a basis of Bernstein Polynomials * */ object BernsteinSplineSeriesGenerator extends DataPipe[Seq[Int], Basis[Double]] { override def run(knotIndices: Seq[Int]): Basis[Double] = Basis((x: Double) => DenseVector(Array(1d) ++ knotIndices.toArray.map(k => BernsteinSplineGenerator(k, 3)(x)))) } /** * Generate a basis of Chebyshev functions * */ object ChebyshevBasisGenerator extends DataPipe2[Int, Int, Basis[Double]] { override def run(maxdegree: Int, kind: Int): Basis[Double] = { require(kind == 1 || kind == 2, "Chebyshev functions are either of the first or second kind") Basis( (x: Double) => DenseVector( (0 to maxdegree).map(d => chebyshev(d,x,kind)).toArray ) ) } } /** * Generate a hermite polynomial basis. * * */ object HermiteBasisGenerator extends DataPipe[Int, Basis[Double]] { override def run(maxdegree: Int): Basis[Double] = Basis( (x: Double) => DenseVector( (0 to maxdegree).map(d => hermite(d, x)).toArray ) ) } /** * Generate a legendre polynomial basis * */ object LegendreBasisGenerator extends DataPipe[Int, Basis[Double]] { override def run(maxdegree: Int): Basis[Double] = Basis( (x: Double) => DenseVector( (0 to maxdegree).map(d => legendre(d, x)).toArray ) ) } }
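/*
 * A REPL-style sketch of generating feature maps with the basis generators above; the degree
 * and harmonic counts are arbitrary illustrative choices.
 */
import io.github.mandar2812.dynaml.analysis._

//Chebyshev basis of the first kind up to degree 4
val chebBasis = ChebyshevBasisGenerator.run(4, 1)

//Fourier basis with fundamental frequency 2*Pi and 4 components
val fourierBasis = FourierBasisGenerator.run(2*math.Pi, 4)

//evaluating a basis at a point yields a DenseVector of features
//(assuming a Basis can be applied like a function, as the spline generators above are)
val chebFeatures = chebBasis(0.5)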
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/evaluation/ClassificationMetricsTF.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.evaluation import org.platanios.tensorflow.api.Tensor /** * Evaluates classification models, by calculating confusion matrices. * * @param num_classes The number of classes in the classification task * @param preds Predictions expressed as class probabilities/one-hot vectors * @param targets Class labels expressed as one-hot vectors * */ class ClassificationMetricsTF(num_classes: Int, preds: Tensor, targets: Tensor) extends MetricsTF(Seq("Class Fidelity Score"), preds, targets) { val confusion_matrix: Tensor = targets.transpose().matmul(preds) val class_score: Tensor = { val d = confusion_matrix.trace val s = confusion_matrix.sum() d.divide(s) } override protected def run(): Tensor = class_score override def print(): Unit = { println("\nClassification Model Performance: "+name) scala.Predef.print("Number of classes: ") pprint.pprintln(num_classes) println("============================") println() println("Confusion Matrix: ") println(confusion_matrix.summarize(maxEntries = confusion_matrix.size.toInt)) println() scala.Predef.print("Class Prediction Fidelity Score: ") pprint.pprintln(class_score.scalar.asInstanceOf[Float]) } }
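/*
 * A minimal sketch of computing the confusion matrix based score; a three class problem is
 * assumed, and the probability and label tensors are placeholders from a trained classifier.
 */
import org.platanios.tensorflow.api.Tensor
import io.github.mandar2812.dynaml.evaluation.ClassificationMetricsTF

val classProbabilities: Tensor = ???   //placeholder: shape (num_instances, num_classes)
val oneHotLabels: Tensor       = ???   //placeholder: shape (num_instances, num_classes)

val classMetrics = new ClassificationMetricsTF(3, classProbabilities, oneHotLabels)
classMetrics.print()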
amitkumarj441/DynaML
dynaml-examples/src/main/scala-2.11/io/github/mandar2812/dynaml/examples/TestAdult.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.examples import java.io.File import breeze.linalg.{DenseMatrix, DenseVector => BDV} import com.github.tototoshi.csv.CSVWriter import io.github.mandar2812.dynaml.DynaMLPipe import io.github.mandar2812.dynaml.evaluation.BinaryClassificationMetrics import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import io.github.mandar2812.dynaml.kernels.{RBFKernel, SVMKernel} import io.github.mandar2812.dynaml.modelpipe.GLMPipe import io.github.mandar2812.dynaml.models.KernelizedModel import io.github.mandar2812.dynaml.models.lm.GeneralizedLinearModel import io.github.mandar2812.dynaml.models.svm.{KernelSparkModel, LSSVMSparkModel} import io.github.mandar2812.dynaml.pipes._ /** * @author mandar2812 on 1/7/15. */ object TestAdult { def apply(nCores: Int = 4, prototypes: Int = 1, kernel: String, globalOptMethod: String = "gs", grid: Int = 7, step: Double = 0.45, logscale: Boolean = false, frac: Double, executors: Int = 1, paraFactor: Int = 2): BDV[Double] = { val dataRoot = "data/" val trainfile = dataRoot+"adult.csv" val testfile = dataRoot+"adulttest.csv" val config = Map( "file" -> trainfile, "delim" -> ",", "head" -> "false", "task" -> "classification", "parallelism" -> nCores.toString, "executors" -> executors.toString, "factor" -> paraFactor.toString ) val configtest = Map("file" -> testfile, "delim" -> ",", "head" -> "false") val conf = new SparkConf().setAppName("Adult").setMaster("local["+nCores+"]") conf.registerKryoClasses(Array(classOf[LSSVMSparkModel], classOf[KernelSparkModel], classOf[KernelizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint], BDV[Double], BDV[Double], Double, Int, Int]], classOf[SVMKernel[DenseMatrix[Double]]], classOf[RBFKernel], classOf[BDV[Double]], classOf[DenseMatrix[Double]])) val sc = new SparkContext(conf) val model = LSSVMSparkModel(config, sc) val nProt = if (kernel == "Linear") { model.npoints.toInt } else { if(prototypes > 0) prototypes else math.sqrt(model.npoints.toDouble).toInt } model.setBatchFraction(frac) val (optModel, optConfig) = KernelizedModel.getOptimizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint], model.type](model, globalOptMethod, kernel, nProt, grid, step, logscale) optModel.setMaxIterations(2).learn() val met = optModel.evaluate(configtest) met.print() println("Optimal Configuration: "+optConfig) val scale = if(logscale) "log" else "linear" val perf = met.kpi() val row = Seq(kernel, prototypes.toString, globalOptMethod, grid.toString, step.toString, scale, perf(0), perf(1), perf(2), optConfig.toString) val writer = CSVWriter.open(new File("data/resultsAdult.csv"), append = true) writer.writeRow(row) writer.close() optModel.unpersist perf } } object TestAdultLogistic { def 
apply(training: Int = 1000, columns: List[Int] = List(6, 0, 1, 2, 3, 4, 5), stepSize: Double = 0.01, maxIt: Int = 30, mini: Double = 1.0, regularization: Double = 0.5, modelType: String = "logistic") = { val modelpipe = new GLMPipe( (tt: ((Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)]), (BDV[Double], BDV[Double]))) => tt._1._1, task = "classification", modelType = modelType ) > DynaMLPipe.trainParametricModel[ Stream[(BDV[Double], Double)], BDV[Double], BDV[Double], Double, Stream[(BDV[Double], Double)], GeneralizedLinearModel[Stream[(BDV[Double], Double)]] ](regularization, stepSize, maxIt, mini) val testPipe = DataPipe( (modelAndData: (GeneralizedLinearModel[Stream[(BDV[Double], Double)]], Stream[(BDV[Double], Double)])) => { val pipe1 = StreamDataPipe((couple: (BDV[Double], Double)) => { (modelAndData._1.predict(couple._1), couple._2) }) val scoresAndLabelsPipe = pipe1 val scoresAndLabels = scoresAndLabelsPipe.run(modelAndData._2).toList val metrics = new BinaryClassificationMetrics( scoresAndLabels, scoresAndLabels.length, logisticFlag = true) metrics.setName("Adult Income") metrics.print() metrics.generatePlots() }) val preProcessPipe = DynaMLPipe.fileToStream > DynaMLPipe.extractTrainingFeatures(columns, Map()) > DynaMLPipe.splitFeaturesAndTargets val scaleFeatures = StreamDataPipe((pattern:(BDV[Double], Double)) => (pattern._1, math.max(pattern._2, 0.0))) val procTraining = preProcessPipe > DataPipe((data: Stream[(BDV[Double], Double)]) => data.take(training)) > scaleFeatures val procTest = preProcessPipe > scaleFeatures val trainTestPipe = DataPipe(procTraining, procTest) > DynaMLPipe.featuresGaussianStandardization > BifurcationPipe(modelpipe, DataPipe((tt: ( (Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)]), (BDV[Double], BDV[Double]))) => tt._1._2)) > testPipe trainTestPipe(("data/adult.csv", "data/adulttest.csv")) } }
amitkumarj441/DynaML
dynaml-examples/src/main/scala-2.11/io/github/mandar2812/dynaml/examples/TestMagicGamma.scala
package io.github.mandar2812.dynaml.examples import java.io.File import breeze.linalg.{DenseMatrix, DenseVector} import com.github.tototoshi.csv.CSVWriter import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import io.github.mandar2812.dynaml.kernels.{RBFKernel, SVMKernel} import io.github.mandar2812.dynaml.models.KernelizedModel import io.github.mandar2812.dynaml.models.svm.{KernelSparkModel, LSSVMSparkModel} /** * @author mandar2812 */ object TestMagicGamma { def apply(nCores: Int = 4, prototypes: Int = 1, kernel: String, globalOptMethod: String = "gs", grid: Int = 7, step: Double = 0.45, logscale: Boolean = false, dataRoot: String = "data/", executors: Int = 1, paraFactor: Int = 2): DenseVector[Double] = { val config = Map("file" -> "data/magicgamma.csv", "delim" -> ",", "head" -> "false", "task" -> "classification", "parallelism" -> nCores.toString, "executors" -> executors.toString, "factor" -> paraFactor.toString) val configtest = Map("file" -> "data/magicgammatest.csv", "delim" -> ",", "head" -> "false") val conf = new SparkConf().setAppName("Magicgamma").setMaster("local["+nCores+"]") //conf.set("spark.executor.memory", "3g") conf.registerKryoClasses(Array(classOf[LSSVMSparkModel], classOf[KernelSparkModel], classOf[KernelizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint], DenseVector[Double], DenseVector[Double], Double, Int, Int]], classOf[SVMKernel[DenseMatrix[Double]]], classOf[RBFKernel], classOf[DenseVector[Double]], classOf[DenseMatrix[Double]], classOf[org.apache.log4j.Logger])) val sc = new SparkContext(conf) val model = LSSVMSparkModel(config, sc) val nProt = if (kernel == "Linear") { model.npoints.toInt } else { if(prototypes > 0) prototypes else math.sqrt(model.npoints.toDouble).toInt } val (optModel, optConfig) = KernelizedModel.getOptimizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint], model.type](model, globalOptMethod, kernel, nProt, grid, step, logscale) optModel.setMaxIterations(2).learn() val met = optModel.evaluate(configtest) met.print() println("Optimal Configuration: "+optConfig) val scale = if(logscale) "log" else "linear" val perf = met.kpi() val row = Seq(kernel, prototypes.toString, globalOptMethod, grid.toString, step.toString, scale, perf(0), perf(1), perf(2), optConfig.toString) val writer = CSVWriter.open(new File("data/resultsMagicGamma.csv"), append = true) writer.writeRow(row) writer.close() optModel.unpersist perf } }
amitkumarj441/DynaML
dynaml-pipes/src/main/scala-2.11/io/github/mandar2812/dynaml/pipes/StreamDataPipe.scala
<reponame>amitkumarj441/DynaML<gh_stars>0 /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.pipes import scala.collection.GenTraversable import scalaxy.streams.optimize /** * @author mandar2812 on 17/11/15. * * Represents an abstract stream data pipeline. * @tparam I The type of a single source record * @tparam J The result type of a single record. * */ trait StreamDataPipe[I, J, K] extends DataPipe[Stream[I], K]{ /** * The functions that compose the * pipe operations. * * */ val pipe: (I) => J /** * The function which writes * to the destination * */ override def run(data: Stream[I]): K } /** * A pipeline which takes a [[Stream]] of data and * performs the scala `map`operation. * */ trait StreamMapPipe[I, J] extends StreamDataPipe[I, J, Stream[J]] { override def run(data: Stream[I]): Stream[J] = optimize { data.map(pipe) } } /** * A pipeline which takes a [[Stream]] of data and * performs the scala `flatMap` operation. * */ trait StreamFlatMapPipe[I, J] extends StreamDataPipe[I, Stream[J], Stream[J]] { override def run(data: Stream[I]) = optimize { data.flatMap(pipe) } } trait StreamFilterPipe[I] extends StreamDataPipe[I, Boolean, Stream[I]] { override def run(data: Stream[I]): Stream[I] = optimize { data.filter(pipe) } } trait StreamPartitionPipe[I] extends StreamDataPipe[I, Boolean, (Stream[I], Stream[I])] { override def run(data: Stream[I]): (Stream[I], Stream[I]) = optimize { data.partition(pipe) } } trait StreamSideEffectPipe[I] extends StreamDataPipe[I, Unit, Unit] { override def run(data: Stream[I]): Unit = data.foreach(pipe) } object StreamDataPipe { def toStreamPipe[I, S <: GenTraversable[I]] = new DataPipe[S, Stream[I]] { override def run(data: S) = data.toStream } //Stream pipes which map from the original domain to a new one def apply[I, J](mapFunc: (I) => J): StreamMapPipe[I, J] = new StreamMapPipe[I, J] { val pipe = mapFunc } def apply[I, J](map: DataPipe[I, J]): StreamMapPipe[I, J] = new StreamMapPipe[I, J] { val pipe = map.run _ } //Stream pipes which act as filters def apply[I](mapFunc: (I) => Boolean): StreamFilterPipe[I] = new StreamFilterPipe[I] { val pipe = mapFunc } def apply[I](mapFunc: DataPipe[I, Boolean]): StreamFilterPipe[I] = new StreamFilterPipe[I] { val pipe = mapFunc.run _ } //stream pipes with side effects def apply[I](seFunc: (I) => Unit): StreamSideEffectPipe[I] = new StreamSideEffectPipe[I] { val pipe = seFunc } def apply[I](seFunc: SideEffectPipe[I]): StreamSideEffectPipe[I] = new StreamSideEffectPipe[I] { val pipe = seFunc.run _ } } object StreamFlatMapPipe { def apply[I, J](mapFunc: (I) => Stream[J]) = new StreamFlatMapPipe[I, J] { override val pipe = mapFunc } def apply[I, J](mapFunc: DataPipe[I, Stream[J]]) = new StreamFlatMapPipe[I, J] { override val pipe = mapFunc.run _ } } object StreamPartitionPipe { def 
apply[I](mapFunc: (I) => Boolean): StreamPartitionPipe[I] = new StreamPartitionPipe[I] { val pipe = mapFunc } def apply[I](mapFunc: DataPipe[I, Boolean]): StreamPartitionPipe[I] = new StreamPartitionPipe[I] { val pipe = mapFunc.run _ } }
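/*
 * A REPL-style sketch of the stream pipes above; the numeric values are illustrative.
 */
import io.github.mandar2812.dynaml.pipes._

//a map pipe which centres each element, and a partition pipe which splits by sign
val centre      = StreamDataPipe((x: Double) => x - 1.0)
val splitBySign = StreamPartitionPipe((x: Double) => x > 0.0)

//compose with the usual > operator and run on a stream
val (positives, nonPositives) = (centre > splitBySign).run(Stream(0.5, 1.0, 2.5))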
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/utils/GaussianScaler.scala
<filename>dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/utils/GaussianScaler.scala /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.utils import breeze.linalg.{DenseMatrix, DenseVector, cholesky, inv} import io.github.mandar2812.dynaml.pipes.{ReversibleScaler, Scaler} /** * Scales attributes of a vector pattern using the sample mean and variance of * each dimension. This assumes that there is no covariance between the data * dimensions. * * @param mean Sample mean of the data * @param sigma Sample variance of each data dimension * @author mandar2812 date: 17/6/16. * * */ case class GaussianScaler(mean: DenseVector[Double], sigma: DenseVector[Double]) extends ReversibleScaler[DenseVector[Double]]{ override val i: Scaler[DenseVector[Double]] = Scaler((pattern: DenseVector[Double]) => (pattern *:* sigma) + mean) override def run(data: DenseVector[Double]): DenseVector[Double] = (data-mean) :/ sigma def apply(r: Range): GaussianScaler = GaussianScaler(mean(r), sigma(r)) def apply(index: Int): UnivariateGaussianScaler = UnivariateGaussianScaler(mean(index), sigma(index)) def ++(other: GaussianScaler) = GaussianScaler( DenseVector(this.mean.toArray++other.mean.toArray), DenseVector(this.sigma.toArray++other.sigma.toArray)) } /** * Scales the attributes of a data pattern using the sample mean and covariance matrix * calculated on the data set. This allows standardization of multivariate data sets * where the covariance of individual data dimensions is not negligible. * * @param mean Sample mean of data * @param sigma Sample covariance matrix of data. * */ case class MVGaussianScaler(mean: DenseVector[Double], sigma: DenseMatrix[Double]) extends ReversibleScaler[DenseVector[Double]] { val sigmaInverse = cholesky(inv(sigma)) override val i: Scaler[DenseVector[Double]] = Scaler((pattern: DenseVector[Double]) => (inv(sigmaInverse.t) * pattern) + mean) override def run(data: DenseVector[Double]): DenseVector[Double] = sigmaInverse.t * (data - mean) def apply(r: Range): MVGaussianScaler = MVGaussianScaler(mean(r), sigma(r,r)) } case class UnivariateGaussianScaler(mean: Double, sigma: Double) extends ReversibleScaler[Double] { require(sigma > 0.0, "Std. Deviation for gaussian scaling must be strictly positive!") /** * The inverse operation of this scaling. * **/ override val i = Scaler((pattern: Double) => (pattern*sigma) + mean) override def run(data: Double) = (data-mean)/sigma }
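/*
 * A REPL-style sketch of Gaussian standardisation and its inverse; the location and scale
 * vectors are illustrative values rather than estimates from real data.
 */
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.utils.GaussianScaler

val scaler = GaussianScaler(
  mean  = DenseVector(1.0, -2.0),
  sigma = DenseVector(0.5, 3.0))

val pattern      = DenseVector(1.5, 4.0)
val standardised = scaler.run(pattern)          //(pattern - mean) / sigma
val recovered    = scaler.i.run(standardised)   //inverse transform returns the original pattern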
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/layers/StackOutputs.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.tensorflow.layers import org.platanios.tensorflow.api._ import org.platanios.tensorflow.api.learn.Mode import org.platanios.tensorflow.api.learn.layers.Layer /** * Stacks output produced by a tensorflow concatenation layer * * @param axis The axis over which the tensors should be concatenated, * defaults to -1 * * @author mandar2812 date 2018/03/16 * */ case class StackOutputs(override val name: String, axis: Int = -1) extends Layer[Seq[Output], Output](name) { override val layerType: String = s"Stack[axis:$axis]" override protected def _forward(input: Seq[Output])(implicit mode: Mode): Output = tf.stack(input, axis) } case class Unstack(override val name: String, axis: Int = -1) extends Layer[Output, Seq[Output]](name) { override val layerType: String = s"Unstack[axis:$axis]" override protected def _forward(input: Output)(implicit mode: Mode): Seq[Output] = tf.unstack(input, axis) } /** * Stacks output produced by a tensorflow concatenation layer * * @param axis The axis over which the tensors should be concatenated, * defaults to -1 * * @author mandar2812 date 2018/06/03 * */ case class ConcatenateOutputs(override val name: String, axis: Int = -1) extends Layer[Seq[Output], Output](name) { override val layerType: String = s"Concatenate[axis:$axis]" override protected def _forward(input: Seq[Output])(implicit mode: Mode): Output = tf.concatenate(input, axis) } /** * Combine a collection of layers into a layer which accepts * sequences of symbolic tensors. * * @param layers The layers to be joined. * * @author mandar2812 date 2018/03/16 * */ case class SeqLayer[T, R](override val name: String, layers: Seq[Layer[T, R]]) extends Layer[Seq[T], Seq[R]](name) { override val layerType: String = s"SeqLayer[${layers.map(_.layerType).mkString(",")}]" override protected def _forward(input: Seq[T])(implicit mode: Mode): Seq[R] = layers.zip(input).map(c => c._1.forward(c._2)(mode)) } /** * Combine a collection of layers into a layer which maps * its input through each layer in the sequence. * * @param layers The layers to be joined. * * @author mandar2812 date 2018/03/16 * */ case class CombinedLayer[T, R](override val name: String, layers: Seq[Layer[T, R]]) extends Layer[T, Seq[R]](name) { override val layerType: String = s"CombinedLayer[${layers.map(_.layerType).mkString(",")}]" override protected def _forward(input: T)(implicit mode: Mode): Seq[R] = layers.map(_.forward(input)(mode)) } case class IdentityLayer[I](override val name: String) extends Layer[I, I](name) { override val layerType: String = s"Identity" override protected def _forward(input: I)(implicit mode: Mode): I = input }
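/*
 * A sketch of combining the layers above: the input is sent through two branches and the branch
 * outputs are stacked along a new leading axis. Layer composition via >> follows the
 * tensorflow_scala convention and is assumed here; the layer names are arbitrary.
 */
import org.platanios.tensorflow.api._
import io.github.mandar2812.dynaml.tensorflow.layers._

val twoBranches = CombinedLayer(
  "TwoViews",
  Seq(IdentityLayer[Output]("Left"), IdentityLayer[Output]("Right")))

val stackedViews = twoBranches >> StackOutputs("StackViews", axis = 0)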
amitkumarj441/DynaML
dynaml-examples/src/main/scala-2.11/io/github/mandar2812/dynaml/examples/DaisyPowerPlant.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.examples import breeze.linalg.{DenseMatrix, DenseVector} import com.quantifind.charts.Highcharts._ import io.github.mandar2812.dynaml.DynaMLPipe import io.github.mandar2812.dynaml.evaluation.RegressionMetrics import io.github.mandar2812.dynaml.kernels.{CovarianceFunction, LocalScalarKernel} import io.github.mandar2812.dynaml.models.svm.DLSSVM import io.github.mandar2812.dynaml.optimization.{GradBasedGlobalOptimizer, GridSearch} import io.github.mandar2812.dynaml.pipes.{DataPipe, StreamDataPipe} import org.apache.log4j.Logger /** * Created by mandar on 4/3/16. */ object DaisyPowerPlant { def apply(kernel: LocalScalarKernel[DenseVector[Double]], deltaT: Int = 2, timelag:Int = 0, stepPred: Int = 3, num_training: Int = 150, column: Int = 7, opt: Map[String, String]) = runExperiment(kernel, deltaT, timelag, stepPred, num_training, column, opt) def runExperiment(kernel: LocalScalarKernel[DenseVector[Double]], deltaT: Int = 2, timelag:Int = 0, stepPred: Int = 3, num_training: Int = 150, column: Int = 7, opt: Map[String, String]): Seq[Seq[AnyVal]] = { //Load Daisy data into a stream //Extract the time and Dst values val logger = Logger.getLogger(this.getClass) val names = Map(6 -> "steam pressure", 7 -> "main stem temperature", 8 -> "reheat steam temperature") //pipe training data to model and then generate test predictions //create RegressionMetrics instance and produce plots val modelTrainTest = (trainTest: ((Stream[(DenseVector[Double], Double)], Stream[(DenseVector[Double], Double)]), (DenseVector[Double], DenseVector[Double]))) => { val model = new DLSSVM(trainTest._1._1, num_training, kernel) val gs = opt("globalOpt") match { case "GS" => new GridSearch[model.type](model) .setGridSize(opt("grid").toInt) .setStepSize(opt("step").toDouble) .setLogScale(false) case "ML" => new GradBasedGlobalOptimizer[DLSSVM](model) } val startConf = kernel.state ++ Map("regularization" -> opt("regularization").toDouble) val (_, conf) = gs.optimize(startConf, opt) model.setRegParam(opt("regularization").toDouble).learn() val res = trainTest._1._2.map(testpoint => (model.predict(testpoint._1), testpoint._2)) val scoresAndLabelsPipe = DataPipe((list: List[(Double, Double)]) => list.map{l => (l._1*trainTest._2._2(-1) + trainTest._2._1(-1), l._2*trainTest._2._2(-1) + trainTest._2._1(-1))}) val scoresAndLabels = scoresAndLabelsPipe.run(res.toList) val metrics = new RegressionMetrics(scoresAndLabels, scoresAndLabels.length) metrics.setName(names(column)) metrics.print() metrics.generatePlots() //Plotting time series prediction comparisons line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._2)) hold() line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._1)) legend(List(names(column), "Predicted "+names(column)+" 
(one hour ahead)")) title("Pont-sur-Sambre 120 MW power plant: "+names(column)) unhold() Seq( Seq(deltaT, 1, num_training, 200-num_training, metrics.mae, metrics.rmse, metrics.Rsq, metrics.corr, metrics.modelYield) ) } val preProcessPipe = DynaMLPipe.fileToStream > DynaMLPipe.trimLines > DynaMLPipe.replaceWhiteSpaces > DynaMLPipe.extractTrainingFeatures( List(0,column,1,2,3,4,5), Map() ) > DynaMLPipe.removeMissingLines > StreamDataPipe((line: String) => { val splits = line.split(",") val timestamp = splits.head.toDouble val feat = DenseVector(splits.tail.map(_.toDouble)) (timestamp, feat) }) > DynaMLPipe.deltaOperationVec(deltaT) val trainTestPipe = DynaMLPipe.duplicate(preProcessPipe) > DynaMLPipe.splitTrainingTest(num_training, 200-num_training) > DynaMLPipe.trainTestGaussianStandardization > DataPipe(modelTrainTest) val dataFile = dataDir+"/powerplant.csv" trainTestPipe((dataFile, dataFile)) } }
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/kernels/KernelOpType.scala
package io.github.mandar2812.dynaml.kernels import breeze.generic.UFunc import breeze.math.Semiring /** * @author mandar2812 date:26/10/2016. * * Marker for some kernel operation. */ sealed trait KernelOpType sealed trait KernelOpAdd extends KernelOpType sealed trait KernelOpMult extends KernelOpType sealed trait KernelOuterMult extends KernelOpType sealed trait KernelOuterAdd extends KernelOpType object KernelOpAdd extends KernelOpAdd with UFunc { implicit def opAddFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] { def apply(v: S, v2: S): S = implicitly[Semiring[S]].+(v, v2) } } object KernelOpMult extends KernelOpMult with UFunc { implicit def opMultFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] { def apply(v: S, v2: S): S = implicitly[Semiring[S]].*(v, v2) } } object KernelOuterMult extends KernelOuterMult with UFunc object KernelOuterAdd extends KernelOuterAdd with UFunc
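// Editor's sketch -- not part of the original file. The UFunc objects pick up their
// implementation from any breeze Semiring instance, e.g. the built-in one for Double.
object KernelOpSketch {
  val s: Double = KernelOpAdd(2.0, 3.0)   // 5.0, via Semiring[Double].+
  val p: Double = KernelOpMult(2.0, 3.0)  // 6.0, via Semiring[Double].*
}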
amitkumarj441/DynaML
dynaml-pipes/src/main/scala-2.11/io/github/mandar2812/dynaml/pipes/Scaler.scala
<gh_stars>0 /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.pipes import scalaxy.streams._ /** * @author mandar2812 17/6/16. * * Top level trait; represents the scaling operation, used * heavily in data processing tasks. */ trait Scaler[S] extends DataPipe[S, S]{ override def apply[T <: Traversable[S]](data: T) = optimize { data.map(run).asInstanceOf[T] } def *[T](that: Scaler[T]) = { val firstRun = this.run _ new Scaler[(S,T)] { override def run(data: (S, T)): (S, T) = (firstRun(data._1), that(data._2)) } } def >(otherScaler: Scaler[S]) = { val firstRun = this.run _ new Scaler[S] { def run(data: S) = otherScaler.run(firstRun(data)) } } } object Scaler { def apply[S](f: (S) => S): Scaler[S] = new Scaler[S] { override def run(data: S): S = f(data) } } /** * @author mandar2812 17/6/16 * * * */ trait ReversibleScaler[S] extends Scaler[S] with Encoder[S, S]{ /** * The inverse operation of this scaling. * * */ override val i: Scaler[S] override def apply[T<: Traversable[S]](data: T):T = optimize { data.map(run).asInstanceOf[T] } def *[T](that: ReversibleScaler[T]) = { val firstInv = this.i val firstRun = this.run _ new ReversibleScaler[(S, T)] { val i: Scaler[(S,T)] = firstInv * that.i override def run(data: (S, T)): (S, T) = (firstRun(data._1), that(data._2)) } } def >(otherRevScaler: ReversibleScaler[S]): ReversibleScaler[S] = { val firstInv = this.i val firstRun = this.run _ new ReversibleScaler[S] { val i: Scaler[S] = otherRevScaler.i > firstInv def run(data: S) = otherRevScaler.run(firstRun(data)) } } }
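// Editor's sketch -- not part of the original file: composing scalers with `>`
// (sequential) and `*` (pairwise on tuples), using two toy affine maps.
object ScalerCompositionSketch {

  val centre: Scaler[Double] = Scaler((x: Double) => x - 10.0)
  val shrink: Scaler[Double] = Scaler((x: Double) => x / 2.0)

  val centreThenShrink: Scaler[Double]  = centre > shrink
  val onPairs: Scaler[(Double, Double)] = centre * shrink

  val y  = centreThenShrink.run(14.0)   // (14.0 - 10.0) / 2.0 = 2.0
  val yp = onPairs.run((14.0, 4.0))     // (4.0, 2.0)
}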
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/optimization/ProbGPCommMachine.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.optimization import breeze.linalg.{DenseMatrix, DenseVector, max, min, sum} import breeze.numerics.exp import io.github.mandar2812.dynaml.DynaMLPipe import io.github.mandar2812.dynaml.kernels.DecomposableCovariance import io.github.mandar2812.dynaml.models.gp.{AbstractGPRegressionModel, GaussianProcessMixture} import io.github.mandar2812.dynaml.pipes.{DataPipe, WeightedSumReducer} import scala.reflect.ClassTag /** * Build GP committee model after performing the CSA routine * * @author mandar2812 date 08/02/2017 * * */ class ProbGPCommMachine[T, I: ClassTag]( model: AbstractGPRegressionModel[T, I]) extends CoupledSimulatedAnnealing(model) { private var policy: String = "CSA" private var baselinePolicy: String = "max" def _policy = policy def setPolicy(p: String): this.type = { if(p == "CSA" || p == "Coupled Simulated Annealing") policy = "CSA" else policy = "GS" this } def setBaseLinePolicy(p: String): this.type = { if(p == "avg" || p == "mean" || p == "average") baselinePolicy = "mean" else if(p == "min") baselinePolicy = "min" else if(p == "max") baselinePolicy = "max" else baselinePolicy = "mean" this } private def calculateEnergyLandscape(initialConfig: Map[String, Double], options: Map[String, String]) = if(policy == "CSA") performCSA(initialConfig, options) else getEnergyLandscape(initialConfig, options, meanFieldPrior) private def modelProbabilities = DataPipe(ProbGPCommMachine.calculateModelWeightsSigmoid(baselinePolicy) _) override def optimize( initialConfig: Map[String, Double], options: Map[String, String]) = { //Find out the blocked hyper parameters and their values val blockedHypParams = system.covariance.blocked_hyper_parameters ++ system.noiseModel.blocked_hyper_parameters val (kernelParams, noiseParams) = ( system.covariance.hyper_parameters, system.noiseModel.hyper_parameters) val blockedState = system._current_state.filterKeys(blockedHypParams.contains) val energyLandscape = calculateEnergyLandscape(initialConfig, options) //Calculate the weights of each configuration val weights = modelProbabilities(energyLandscape).map(c => (c._1, c._2 ++ blockedState)) //Declare implicit value for weighted kernel implicit val encoder = DynaMLPipe.genericReplicationEncoder(weights.length) //Declare implicit value for transformation required for creation of the compound kernel implicit val transform: DataPipe[T, Seq[(I, Double)]] = DataPipe(system.dataAsSeq) //Declare implicit reducer required for the weighted kernel implicit val reducer = WeightedSumReducer(weights.map(c => c._1*c._1).toArray) //Now construct a weighted Gaussian Process model val (covariancePipe, noisePipe) = (system.covariance.asPipe, system.noiseModel.asPipe) //The mean function of the original GP val meanF = system.mean //Get the kernels, 
noise models and mean functions of each GP in the committee val (kernels, noiseModels, meanFuncs) = weights.map(weightCouple => { val (w, conf) = weightCouple val (k, n) = ( covariancePipe(conf.filterKeys(kernelParams.contains)), noisePipe(conf.filterKeys(noiseParams.contains))) (k, n, meanF > DataPipe((x: Double) => x*w)) }).unzip3 //Calculate the resultant kernels, noise and mean function of GP committee val (netKernel, netNoiseModel, netMeanFunc) = ( new DecomposableCovariance[I](kernels:_*), new DecomposableCovariance[I](noiseModels:_*), DataPipe((x: I) => meanFuncs.map(_(x)).sum)) println("\n===============================================") println("Constructing Probabilistic GP Committee") //Create the GP committee with the calculated specifications val committeeGP: AbstractGPRegressionModel[T, I] = AbstractGPRegressionModel( netKernel, netNoiseModel, netMeanFunc)( system.data, system.npoints) print("Number of model instances = ") pprint.pprintln(weights.length) println("--------------------------------------") println( "Calculated model probabilities/weights are \n") weights.foreach(wc => { println("\nConfiguration: ") pprint.pprintln(wc._2) print("\nProbability = ") pprint.pprintln(wc._1) }) println("--------------------------------------") println( "State of new model:- Covariance: ") pprint.pprintln(committeeGP.covariance.state) println( "State of new model:- Noise ") pprint.pprintln(committeeGP.noiseModel.state) println("===============================================") if(options.contains("persist") && (options("persist") == "true" || options("persist") == "1")) { println("Persisting model state") committeeGP.persist(committeeGP._current_state) } //Return the resultant model (committeeGP, committeeGP._current_state) } } object ProbGPCommMachine { def calculateModelWeights(energyLandscape: List[(Double, Map[String, Double])]) : Seq[(Double, Map[String, Double])] = { val h = DenseVector(energyLandscape.map(_._1).toArray) val hTotal = sum(h) val alpha = if (h.length == 1) 1.0 else 1.0 / (h.length - 1) val weights: DenseVector[Double] = h.map(p => 1.0 - (p / hTotal)) :* alpha val configurations = energyLandscape.map(_._2) weights.toArray.toSeq zip configurations } def calculateModelWeightsSigmoid( baselineMethod: String = "mean")( energyLandscape: Seq[(Double, Map[String, Double])]) : Seq[(Double, Map[String, Double])] = { val h = DenseVector(energyLandscape.map(_._1).toArray) val baseline = baselineMethod match { case "mean" => sum(h)/h.length.toDouble case "avg" => sum(h)/h.length.toDouble case "min" => min(h) case "max" => max(h) case _ => sum(h)/h.length.toDouble } val maskMatrices: Seq[DenseMatrix[Double]] = (0 until h.length).map(i => DenseMatrix.tabulate[Double](h.length, h.length)((r, s) => { if (r == i) 0.0 else if (s == i) -1.0 else if (r == s) 1.0 else 0.0 }) ) val weights = maskMatrices.zipWithIndex.map(mask => { 1.0/sum(exp((mask._1*h) :* (-1.0/baseline))) }) val configurations = energyLandscape.map(_._2) weights.toArray.toSeq zip configurations } }
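// Editor's sketch -- not part of the original file. The weighting scheme can be
// exercised on its own with a toy "energy landscape" of three hyper-parameter
// configurations; the lowest-energy configuration receives the largest weight.
object ModelWeightSketch {

  val landscape: Seq[(Double, Map[String, Double])] = Seq(
    (10.0, Map("bandwidth" -> 0.5)),
    (12.0, Map("bandwidth" -> 1.0)),
    (15.0, Map("bandwidth" -> 2.0)))

  // Weights computed against the mean energy as the baseline.
  val weights = ProbGPCommMachine.calculateModelWeightsSigmoid("mean")(landscape)
}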
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/data/AbstractDataSet.scala
<gh_stars>0 /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.tensorflow.data import org.platanios.tensorflow.api._ import org.platanios.tensorflow.api.implicits.helpers.OutputToTensor import org.platanios.tensorflow.api.ops.Function import org.platanios.tensorflow.api.ops.io.data.{Data, Dataset} case class AbstractDataSet[TI, TT]( trainData: TI, trainLabels: TT, nTrain: Int, testData: TI, testLabels: TT, nTest: Int) { def training_data[OI, DI, SI, OT, DT, ST]( implicit evData1: Data.Aux[TI,OI,DI,SI], evData2: Data.Aux[TT,OT,DT,ST], evOToT1: OutputToTensor.Aux[OI, TI], evOToT2: OutputToTensor.Aux[OT, TT], evFunctionInput1: Function.ArgType[OI], evFunctionInput2: Function.ArgType[OT]) : Dataset[(TI, TT), (OI, OT), (DI, DT), (SI, ST)] = tf.data.TensorSlicesDataset[TI, OI, DI, SI](trainData).zip( tf.data.TensorSlicesDataset[TT, OT, DT, ST](trainLabels) ) def test_data[OI, DI, SI, OT, DT, ST]( implicit evData1: Data.Aux[TI,OI,DI,SI], evData2: Data.Aux[TT,OT,DT,ST], evOToT1: OutputToTensor.Aux[OI, TI], evOToT2: OutputToTensor.Aux[OT, TT], evFunctionInput1: Function.ArgType[OI], evFunctionInput2: Function.ArgType[OT]) : Dataset[(TI, TT), (OI, OT), (DI, DT), (SI, ST)] = tf.data.TensorSlicesDataset[TI, OI, DI, SI](testData).zip( tf.data.TensorSlicesDataset[TT, OT, DT, ST](testLabels) ) }
amitkumarj441/DynaML
dynaml-repl/src/main/scala-2.11/io/github/mandar2812/dynaml/repl/DynaMLRepl.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.repl import java.io.{InputStream, OutputStream} import java.nio.file.NoSuchFileException import ammonite.interp.Preprocessor import ammonite.ops.{Path, _} import ammonite.repl.{RemoteLogger, Repl, ReplApiImpl, ReplLoad} import ammonite.runtime.Evaluator.AmmoniteExit import ammonite.runtime.Storage import ammonite.util.Name.backtickWrap import ammonite.util.Util.{CodeSource, normalizeNewlines} import ammonite.util._ import fastparse.utils.Compat.Context import fastparse.utils.Utils._ import io.github.mandar2812.dynaml.repl.Router.{ArgSig, EntryPoint} import scala.annotation.{StaticAnnotation, tailrec} import scala.collection.mutable import scala.io.Source import scala.language.experimental.macros import fastparse.utils.Compat.Context /** * Customised version of the Ammonite REPL * @author mandar2812 date 1/6/16. * */ class DynaMLRepl( input: InputStream, output: OutputStream, error: OutputStream, storage: Storage, basePredefs: Seq[PredefInfo], customPredefs: Seq[PredefInfo], wd: ammonite.ops.Path, welcomeBanner: Option[String], replArgs: IndexedSeq[Bind[_]] = Vector.empty, initialColors: Colors = Colors.Default, remoteLogger: Option[RemoteLogger], replCodeWrapper: Preprocessor.CodeWrapper, scriptCodeWrapper: Preprocessor.CodeWrapper) extends Repl( input, output, error, storage, basePredefs, customPredefs, wd, welcomeBanner, replArgs, initialColors, remoteLogger, replCodeWrapper, scriptCodeWrapper) { repl => override val prompt = Ref("DynaML>") /*override val interp: DynaMLInterpreter = new DynaMLInterpreter( printer, storage, basePredefs, customPredefs, Seq(( "ammonite.repl.ReplBridge", "repl", new ReplApiImpl { def replArgs0 = repl.replArgs def printer = repl.printer val colors = repl.colors def sess = repl.sess0 val prompt = repl.prompt val frontEnd = repl.frontEnd def lastException = repl.lastException def fullHistory = storage.fullHistory() def history = repl.history def newCompiler() = interp.compilerManager.init(force = true) def compiler = interp.compilerManager.compiler.compiler def fullImports = repl.fullImports def imports = repl.imports def usedEarlierDefinitions = repl.usedEarlierDefinitions def width = frontEnd().width def height = frontEnd().height object load extends ReplLoad with (String => Unit){ def apply(line: String) = { interp.processExec(line, currentLine, () => currentLine += 1) match{ case Res.Failure(s) => throw new CompilationError(s) case Res.Exception(t, s) => throw t case _ => } } def exec(file: Path): Unit = { interp.watch(file) apply(normalizeNewlines(read(file))) } } } )), wd, colors, verboseOutput = true, getFrame = () => frames().head, createFrame = () => { val f = sess0.childFrame(frames().head); frames() = f :: frames(); f }, replCodeWrapper = replCodeWrapper, scriptCodeWrapper = 
scriptCodeWrapper )*/ } object Defaults { val dynaml_install_dir = System.getenv("DYNAML_HOME") val root_dir = if (dynaml_install_dir != null) dynaml_install_dir else "." val welcomeBanner = { def ammoniteVersion = ammonite.Constants.version def scalaVersion = scala.util.Properties.versionNumberString def javaVersion = System.getProperty("java.version") def version = BuildInfo.version def banner = Source.fromFile(root_dir+"/conf/banner.txt").getLines.mkString("\n") Util.normalizeNewlines( banner+s"""\nWelcome to DynaML $version \nInteractive Scala shell for Machine Learning Research | |Currently running on: |(Scala $scalaVersion Java $javaVersion) |""".stripMargin ) } // Need to import stuff from ammonite.ops manually, rather than from the // ammonite.ops.Extensions bundle, because otherwise they result in ambiguous // imports if someone else imports maunally val predefString = s""" |import ammonite.ops.{ | PipeableImplicit, | FilterMapExtImplicit, | FilterMapArraysImplicit, | FilterMapIteratorsImplicit, | FilterMapGeneratorsImplicit, | SeqFactoryFunc, | RegexContextMaker, | Callable1Implicit |} |import ammonite.runtime.tools._ |import ammonite.repl.tools._ |import ammonite.runtime.tools.IvyConstructor.{ArtifactIdExt, GroupIdExt} |import io.github.mandar2812.dynaml.repl.Router.{doc, main} |import io.github.mandar2812.dynaml.repl.Scripts.pathScoptRead |import ammonite.interp.InterpBridge.value.exit |""".stripMargin val replPredef = """ |import ammonite.repl.ReplBridge.value.{ | codeColorsImplicit, | tprintColorsImplicit, | pprinterImplicit, | show, | typeOf |} """.stripMargin def ammoniteHome = ammonite.ops.Path(System.getProperty("user.home"))/".ammonite" val ignoreUselessImports = """ |notify => _, | wait => _, | equals => _, | asInstanceOf => _, | synchronized => _, | notifyAll => _, | isInstanceOf => _, | == => _, | != => _, | getClass => _, | ne => _, | eq => _, | ## => _, | hashCode => _, | _ |""" def dynaMlPredef = Source.fromFile(root_dir+"/conf/DynaMLInit.scala").getLines.mkString("\n") } object Compat{ def copyAnnotatedType(c: Context) (tpe: c.universe.AnnotatedType, newAnnots: List[c.universe.Annotation]) = { import c.universe.compat._ c.universe.AnnotatedType(newAnnots, tpe.underlying) } } object Cli{ case class Arg[T, V](name: String, shortName: Option[Char], doc: String, action: (T, V) => T) (implicit val reader: scopt.Read[V]){ def runAction(t: T, s: String) = action(t, reader.reads(s)) } case class Config(predefCode: String = "", defaultPredef: Boolean = true, homePredef: Boolean = true, storageBackend: Storage = new Storage.Folder(Defaults.ammoniteHome), wd: Path = ammonite.ops.pwd, welcomeBanner: Option[String] = Some(Defaults.welcomeBanner), verboseOutput: Boolean = true, remoteLogging: Boolean = true, watch: Boolean = false, code: Option[String] = None, home: Path = Defaults.ammoniteHome, predefFile: Option[Path] = None, help: Boolean = false, colored: Option[Boolean] = None) import Scripts.pathScoptRead val genericSignature = Seq( Arg[Config, String]( "predef-code", None, "Any commands you want to execute at the start of the REPL session", (c, v) => c.copy(predefCode = v) ), Arg[Config, String]( "code", Some('c'), "Pass in code to be run immediately in the REPL", (c, v) => c.copy(code = Some(v)) ), Arg[Config, Path]( "home", Some('h'), "The home directory of the REPL; where it looks for config and caches", (c, v) => c.copy(home = v) ), Arg[Config, Path]( "predef", Some('p'), """Lets you load your predef from a custom location, rather than the |default location in your 
Ammonite home""".stripMargin, (c, v) => c.copy(predefFile = Some(v)) ), Arg[Config, Unit]( "no-home-predef", None, """Disables the default behavior of loading predef files from your |~/.ammonite/predef.sc, predefScript.sc, or predefShared.sc. You can |choose an additional predef to use using `--predef |""".stripMargin, (c, v) => c.copy(homePredef = false) ), Arg[Config, Unit]( "no-default-predef", None, """Disable the default predef and run Ammonite with the minimal predef |possible |""".stripMargin, (c, v) => c.copy(defaultPredef = false) ), Arg[Config, Unit]( "silent", Some('s'), """Make ivy logs go silent instead of printing though failures will |still throw exception""".stripMargin, (c, v) => c.copy(verboseOutput = false) ), Arg[Config, Unit]( "help", None, """Print this message""".stripMargin, (c, v) => c.copy(help = true) ), Arg[Config, Boolean]( "color", None, """Enable or disable colored output; by default colors are enabled |in both REPL and scripts if the console is interactive, and disabled |otherwise""".stripMargin, (c, v) => c.copy(colored = Some(v)) ), Arg[Config, Unit]( "watch", Some('w'), "Watch and re-run your scripts when they change", (c, v) => c.copy(watch = true) ) ) val replSignature = Seq( Arg[Config, String]( "banner", Some('b'), "Customize the welcome banner that gets shown when Ammonite starts", (c, v) => c.copy(welcomeBanner = Some(v)) ), Arg[Config, Unit]( "no-remote-logging", None, """Disable remote logging of the number of times a REPL starts and runs |commands |""".stripMargin, (c, v) => c.copy(remoteLogging= false) ) ) val ammoniteArgSignature = genericSignature ++ replSignature def showArg(arg: Arg[_, _]) = " " + arg.shortName.fold("")("-" + _ + ", ") + "--" + arg.name def formatBlock(args: Seq[Arg[_, _]], leftMargin: Int) = { for(arg <- args) yield { showArg(arg).padTo(leftMargin, ' ').mkString + arg.doc.lines.mkString(Util.newLine + " " * leftMargin) } } def ammoniteHelp = { val leftMargin = ammoniteArgSignature.map(showArg(_).length).max + 2 s"""Ammonite REPL & Script-Runner, ${ammonite.Constants.version} |usage: amm [ammonite-options] [script-file [script-options]] | |${formatBlock(genericSignature, leftMargin).mkString(Util.newLine)} | |REPL-specific args: |${formatBlock(replSignature, leftMargin).mkString(Util.newLine)} """.stripMargin } def groupArgs[T](flatArgs: List[String], args: Seq[Arg[T, _]], initial: T): Either[String, (T, List[String])] = { val argsMap0: Seq[(String, Arg[T, _])] = args .flatMap{x => Seq(x.name -> x) ++ x.shortName.map(_.toString -> x)} val argsMap = argsMap0.toMap @tailrec def rec(keywordTokens: List[String], current: T): Either[String, (T, List[String])] = { keywordTokens match{ case head :: rest if head(0) == '-' => val realName = if(head(1) == '-') head.drop(2) else head.drop(1) argsMap.get(realName) match { case Some(cliArg) => if (cliArg.reader == scopt.Read.unitRead) { rec(rest, cliArg.runAction(current, "")) } else rest match{ case next :: rest2 => rec(rest2, cliArg.runAction(current, next)) case Nil => Left(s"Expected a value after argument $head") } case None => Right((current, keywordTokens)) } case _ => Right((current, keywordTokens)) } } rec(flatArgs, initial) } } /** * More or less a minimal version of Autowire's Server that lets you generate * a set of "routes" from the methods defined in an object, and call them * using passing in name/args/kwargs via Java reflection, without having to * generate/compile code or use Scala reflection. 
This saves us spinning up * the Scala compiler and greatly reduces the startup time of cached scripts. */ object Router{ class doc(s: String) extends StaticAnnotation class main extends StaticAnnotation def generateRoutes[T](t: T): Seq[Router.EntryPoint] = macro generateRoutesImpl[T] def generateRoutesImpl[T: c.WeakTypeTag](c: Context)(t: c.Expr[T]): c.Expr[Seq[EntryPoint]] = { import c.universe._ val r = new Router(c) val allRoutes = r.getAllRoutesForClass( weakTypeOf[T].asInstanceOf[r.c.Type], t.tree.asInstanceOf[r.c.Tree] ).asInstanceOf[Iterable[c.Tree]] c.Expr[Seq[EntryPoint]](q"_root_.scala.Seq(..$allRoutes)") } /** * Models what is known by the router about a single argument: that it has * a [[name]], a human-readable [[typeString]] describing what the type is * (just for logging and reading, not a replacement for a `TypeTag`) and * possible a function that can compute its default value */ case class ArgSig(name: String, typeString: String, doc: Option[String], default: Option[() => Any]) def stripDashes(s: String) = { if (s.startsWith("--")) s.drop(2) else if (s.startsWith("-")) s.drop(1) else s } /** * What is known about a single endpoint for our routes. It has a [[name]], * [[argSignatures]] for each argument, and a macro-generated [[invoke0]] * that performs all the necessary argument parsing and de-serialization. * * Realistically, you will probably spend most of your time calling [[invoke]] * instead, which provides a nicer API to call it that mimmicks the API of * calling a Scala method. */ case class EntryPoint(name: String, argSignatures: Seq[ArgSig], doc: Option[String], varargs: Boolean, invoke0: (Map[String, String], Seq[String]) => Result[Any]){ def invoke(groupedArgs: Seq[(String, Option[String])]): Result[Any] = { var remainingArgSignatures = argSignatures.toList val accumulatedKeywords = mutable.Map.empty[ArgSig, mutable.Buffer[String]] val keywordableArgs = if (varargs) argSignatures.dropRight(1) else argSignatures for(arg <- keywordableArgs) accumulatedKeywords(arg) = mutable.Buffer.empty val leftoverArgs = mutable.Buffer.empty[String] val lookupArgSig = argSignatures.map(x => (x.name, x)).toMap var incomplete: Option[ArgSig] = None for(group <- groupedArgs){ group match{ case (value, None) => if (value(0) == '-' && !varargs){ lookupArgSig.get(stripDashes(value)) match{ case None => leftoverArgs.append(value) case Some(sig) => incomplete = Some(sig) } } else remainingArgSignatures match { case Nil => leftoverArgs.append(value) case last :: Nil if varargs => leftoverArgs.append(value) case next :: rest => accumulatedKeywords(next).append(value) remainingArgSignatures = rest } case (rawKey, Some(value)) => val key = stripDashes(rawKey) lookupArgSig.get(key) match{ case Some(x) if accumulatedKeywords.contains(x) => if (accumulatedKeywords(x).nonEmpty && varargs){ leftoverArgs.append(rawKey, value) }else{ accumulatedKeywords(x).append(value) remainingArgSignatures = remainingArgSignatures.filter(_.name != key) } case _ => leftoverArgs.append(rawKey, value) } } } val missing0 = remainingArgSignatures.filter(_.default.isEmpty) val missing = if(varargs) { missing0.filter(_ != argSignatures.last) } else { missing0.filter(x => incomplete != Some(x)) } val duplicates = accumulatedKeywords.toSeq.filter(_._2.length > 1) if ( incomplete.nonEmpty || missing.nonEmpty || duplicates.nonEmpty || (leftoverArgs.nonEmpty && !varargs) ){ Result.Error.MismatchedArguments( missing = missing, unknown = leftoverArgs, duplicate = duplicates, incomplete = incomplete ) } else { val mapping = 
accumulatedKeywords .iterator .collect{case (k, Seq(single)) => (k.name, single)} .toMap try invoke0(mapping, leftoverArgs) catch{case e: Throwable => Result.Error.Exception(e) } } } } def tryEither[T](t: => T, error: Throwable => Result.ParamError) = { try Right(t) catch{ case e: Throwable => Left(error(e))} } def readVarargs[T](arg: ArgSig, values: Seq[String], thunk: String => T) = { val attempts = for(item <- values) yield tryEither(thunk(item), Result.ParamError.Invalid(arg, item, _)) val bad = attempts.collect{ case Left(x) => x} if (bad.nonEmpty) Left(bad) else Right(attempts.collect{case Right(x) => x}) } def read[T](dict: Map[String, String], default: => Option[Any], arg: ArgSig, thunk: String => T): FailMaybe = { dict.get(arg.name) match{ case None => tryEither(default.get, Result.ParamError.DefaultFailed(arg, _)).left.map(Seq(_)) case Some(x) => tryEither(thunk(x), Result.ParamError.Invalid(arg, x, _)).left.map(Seq(_)) } } /** * Represents what comes out of an attempt to invoke an [[EntryPoint]]. * Could succeed with a value, but could fail in many different ways. */ sealed trait Result[+T] object Result{ /** * Invoking the [[EntryPoint]] was totally successful, and returned a * result */ case class Success[T](value: T) extends Result[T] /** * Invoking the [[EntryPoint]] was not successful */ sealed trait Error extends Result[Nothing] object Error{ /** * Invoking the [[EntryPoint]] failed with an exception while executing * code within it. */ case class Exception(t: Throwable) extends Error /** * Invoking the [[EntryPoint]] failed because the arguments provided * did not line up with the arguments expected */ case class MismatchedArguments(missing: Seq[ArgSig], unknown: Seq[String], duplicate: Seq[(ArgSig, Seq[String])], incomplete: Option[ArgSig]) extends Error /** * Invoking the [[EntryPoint]] failed because there were problems * deserializing/parsing individual arguments */ case class InvalidArguments(values: Seq[ParamError]) extends Error } sealed trait ParamError object ParamError{ /** * Something went wrong trying to de-serialize the input parameter; * the thrown exception is stored in [[ex]] */ case class Invalid(arg: ArgSig, value: String, ex: Throwable) extends ParamError /** * Something went wrong trying to evaluate the default value * for this input parameter */ case class DefaultFailed(arg: ArgSig, ex: Throwable) extends ParamError } } type FailMaybe = Either[Seq[Result.ParamError], Any] type FailAll = Either[Seq[Result.ParamError], Seq[Any]] def validate(args: Seq[FailMaybe]): Result[Seq[Any]] = { val lefts = args.collect{case Left(x) => x}.flatten if (lefts.nonEmpty) Result.Error.InvalidArguments(lefts) else { val rights = args.collect{case Right(x) => x} Result.Success(rights) } } } class Router [C <: Context](val c: C) { import c.universe._ def getValsOrMeths(curCls: Type): Iterable[MethodSymbol] = { def isAMemberOfAnyRef(member: Symbol) = weakTypeOf[AnyRef].members.exists(_.name == member.name) val extractableMembers = for { member <- curCls.declarations if !isAMemberOfAnyRef(member) if !member.isSynthetic if member.isPublic if member.isTerm memTerm = member.asTerm if memTerm.isMethod } yield memTerm.asMethod extractableMembers flatMap { case memTerm => if (memTerm.isSetter || memTerm.isConstructor || memTerm.isGetter) Nil else Seq(memTerm) } } def extractMethod(meth: MethodSymbol, curCls: c.universe.Type, target: c.Tree): c.universe.Tree = { val flattenedArgLists = meth.paramss.flatten def hasDefault(i: Int) = { val defaultName = s"${meth.name}$$default$$${i + 
1}" if (curCls.members.exists(_.name.toString == defaultName)) { Some(defaultName) } else { None } } val argListSymbol = q"${c.fresh[TermName]("argsList")}" val extrasSymbol = q"${c.fresh[TermName]("extras")}" val defaults = for ((arg, i) <- flattenedArgLists.zipWithIndex) yield { hasDefault(i).map(defaultName => q"() => $target.${newTermName(defaultName)}") } def getDocAnnotation(annotations: List[Annotation]) = { val (docTrees, remaining) = annotations.partition(_.tpe =:= typeOf[Router.doc]) val docValues = for { doc <- docTrees if doc.scalaArgs.head.isInstanceOf[Literal] l = doc.scalaArgs.head.asInstanceOf[Literal] if l.value.value.isInstanceOf[String] } yield l.value.value.asInstanceOf[String] (remaining, docValues.headOption) } def unwrapVarargType(arg: Symbol) = { val vararg = arg.typeSignature.typeSymbol == definitions.RepeatedParamClass val unwrappedType = if (!vararg) arg.typeSignature else arg.typeSignature.asInstanceOf[TypeRef].args(0) (vararg, unwrappedType) } val (_, methodDoc) = getDocAnnotation(meth.annotations) val readArgSigs = for( ((arg, defaultOpt), i) <- flattenedArgLists.zip(defaults).zipWithIndex ) yield { val (vararg, varargUnwrappedType) = unwrapVarargType(arg) val default = if (vararg) q"scala.Some(scala.Nil)" else defaultOpt match { case Some(defaultExpr) => q"scala.Some($defaultExpr())" case None => q"scala.None" } val (docUnwrappedType, docOpt) = varargUnwrappedType match{ case t: AnnotatedType => val (remaining, docValue) = getDocAnnotation(t.annotations) if (remaining.isEmpty) (t.underlying, docValue) else (Compat.copyAnnotatedType(c)(t, remaining), docValue) case t => (t, None) } val docTree = docOpt match{ case None => q"scala.None" case Some(s) => q"scala.Some($s)" } val argSig = q""" io.github.mandar2812.dynaml.repl.Router.ArgSig( ${arg.name.toString}, ${docUnwrappedType.toString + (if(vararg) "*" else "")}, $docTree, $defaultOpt ) """ val reader = if(vararg) q""" io.github.mandar2812.dynaml.repl.Router.readVarargs[$docUnwrappedType]( $argSig, $extrasSymbol, implicitly[scopt.Read[$docUnwrappedType]].reads(_) ) """ else q""" io.github.mandar2812.dynaml.repl.Router.read[$docUnwrappedType]( $argListSymbol, $default, $argSig, implicitly[scopt.Read[$docUnwrappedType]].reads(_) ) """ (reader, argSig, vararg) } val (readArgs, argSigs, varargs) = readArgSigs.unzip3 val (argNames, argNameCasts) = flattenedArgLists.map { arg => val (vararg, unwrappedType) = unwrapVarargType(arg) ( pq"${arg.name.toTermName}", if (!vararg) q"${arg.name.toTermName}.asInstanceOf[$unwrappedType]" else q"${arg.name.toTermName}.asInstanceOf[Seq[$unwrappedType]]: _*" ) }.unzip q""" io.github.mandar2812.dynaml.repl.Router.EntryPoint( ${meth.name.toString}, scala.Seq(..$argSigs), ${methodDoc match{ case None => q"scala.None" case Some(s) => q"scala.Some($s)" }}, ${varargs.contains(true)}, ($argListSymbol: Map[String, String], $extrasSymbol: Seq[String]) => io.github.mandar2812.dynaml.repl.Router.validate(Seq(..$readArgs)) match{ case io.github.mandar2812.dynaml.repl.Router.Result.Success(List(..$argNames)) => io.github.mandar2812.dynaml.repl.Router.Result.Success($target.${meth.name.toTermName}(..$argNameCasts)) case x => x } ) """ } def getAllRoutesForClass(curCls: Type, target: c.Tree): Iterable[c.universe.Tree] = for{ t <- getValsOrMeths(curCls) if t.annotations.exists(_.tpe =:= typeOf[Router.main]) } yield extractMethod(t, curCls, target) } /** * Logic around using Ammonite as a script-runner; invoking scripts via the * macro-generated [[Router]], and pretty-printing any output or error 
messages */ object Scripts { def groupArgs(flatArgs: List[String]): Seq[(String, Option[String])] = { var keywordTokens = flatArgs var scriptArgs = Vector.empty[(String, Option[String])] while(keywordTokens.nonEmpty) keywordTokens match{ case List(head, next, rest@_*) if head.startsWith("-") => scriptArgs = scriptArgs :+ (head, Some(next)) keywordTokens = rest.toList case List(head, rest@_*) => scriptArgs = scriptArgs :+ (head, None) keywordTokens = rest.toList } scriptArgs } def runScript(wd: Path, path: Path, interp: ammonite.interp.Interpreter, scriptArgs: Seq[(String, Option[String])] = Nil) = { interp.watch(path) val (pkg, wrapper) = Util.pathToPackageWrapper(Seq(), path relativeTo wd) for{ scriptTxt <- try Res.Success(Util.normalizeNewlines(read(path))) catch{ case e: NoSuchFileException => Res.Failure("Script file not found: " + path) } processed <- interp.processModule( scriptTxt, CodeSource(wrapper, pkg, Seq(Name("ammonite"), Name("$file")), Some(path)), autoImport = true, // Not sure why we need to wrap this in a separate `$routes` object, // but if we don't do it for some reason the `generateRoutes` macro // does not see the annotations on the methods of the outer-wrapper. // It can inspect the type and its methods fine, it's just the // `methodsymbol.annotations` ends up being empty. extraCode = Util.normalizeNewlines( s""" |val $$routesOuter = this |object $$routes extends scala.Function0[scala.Seq[io.github.mandar2812.dynaml.repl.Router.EntryPoint]]{ | def apply() = io.github.mandar2812.dynaml.repl.Router.generateRoutes[$$routesOuter.type]($$routesOuter) |} """.stripMargin ), hardcoded = true ) routeClsName <- processed.blockInfo.lastOption match{ case Some(meta) => Res.Success(meta.id.wrapperPath) case None => Res.Skip } routesCls = interp .evalClassloader .loadClass(routeClsName + "$$routes$") scriptMains = routesCls .getField("MODULE$") .get(null) .asInstanceOf[() => Seq[Router.EntryPoint]] .apply() res <- Util.withContextClassloader(interp.evalClassloader){ scriptMains match { // If there are no @main methods, there's nothing to do case Seq() => if (scriptArgs.isEmpty) Res.Success(()) else { val scriptArgString = scriptArgs.flatMap{case (a, b) => Seq(a) ++ b}.map(literalize(_)) .mkString(" ") Res.Failure("Script " + path.last + " does not take arguments: " + scriptArgString) } // If there's one @main method, we run it with all args case Seq(main) => runMainMethod(main, scriptArgs) // If there are multiple @main methods, we use the first arg to decide // which method to run, and pass the rest to that main method case mainMethods => val suffix = formatMainMethods(mainMethods) scriptArgs match{ case Seq() => Res.Failure( s"Need to specify a subcommand to call when running " + path.last + suffix ) case Seq((head, Some(_)), tail @ _*) => Res.Failure( "To select a subcommand to run, you don't need --s." + Util.newLine + s"Did you mean `${head.drop(2)}` instead of `$head`?" 
) case Seq((head, None), tail @ _*) => mainMethods.find(_.name == head) match{ case None => Res.Failure( s"Unable to find subcommand: " + backtickWrap(head) + suffix ) case Some(main) => runMainMethod(main, tail) } } } } } yield res } def formatMainMethods(mainMethods: Seq[Router.EntryPoint]) = { if (mainMethods.isEmpty) "" else{ val leftColWidth = getLeftColWidth(mainMethods.flatMap(_.argSignatures)) val methods = for(main <- mainMethods) yield formatMainMethodSignature(main, 2, leftColWidth) Util.normalizeNewlines( s""" | |Available subcommands: | |${methods.mkString(Util.newLine)}""".stripMargin ) } } def getLeftColWidth(items: Seq[ArgSig]) = { items.map(_.name.length + 2) match{ case Nil => 0 case x => x.max } } def formatMainMethodSignature(main: Router.EntryPoint, leftIndent: Int, leftColWidth: Int) = { // +2 for space on right of left col val args = main.argSignatures.map(renderArg(_, leftColWidth + leftIndent + 2 + 2, 80)) val leftIndentStr = " " * leftIndent val argStrings = for((lhs, rhs) <- args) yield { val lhsPadded = lhs.padTo(leftColWidth, ' ') val rhsPadded = rhs.lines.mkString(Util.newLine) s"$leftIndentStr $lhsPadded $rhsPadded" } val mainDocSuffix = main.doc match{ case Some(d) => Util.newLine + leftIndentStr + softWrap(d, leftIndent, 80) case None => "" } s"""$leftIndentStr${main.name}${mainDocSuffix} |${argStrings.map(_ + Util.newLine).mkString}""".stripMargin } def runMainMethod(mainMethod: Router.EntryPoint, scriptArgs: Seq[(String, Option[String])]): Res[Any] = { val leftColWidth = getLeftColWidth(mainMethod.argSignatures) def expectedMsg = formatMainMethodSignature(mainMethod, 0, leftColWidth) def pluralize(s: String, n: Int) = { if (n == 1) s else s + "s" } mainMethod.invoke(scriptArgs) match{ case Router.Result.Success(x) => Res.Success(x) case Router.Result.Error.Exception(x: AmmoniteExit) => Res.Success(x.value) case Router.Result.Error.Exception(x) => Res.Exception(x, "") case Router.Result.Error.MismatchedArguments(missing, unknown, duplicate, incomplete) => val missingStr = if (missing.isEmpty) "" else { val chunks = for (x <- missing) yield "--" + x.name + ": " + x.typeString val argumentsStr = pluralize("argument", chunks.length) s"Missing $argumentsStr: (${chunks.mkString(", ")})" + Util.newLine } val unknownStr = if (unknown.isEmpty) "" else { val argumentsStr = pluralize("argument", unknown.length) s"Unknown $argumentsStr: " + unknown.map(literalize(_)).mkString(" ") + Util.newLine } val duplicateStr = if (duplicate.isEmpty) "" else { val lines = for ((sig, options) <- duplicate) yield { s"Duplicate arguments for (--${sig.name}: ${sig.typeString}): " + options.map(literalize(_)).mkString(" ") + Util.newLine } lines.mkString } val incompleteStr = incomplete match{ case None => "" case Some(sig) => s"Option (--${sig.name}: ${sig.typeString}) is missing a corresponding value" + Util.newLine } Res.Failure( Util.normalizeNewlines( s"""$missingStr$unknownStr$duplicateStr$incompleteStr |Arguments provided did not match expected signature: | |$expectedMsg |""".stripMargin ) ) case Router.Result.Error.InvalidArguments(x) => val argumentsStr = pluralize("argument", x.length) val thingies = x.map{ case Router.Result.ParamError.Invalid(p, v, ex) => val literalV = literalize(v) val rendered = {renderArgShort(p)} s"$rendered: ${p.typeString} = $literalV failed to parse with $ex" case Router.Result.ParamError.DefaultFailed(p, ex) => s"${renderArgShort(p)}'s default value failed to evaluate with $ex" } Res.Failure( Util.normalizeNewlines( s"""The following 
$argumentsStr failed to parse: | |${thingies.mkString(Util.newLine)} | |expected signature: | |$expectedMsg """.stripMargin ) ) } } def softWrap(s: String, leftOffset: Int, maxWidth: Int) = { val oneLine = s.lines.mkString(" ").split(' ') lazy val indent = " " * leftOffset val output = new StringBuilder(oneLine.head) var currentLineWidth = oneLine.head.length for(chunk <- oneLine.tail){ val addedWidth = currentLineWidth + chunk.length + 1 if (addedWidth > maxWidth){ output.append(Util.newLine + indent) output.append(chunk) currentLineWidth = chunk.length } else{ currentLineWidth = addedWidth output.append(' ') output.append(chunk) } } output.mkString } def renderArgShort(arg: ArgSig) = "--" + backtickWrap(arg.name) def renderArg(arg: ArgSig, leftOffset: Int, wrappedWidth: Int): (String, String) = { val suffix = arg.default match{ case Some(f) => " (default " + f() + ")" case None => "" } val docSuffix = arg.doc match{ case Some(d) => ": " + d case None => "" } val wrapped = softWrap( arg.typeString + suffix + docSuffix, leftOffset, wrappedWidth - leftOffset ) (renderArgShort(arg), wrapped) } def mainMethodDetails(ep: EntryPoint) = { ep.argSignatures.collect{ case ArgSig(name, tpe, Some(doc), default) => Util.newLine + name + " // " + doc }.mkString } /** * Additional [[scopt.Read]] instance to teach it how to read Ammonite paths */ implicit def pathScoptRead: scopt.Read[Path] = scopt.Read.stringRead.map(Path(_, pwd)) } /** * Give Ammonite the ability to read (linux) system proxy environment variables * and convert them into java proxy properties. Which allows Ammonite to work * through proxy automatically, instead of setting `System.properties` manually. * * See issue 460. * * Parameter pattern: * https://docs.oracle.com/javase/7/docs/api/java/net/doc-files/net-properties.html * * Created by cuz on 17-5-21. */ private[dynaml] object ProxyFromEnv { private lazy val KeyPattern ="""([\w\d]+)_proxy""".r private lazy val UrlPattern ="""([\w\d]+://)?(.+@)?([\w\d\.]+):(\d+)/?""".r /** * Get current proxy environment variables. */ private def getEnvs = sys.env.map { case (k, v) => (k.toLowerCase, v.toLowerCase) } .filterKeys(_.endsWith("proxy")) /** * Convert single proxy environment variable to corresponding system proxy properties. */ private def envToProps(env: (String, String)): Map[String, String] = env match { case ("no_proxy", noProxySeq) => val converted = noProxySeq.split(""",""").mkString("|") //https uses the same as http's. Ftp need not to be set here. Map("http.nonProxyHosts" -> converted) case (KeyPattern(proto), UrlPattern(_, cred, host, port)) => val propHost = s"$proto.proxyHost" -> host val propPort = s"$proto.proxyPort" -> port val propCred = if (cred.isDefined) { val credPair = cred.dropRight(1).split(":") val propUser = s"$proto.proxyUser" -> credPair.head val propPassword = credPair.drop(1).map(s"$proto.proxyPassword" -> _) Seq(propUser) ++ propPassword } else Nil Seq(propHost, propPort) ++ propCred toMap case bad => Map.empty } /** * Set system proxy properties from environment variables. * Existing properties will not be overwritten. */ def setPropProxyFromEnv(envs: Map[String, String] = this.getEnvs): Unit = { val sysProps = sys.props val proxyProps = envs.flatMap { env => val props = envToProps(env) if (props.isEmpty) println(s"Warn: environment variable$env cannot be parsed.") props }.filter(p => !sysProps.exists(sp => sp._1 == p._1)) sysProps ++= proxyProps } /** * helper implicit conversion: add isDefined method to String. 
*/ implicit private class StringIsDefined(s: String) { def isDefined: Boolean = s != null && s.length > 0 } }
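// Editor's sketch -- not part of the original file. The Router macro above targets
// script entry points of roughly this shape; the method, its arguments and the @doc
// string are hypothetical examples.
object RouterUsageSketch {

  import io.github.mandar2812.dynaml.repl.Router.{doc, main}

  @main
  @doc("Train a toy model on a csv file")
  def train(dataFile: String, epochs: Int = 10): Unit =
    println(s"training on $dataFile for $epochs epochs")

  // In a DynaML/Ammonite script this object would be picked up via something like
  //   Router.generateRoutes[RouterUsageSketch.type](RouterUsageSketch)
  // and the entry point invoked as, e.g.,  train --dataFile data.csv --epochs 5
}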
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/StateSpace.scala
package io.github.mandar2812.dynaml.models.statespace

import POMP._
import breeze.stats.distributions.{Rand, Gaussian, MultivariateGaussian}
import breeze.linalg.{diag, DenseVector}
import breeze.numerics.{exp, sqrt}

object StateSpace {

  /**
    * Steps all the states using the identity
    * @param p a Parameter
    * @return a function from state, dt => State
    */
  def stepNull(p: Parameters): (State, TimeIncrement) => Rand[State] = {
    (s, dt) => new Rand[State] { def draw = State.map(s)(x => x) }
  }

  /**
    * A step function for generalised brownian motion, dx_t = mu dt + sigma dW_t
    * @param p an sde parameter
    * @return A function from state, time increment to state
    */
  def stepBrownian(p: SdeParameter): (State, TimeIncrement) => Rand[State] = {
    (s, dt) => p match {
      case BrownianParameter(mu, sigma) => {
        new Rand[State] {
          def draw = s map (x =>
            DenseVector((x.data, mu.data, diag(sigma).toArray).zipped.
              map { case (a, m, sd) => Gaussian(a + m * dt, Math.sqrt(sd * sd * dt)).draw }))
        }
      }
    }
  }

  /**
    * Steps the state by the value of the parameter "a"
    * multiplied by the time increment "dt"
    * @param p a parameter Map
    * @return a function from (State, dt) => State, with the
    * states being the same structure before and after
    */
  def stepConstant(p: SdeParameter): (State, TimeIncrement) => Rand[State] = {
    (s, dt) => p match {
      case StepConstantParameter(a) =>
        new Rand[State] { def draw = s map (_ + (a :* dt)) }
    }
  }

  /**
    * A step function for the Ornstein Uhlenbeck process dx_t = alpha * (theta - x_t) dt + sigma dW_t
    * @param p the parameters of the ornstein uhlenbeck process, theta, alpha and sigma
    * @return
    */
  def stepOrnstein(p: SdeParameter): (State, TimeIncrement) => Rand[State] = {
    (s, dt) => new Rand[State] {
      def draw = p match {
        case OrnsteinParameter(theta, alpha, sigma) =>
          s map { x =>
            // calculate the mean of the solution
            val mean = (x.data, alpha.data, theta.data).zipped map {
              case (state, a, t) => t + (state - t) * exp(- a * dt)
            }
            // calculate the variance of the solution
            val variance = (sigma.data, alpha.data).zipped map {
              case (s, a) => (s*s/(2*a))*(1-exp(-2*a*dt))
            }
            DenseVector(mean.zip(variance) map { case (a, v) => Gaussian(a, sqrt(v)).draw() })
          }
      }
    }
  }
}
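// Editor's sketch -- not part of the original file. The closed-form moments used in
// stepOrnstein, written out for a single scalar state so the discretisation is easy to
// check by hand; the numbers are arbitrary illustrative values.
object OrnsteinStepSketch {

  import breeze.numerics.exp

  val (x0, theta, alpha, sigma, dt) = (1.0, 0.0, 0.5, 0.3, 0.1)

  val mean     = theta + (x0 - theta) * exp(-alpha * dt)
  val variance = (sigma * sigma / (2 * alpha)) * (1 - exp(-2 * alpha * dt))
  // mean is roughly 0.951 and variance roughly 0.00856 for the values above
}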
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/kernels/PeriodicKernel.scala
package io.github.mandar2812.dynaml.kernels

import breeze.linalg.{norm, DenseMatrix, DenseVector}

/**
  * @author mandar2812
  *
  * Implementation of the periodic kernel
  * K(x,y) = exp(-2*sin^2(pi*omega*||x-y||)/(2*l^2))
  * */
class PeriodicKernel(private var lengthscale: Double = 1.0,
                     private var freq: Double = 1.0)
  extends SVMKernel[DenseMatrix[Double]]
    with LocalScalarKernel[DenseVector[Double]]
    with Serializable {

  override val hyper_parameters = List("lengthscale", "frequency")

  state = Map("lengthscale" -> lengthscale, "frequency" -> freq)

  def setlengthscale(d: Double): Unit = {
    this.state += ("lengthscale" -> d)
    this.lengthscale = d
  }

  def setfrequency(f: Double): Unit = {
    this.state += ("frequency" -> f)
    this.freq = f
  }

  override def evaluateAt(
    config: Map[String, Double])(
    x: DenseVector[Double],
    y: DenseVector[Double]): Double = {

    val diff = x - y

    Math.exp(-2*math.pow(math.sin(norm(diff, 1)*math.Pi*config("frequency")), 2)/
      (2*math.pow(config("lengthscale"), 2)))
  }

  override def gradientAt(
    config: Map[String, Double])(
    x: DenseVector[Double],
    y: DenseVector[Double]): Map[String, Double] = {

    val diff = norm(x-y, 1)
    val k = math.Pi*config("frequency")*diff/math.pow(config("lengthscale"), 2)

    Map(
      "frequency" -> -2.0*evaluateAt(config)(x,y)*math.sin(2.0*k)*k/config("frequency"),
      "lengthscale" -> 4.0*evaluateAt(config)(x,y)*math.sin(2*k)*k/config("lengthscale")
    )
  }

  def getlengthscale: Double = this.lengthscale
}

class PeriodicCovFunc(private var lengthscale: Double = 1.0,
                      private var freq: Double = 1.0)
  extends LocalScalarKernel[Double] {

  override val hyper_parameters = List("lengthscale", "frequency")

  state = Map("lengthscale" -> lengthscale, "frequency" -> freq)

  override def evaluateAt(
    config: Map[String, Double])(
    x: Double,
    y: Double): Double = {

    val diff = x - y

    Math.exp(-2*math.pow(math.sin(diff*math.Pi*config("frequency")), 2)/
      (2*math.pow(config("lengthscale"), 2)))
  }

  override def gradientAt(
    config: Map[String, Double])(
    x: Double,
    y: Double): Map[String, Double] = {

    val diff = math.abs(x-y)
    val k = math.Pi*config("frequency")*diff/math.pow(config("lengthscale"), 2)

    Map(
      "frequency" -> -2.0*evaluateAt(config)(x,y)*math.sin(2.0*k)*k/config("frequency"),
      "lengthscale" -> 4.0*evaluateAt(config)(x,y)*math.sin(2*k)*k/config("lengthscale")
    )
  }
}
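// Editor's sketch -- not part of the original file; it assumes the `state` map inherited
// from the kernel hierarchy is publicly readable. Points exactly one period apart give a
// kernel value close to the maximum of 1.
object PeriodicKernelSketch {

  import breeze.linalg.DenseVector

  val kern = new PeriodicKernel(lengthscale = 1.5, freq = 1.0)

  val x = DenseVector(0.0)
  val y = DenseVector(1.0)                    // one full period away when frequency = 1
  val k = kern.evaluateAt(kern.state)(x, y)   // close to 1.0 since sin(pi) is ~0
}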
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/PMMH.scala
<reponame>amitkumarj441/DynaML<filename>dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/models/statespace/PMMH.scala package io.github.mandar2812.dynaml.models.statespace import breeze.stats.distributions.{Uniform, Rand, Process, MarkovChain} import POMP._ import akka.stream.scaladsl._ case class MetropState(ll: LogLikelihood, params: Parameters, accepted: Int) trait MetropolisHastings { /** * Proposal density, to propose new parameters for a model */ def proposal: Parameters => Rand[Parameters] /** * Definition of the log-transition, used when calculating the acceptance ratio * This is the probability of moving between parameters according to the proposal distribution * Note: When using a symmetric proposal distribution (eg. Normal) this cancels in the acceptance ratio * @param from the previous parameter value * @param to the proposed parameter value */ def logTransition(from: Parameters, to: Parameters): LogLikelihood val initialParams: Parameters /** * The likelihood function of the model, typically a pseudo-marginal likelihood for * the PMMH algorithm */ def logLikelihood: Parameters => LogLikelihood /** * Metropolis-Hastings step, for use in the iters function, to return an Akka stream of iterations */ def mhStep: MetropState => Option[(MetropState, MetropState)] = p => { val propParams = proposal(p.params).draw val propll = logLikelihood(propParams) val a = propll - p.ll + logTransition(propParams, p.params) - logTransition(p.params, propParams) if (math.log(Uniform(0, 1).draw) < a) { Some((MetropState(propll, propParams, p.accepted + 1), p)) } else { Some((p, p)) } } /** * Generates an akka stream of MetropState, containing the current parameters, * count of accepted moves and the current pseudo marginal log-likelihood */ def iters: Source[MetropState, Any] = { val initState = MetropState(logLikelihood(initialParams), initialParams, 0) Source.unfold(initState)(mhStep) } /** * Return an akka stream of the parameters */ def params: Source[Parameters, Any] = { iters map (_.params) } /** * A single step of the metropolis hastings algorithm to be used with breeze implementation of Markov Chain. 
* This is a slight alteration to the implementation in breeze, here MetropState holds on to the previous * calculated pseudo marginal log-likelihood value so we don't need to re-run the particle filter each iteration */ def mhStepRand: MetropState => Rand[MetropState] = p => { for { propParams <- proposal(p.params) propll = logLikelihood(propParams) a = propll - p.ll + logTransition(propParams, p.params) - logTransition(p.params, propParams) prop = if (math.log(Uniform(0,1).draw) < a) { MetropState(propll, propParams, p.accepted + 1) } else { p } } yield prop } /** * Use the Breeze Markov Chain to generate a process of MetropState * Process can be advanced by calling step */ def breezeIters: Process[MetropState] = { val initState = MetropState(logLikelihood(initialParams), initialParams, 0) MarkovChain(initState)(mhStepRand) } } case class ParticleMetropolis( mll: Parameters => LogLikelihood, initParams: Parameters, perturb: Parameters => Rand[Parameters]) extends MetropolisHastings{ def logLikelihood: Parameters => LogLikelihood = mll def logTransition(from: Parameters, to: Parameters): LogLikelihood = 0.0 def proposal: Parameters => Rand[Parameters] = perturb val initialParams = initParams } case class ParticleMetropolisHastings( mll: Parameters => LogLikelihood, transitionProb: (Parameters, Parameters) => LogLikelihood, propParams: Parameters => Rand[Parameters], initParams: Parameters) extends MetropolisHastings { def logLikelihood: Parameters => LogLikelihood = mll def logTransition(from: Parameters, to: Parameters): LogLikelihood = transitionProb(from, to) def proposal: Parameters => Rand[Parameters] = propParams val initialParams = initParams }
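// Editor's sketch -- not part of the original file. The same accept/reject rule written
// for a plain scalar parameter with a symmetric Gaussian random-walk proposal, so the
// logTransition terms cancel exactly as described in the trait above.
object ScalarMHSketch {

  import breeze.stats.distributions.{Gaussian, Uniform}

  // Takes the current (parameter, log-likelihood) pair and returns the next one.
  def mhStep(logLik: Double => Double)(current: (Double, Double)): (Double, Double) = {
    val (theta, ll) = current
    val proposed    = theta + Gaussian(0.0, 0.1).draw
    val propLl      = logLik(proposed)
    val logAlpha    = propLl - ll   // symmetric proposal: transition terms cancel
    if (math.log(Uniform(0, 1).draw) < logAlpha) (proposed, propLl) else (theta, ll)
  }
}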
amitkumarj441/DynaML
dynaml-examples/src/main/scala-2.11/io/github/mandar2812/dynaml/examples/TestDelve.scala
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.examples

import java.io.File

import breeze.linalg.{DenseMatrix, DenseVector}
import com.github.tototoshi.csv.CSVWriter
import io.github.mandar2812.dynaml.kernels.{RBFKernel, SVMKernel}
import io.github.mandar2812.dynaml.models.KernelizedModel
import io.github.mandar2812.dynaml.models.svm.{KernelSparkModel, LSSVMSparkModel}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * @author mandar2812 on 22/6/15.
  */
object TestDelve {

  def apply(nCores: Int = 4, prototypes: Int = 1, kernel: String,
            globalOptMethod: String = "gs", grid: Int = 7,
            step: Double = 0.45, logscale: Boolean = false, frac: Double,
            executors: Int = 1, paraFactor: Int = 2,
            csaIt: Int = 5): DenseVector[Double] = {

    val config = Map(
      "file" -> "data/fried_delve.data",
      "delim" -> ",",
      "head" -> "false",
      "task" -> "regression")

    val configtest = Map(
      "file" -> "data/fried_delve_test.data",
      "delim" -> ",",
      "head" -> "false")

    val conf = new SparkConf().setAppName("Fried Delve Synthetic Data").setMaster("local["+nCores+"]")

    conf.registerKryoClasses(Array(
      classOf[LSSVMSparkModel], classOf[KernelSparkModel],
      classOf[KernelizedModel[RDD[(Long, LabeledPoint)], RDD[LabeledPoint],
        DenseVector[Double], DenseVector[Double], Double, Int, Int]],
      classOf[SVMKernel[DenseMatrix[Double]]], classOf[RBFKernel],
      classOf[DenseVector[Double]],
      classOf[DenseMatrix[Double]]))

    val sc = new SparkContext(conf)

    val model = LSSVMSparkModel(config, sc)

    val nProt = if (kernel == "Linear") {
      model.npoints.toInt
    } else {
      if (prototypes > 0)
        prototypes
      else
        math.sqrt(model.npoints.toDouble).toInt
    }

    val (optModel, optConfig) = KernelizedModel.getOptimizedModel[RDD[(Long, LabeledPoint)],
      RDD[LabeledPoint], model.type](model, globalOptMethod,
      kernel, nProt, grid, step, logscale, csaIt)

    optModel.learn()

    val met = optModel.evaluate(configtest)

    met.print()
    println("Optimal Configuration: "+optConfig)
    val scale = if (logscale) "log" else "linear"

    val perf = met.kpi()
    val row = Seq(kernel, prototypes.toString, globalOptMethod,
      grid.toString, step.toString, scale,
      perf(0), perf(1), perf(2), optConfig.toString)

    val writer = CSVWriter.open(new File("data/resultsDelve.csv"), append = true)
    writer.writeRow(row)
    writer.close()
    optModel.unpersist
    perf
  }
}
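For orientation, a hypothetical call to the object above (for instance from the DynaML shell); the argument values are purely illustrative, and kernel and frac must be supplied explicitly since they carry no defaults.

// Illustrative invocation only; hyper-parameter values are not taken from the repository.
val results: DenseVector[Double] = TestDelve(
  nCores = 4, prototypes = 50, kernel = "RBF",
  globalOptMethod = "gs", grid = 10, step = 0.2,
  logscale = false, frac = 1.0)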
amitkumarj441/DynaML
dynaml-examples/src/main/scala-2.11/io/github/mandar2812/dynaml/examples/TestNNDelve.scala
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.examples import breeze.linalg.{DenseVector => BDV} import io.github.mandar2812.dynaml.evaluation.RegressionMetrics import io.github.mandar2812.dynaml.graph.FFNeuralGraph import io.github.mandar2812.dynaml.modelpipe.GLMPipe import io.github.mandar2812.dynaml.models.lm.GeneralizedLinearModel import io.github.mandar2812.dynaml.models.neuralnets.{Activation, FeedForwardNetwork, GenericFFNeuralNet, NeuralStackFactory} import io.github.mandar2812.dynaml.pipes.{DataPipe, _} import io.github.mandar2812.dynaml.utils import io.github.mandar2812.dynaml.DynaMLPipe._ import io.github.mandar2812.dynaml.optimization.FFBackProp import io.github.mandar2812.dynaml.utils.GaussianScaler /** * Created by mandar on 11/1/16. */ object TestNNDelve { implicit val transform: DataPipe[ Stream[(BDV[Double], Double)], Stream[(BDV[Double], BDV[Double])]] = DataPipe((d: Stream[(BDV[Double], Double)]) => d.map(el => (el._1, BDV(el._2)))) def apply (hidden: Int = 2, nCounts:List[Int] = List(), acts:List[String], training: Int = 100, test: Int = 1000, columns: List[Int] = List(10,0,1,2,3,4,5,6,7,8,9), stepSize: Double = 0.01, maxIt: Int = 30, mini: Double = 1.0, alpha: Double = 0.5, regularization: Double = 0.5): Unit = { //Load Housing data into a stream //Extract the time and Dst values //separate data into training and test //pipe training data to model and then generate test predictions //create RegressionMetrics instance and produce plots val extractTrainingFeatures = (l: Stream[String]) => utils.extractColumns(l, ",", columns, Map()) val normalizeData = (trainTest: (Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)])) => { val (mean, variance) = utils.getStats(trainTest._1.map(tup => BDV(tup._1.toArray ++ Array(tup._2))).toList) val stdDev: BDV[Double] = variance.map(v => math.sqrt(v/(trainTest._1.length.toDouble - 1.0))) val normalizationFunc = (point: (BDV[Double], Double)) => { val extendedpoint = BDV(point._1.toArray ++ Array(point._2)) val normPoint = (extendedpoint - mean) :/ stdDev val length = normPoint.length (normPoint(0 until length), normPoint(-1)) } ((trainTest._1.map(normalizationFunc), trainTest._2.map(normalizationFunc)), (mean, stdDev)) } val modelTrainTest = (trainTest: ((Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)]), (BDV[Double], BDV[Double]))) => { val gr = FFNeuralGraph(trainTest._1._1.head._1.length, 1, hidden, acts, nCounts) val model = new FeedForwardNetwork[Stream[(BDV[Double], Double)]](trainTest._1._1, gr) model.setLearningRate(stepSize) .setMaxIterations(maxIt) .setBatchFraction(mini) .setMomentum(alpha) .setRegParam(regularization) .learn() val res = model.test(trainTest._1._2) val scoresAndLabelsPipe = DataPipe( (res: Seq[(BDV[Double], BDV[Double])]) => res.map(i => (i._1(0), 
i._2(0))).toList) > DataPipe((list: List[(Double, Double)]) => list.map{l => (l._1*trainTest._2._2(-1) + trainTest._2._1(-1), l._2*trainTest._2._2(-1) + trainTest._2._1(-1))}) val scoresAndLabels = scoresAndLabelsPipe.run(res) val metrics = new RegressionMetrics(scoresAndLabels, scoresAndLabels.length) metrics.print() metrics.generatePlots() } val preProcessPipe = DataPipe(utils.textFileToStream _) > DataPipe(extractTrainingFeatures) > StreamDataPipe((line: String) => { val split = line.split(",") (BDV(split.tail.map(_.toDouble)), split.head.toDouble) }) val trainTestPipe = DataPipe(preProcessPipe, preProcessPipe) > DataPipe((data: (Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)])) => { (data._1.take(training), data._2.takeRight(test)) }) > DataPipe(normalizeData) > DataPipe(modelTrainTest) trainTestPipe.run(("data/delve.csv", "data/delve.csv")) } } object TestNeuralStackDelve { def apply( nCounts: List[Int], acts: List[Activation[BDV[Double]]], training: Int = 100, test: Int = 1000, columns: List[Int] = List(10,0,1,2,3,4,5,6,7,8,9), stepSize: Double = 0.01, maxIt: Int = 30, mini: Double = 1.0, alpha: Double = 0.5, regularization: Double = 0.5) = { type Data = Stream[(BDV[Double], BDV[Double])] type Scales = (GaussianScaler, GaussianScaler) val readData = fileToStream > extractTrainingFeatures(columns, Map()) > splitFeaturesAndTargets > StreamDataPipe((pattern: (BDV[Double], Double)) => (pattern._1, BDV(pattern._2))) val preProcessPipe = duplicate(readData) > splitTrainingTest(training, test) > gaussianScalingTrainTest val modelTrainTestPipe = DataPipe((dataAndScales: (Data, Data, Scales)) => { /* * First create the elements needed for * the neural architecture: * * 1. A Stack Factory * 2. A random weight generator * 3. A learning procedure * */ val (trainingData, testData, scales) = dataAndScales /* * The stack factory will create the architecture to the specification * every time it is given the layer parameters. * */ val stackFactory = NeuralStackFactory(nCounts)(acts) val weightsInitializer = GenericFFNeuralNet.getWeightInitializer(nCounts) val backPropOptimizer = new FFBackProp(stackFactory) .setNumIterations(maxIt) .setRegParam(regularization) .setStepSize(stepSize) .setMiniBatchFraction(mini) .momentum_(alpha) val ff_neural_net = GenericFFNeuralNet( backPropOptimizer, trainingData, identityPipe[Data], weightsInitializer) /* * Train the model * */ ff_neural_net.learn() /* * Generate predictions for the test data * and evaluate performance. 
* */ val predictionsAndTargets = (scales._2*scales._2).i(testData.map(p => (ff_neural_net.predict(p._1), p._2))) (ff_neural_net, new RegressionMetrics( predictionsAndTargets.map(c => (c._1(0), c._2(0))).toList, predictionsAndTargets.length)) }) val dataFlow = preProcessPipe > modelTrainTestPipe val dataFile = dataDir+"/delve.csv" dataFlow((dataFile, dataFile)) } } object TestGLMDelve { def apply(training: Int = 100, test: Int = 1000, columns: List[Int] = List(10,0,1,2,3,4,5,6,7,8,9), stepSize: Double = 0.01, maxIt: Int = 30, mini: Double = 1.0, alpha: Double = 0.5, regularization: Double = 0.5) = { val modelpipe = new GLMPipe( (tt: ((Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)]), (BDV[Double], BDV[Double]))) => tt._1._1) > trainParametricModel[ Stream[(BDV[Double], Double)], BDV[Double], BDV[Double], Double, Stream[(BDV[Double], Double)], GeneralizedLinearModel[Stream[(BDV[Double], Double)]] ](regularization, stepSize, maxIt, mini) val testPipe = DataPipe( (modelAndData: ( GeneralizedLinearModel[Stream[(BDV[Double], Double)]], (Stream[(BDV[Double], Double)], BDV[Double], BDV[Double]))) => { val pipe1 = StreamDataPipe((couple: (BDV[Double], Double)) => { (modelAndData._1.predict(couple._1), couple._2) }) val means = modelAndData._2._2 val stdDevs = modelAndData._2._3 val scoresAndLabelsPipe = pipe1 > StreamDataPipe((s: (Double, Double)) => ((s._1*stdDevs(-1)) + means(-1), (s._2*stdDevs(-1))+means(-1)) ) val scoresAndLabels = scoresAndLabelsPipe.run(modelAndData._2._1).toList val metrics = new RegressionMetrics( scoresAndLabels, scoresAndLabels.length) metrics.setName("<NAME>") metrics.print() metrics.generatePlots() }) val preProcessPipe = fileToStream > extractTrainingFeatures(columns, Map()) > splitFeaturesAndTargets val trainTestPipe = DataPipe(preProcessPipe, preProcessPipe) > DataPipe((data: (Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)])) => { (data._1.take(training), data._2.takeRight(test)) }) > trainTestGaussianStandardization > BifurcationPipe(modelpipe, DataPipe((tt: ((Stream[(BDV[Double], Double)], Stream[(BDV[Double], Double)]), (BDV[Double], BDV[Double]))) => (tt._1._2, tt._2._1, tt._2._2))) > testPipe trainTestPipe run ("data/delve.csv", "data/delve.csv") } }
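A short, hypothetical invocation of the TestGLMDelve object defined above; the values are illustrative and simply exercise the default column layout of data/delve.csv.

// Illustrative call; trains a GLM on 500 rows of data/delve.csv and evaluates on 1000.
TestGLMDelve(training = 500, test = 1000, stepSize = 0.01, maxIt = 40, regularization = 0.1)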
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/evaluation/BinaryClassificationMetricsSpark.scala
<filename>dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/evaluation/BinaryClassificationMetricsSpark.scala /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.evaluation import breeze.linalg.DenseVector import com.quantifind.charts.Highcharts._ import org.apache.log4j.{Priority, Logger} import org.apache.spark.rdd.RDD import scalax.chart.module.ChartFactories.{XYAreaChart, XYLineChart} /** * Class implementing the calculation * of different binary classification * performance metrics * * */ class BinaryClassificationMetricsSpark( protected val scores: RDD[(Double, Double)], val len: Long, minmax: (Double, Double)) extends Metrics[Double]{ override protected val scoresAndLabels = List() private val logger = Logger.getLogger(this.getClass) val length = len /** * A list of threshold values from * -1.0 to 1.0 in 100 steps. These * will be used to measure the variation * in precision, recall False Positive * and False Negative values. * */ private val (scMin, scMax) = minmax private val thresholds = List.tabulate(100)(i => { scMin + i.toDouble*((scMax.toInt - scMin.toInt + 1)/100.0)}) private var num_positives = 0.0 private var num_negatives = 0.0 private var tpfpList: List[(Double, (Double, Double))] = List() def scores_and_labels = this.scoresAndLabels private def areaUnderCurve(points: List[(Double, Double)]): Double = points.sliding(2) .map(l => (l(1)._1 - l.head._1) * (l(1)._2 + l.head._2)/2).sum /** * Calculate the area under the Precision-Recall * curve. * */ def areaUnderPR(): Double = areaUnderCurve(this.pr()) /** * Calculate the area under the Receiver * Operating Characteristic curve. * */ def areaUnderROC(): Double = areaUnderCurve(this.roc()) /** * Calculate the F1 metric by threshold, for a * beta value of 1.0 * */ def fMeasureByThreshold(): List[(Double, Double)] = fMeasureByThreshold(1.0) /** * Calculate the F1 metric by threshold, for an * arbitrary beta value * */ def fMeasureByThreshold(beta: Double): List[(Double, Double)] = { val tpfpbuf = if(tpfpList.isEmpty) tpfpByThreshold() else tpfpList tpfpbuf.map((couple) => { val tp = couple._2._1 val fp = couple._2._2 val betasq = math.pow(beta, 2.0) (couple._1, (1 + betasq)*tp/((1 + betasq)*tp + betasq*(1-tp) + fp)) }) } /** * Return the Precision-Recall curve, as a [[List]] * of [[Tuple2]] (Recall, Precision). * */ def pr(): List[(Double, Double)] = recallByThreshold().zip(precisionByThreshold()).map((couple) => (couple._1._2, couple._2._2)).sorted /** * Return the Recall-Threshold curve, as a [[List]] * of [[Tuple2]] (Threshold, Recall). 
* */ def recallByThreshold(): List[(Double, Double)] = { val tpfpbuf = if(tpfpList.isEmpty) tpfpByThreshold() else tpfpList tpfpbuf.map((point) => (point._1, point._2._1)) } /** * Return the Precision-Threshold curve, as a [[List]] * of [[Tuple2]] (Threshold, Precision). * */ def precisionByThreshold(): List[(Double, Double)] = { val tpfpbuf = if(tpfpList.isEmpty) tpfpByThreshold() else tpfpList tpfpbuf.map((point) => (point._1, point._2._1/(point._2._1 + point._2._2))) } /** * Return the Receiver Operating Characteristic * curve, as a [[List]] of [[Tuple2]] * (False Positive Rate, True Positive Rate). * */ def roc(): List[(Double, Double)] = { val tpfpbuf = if(tpfpList.isEmpty) tpfpByThreshold() else tpfpList tpfpbuf.map((point) => (point._2._2, point._2._1)).sorted } /** * Return the True Positive and False Positive Rate * with respect to the threshold, as a [[List]] * of [[Tuple2]] (Threshold, (True Positive rate, False Positive Rate)). * */ def tpfpByThreshold(): List[(Double, (Double, Double))] = { val positives = scores.context.accumulator(0.0, "positives") val negatives = scores.context.accumulator(0.0, "negatives") val ths = scores.context.broadcast(thresholds.length) val thres = scores.context.broadcast(thresholds) val (tp, fp) = this.scores.mapPartitions((scoresAndLabels) =>{ Seq(scoresAndLabels.map((sl) => { val (tpv, fpv): (DenseVector[Double], DenseVector[Double]) = if(sl._2 == 1.0) { positives += 1.0 (DenseVector.tabulate(ths.value)(i => { if(math.signum(sl._1 - thres.value(i)) == sl._2) 1.0 else 0.0 }), DenseVector.zeros(ths.value)) } else { negatives += 1.0 (DenseVector.zeros(ths.value), DenseVector.tabulate(ths.value)(i => { if(math.signum(sl._1 - thres.value(i)) == 1.0) 1.0 else 0.0 })) } (tpv,fpv) }).reduce((c1, c2) => { (c1._1+c2._1, c1._2+c2._2) })).toIterator }).reduce((c1, c2) => { (c1._1+c2._1, c1._2+c2._2) }) this.num_positives = positives.value this.num_negatives = negatives.value List.tabulate(thresholds.length){t => { (thresholds(t), (tp(t)/positives.value, fp(t)/negatives.value)) }} } def accuracyByThreshold(): List[(Double, Double)] = { val tpfpbuf = if(tpfpList.isEmpty) tpfpByThreshold() else tpfpList tpfpbuf.map((t) => (t._1, (t._2._1*num_positives + (1.0-t._2._2)*num_negatives)/length.toDouble)) } /** * Generate the PR, ROC and F1 measure * plots using Scala-Chart. 
* */ override def generatePlots(): Unit = { val roccurve = this.roc() val prcurve = this.pr() val fm = this.fMeasureByThreshold() implicit val theme = org.jfree.chart.StandardChartTheme.createDarknessTheme scala.Predef.print("Generating ROC Plot") /*val chart1 = XYAreaChart(roccurve, title = "Receiver Operating Characteristic", legend = true) chart1.show() scala.Predef.print( "Generating PR Plot") val chart2 = XYAreaChart(prcurve, title = "Precision Recall Curve", legend = true) chart2.show() scala.Predef.print( "Generating F1 measure Plot") val chart3 = XYLineChart(fm, title = "F1 measure by threshold beta = 1", legend = true) chart3.show()*/ areaspline(roccurve.map(_._1), roccurve.map(_._2)) title("Receiver Operating Characteristic") xAxis("False Positives") yAxis("True Positives") } override def print(): Unit = { println("Classification Model Performance: "+name) println("============================") scala.Predef.print("Accuracy = ") pprint.pprintln(accuracyByThreshold().map((c) => c._2).max) scala.Predef.print("Area under ROC = ") pprint.pprintln(areaUnderROC()) } override def kpi() = { this.tpfpList = this.tpfpByThreshold() DenseVector(accuracyByThreshold().map((c) => c._2).max, fMeasureByThreshold().map((c) => c._2).max, areaUnderROC())} }
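A brief usage sketch of the class above, assuming an existing SparkContext sc and labels in {1.0, -1.0}, which is what the tpfpByThreshold implementation expects; the toy scores are illustrative only.

// Hypothetical usage; `sc` is an existing SparkContext, data are made up.
val scoresAndLabels = sc.parallelize(Seq(
  (0.8, 1.0), (0.1, 1.0), (-0.3, -1.0), (-0.9, -1.0)))

val metrics = new BinaryClassificationMetricsSpark(
  scoresAndLabels, len = 4L, minmax = (-1.0, 1.0))

metrics.print()                  // best accuracy over thresholds and area under ROC
val rocCurve = metrics.roc()     // (FPR, TPR) pairs over the 100 generated thresholds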
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/tensorflow/api/Api.scala
<gh_stars>0 package io.github.mandar2812.dynaml.tensorflow.api import java.nio.ByteBuffer import io.github.mandar2812.dynaml.probability.RandomVariable import org.platanios.tensorflow.api.tensors.TensorConvertible import org.platanios.tensorflow.api.types.{DataType, SupportedType} import org.platanios.tensorflow.api.{FLOAT16, FLOAT32, FLOAT64, INT16, INT32, INT64, Shape, Tensor, tfi} private[tensorflow] object Api { /** * Construct a tensor from a list of elements. * * @tparam T The type of the elements * * @param dtype The tensorflow data type of the elements, * this is usually defined by tensorflow scala * i.e. FLOAT64, INT32 etc * * @param shape The shape of the tensor i.e. Shape(1,2,3) * denotes a rank 3 tensor with 1, 2 and 3 dimensions * for the ranks respectively. * * @param buffer The elements of type [[T]], can accept varying * number of arguments. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_from(INT32, Shape(1, 2, 3))(1, 2, 3, 4, 5, 6) * * */ def tensor_from[T](dtype: DataType.Aux[T], shape: Shape)(buffer: T*)(implicit ev: TensorConvertible[T]): Tensor = { Tensor(dtype, buffer.head, buffer.tail:_*).reshape(shape) } /** * Construct a tensor from a list of elements. * * @tparam T The type of the elements * * @param dtype The tensorflow data type of the elements, * as a string i.e. "FLOAT64", "INT32" etc * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements of type [[T]], as a Sequence * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_from("INT32", 1, 2, 3)((1 to 6).toSeq) * * */ def tensor_from[T](dtype: String, shape: Int*)(buffer: Seq[T])(implicit ev: TensorConvertible[T]): Tensor = { Tensor(DataType.fromName(dtype), buffer.head, buffer.tail:_*).reshape(Shape(shape:_*)) } /** * Construct a tensor from a array of bytes. * * @tparam T The type of the elements * * @param dtype The tensorflow data type of the elements, * as a string i.e. "FLOAT64", "INT32" etc * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements as a contiguous array of bytes * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_from_buffer(FLOAT32, 1, 1)((1 to 4).toArray.map(_.toByte)) * * */ def tensor_from_buffer[T]( dtype: DataType.Aux[T], shape: Shape)( buffer: Array[Byte]): Tensor = { Tensor.fromBuffer(dtype, shape, buffer.length.toLong, ByteBuffer.wrap(buffer)) } /** * Construct a tensor from a array of bytes. * * @param dtype The tensorflow data type of the elements, * as a string i.e. "FLOAT64", "INT32" etc * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements as a contiguous array of bytes * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_from_buffer("FLOAT32", 1, 1)((1 to 4).toArray.map(_.toByte)) * * */ def tensor_from_buffer( dtype: String, shape: Int*)( buffer: Array[Byte]): Tensor = Tensor.fromBuffer( DataType.fromName(dtype), Shape(shape:_*), buffer.length.toLong, ByteBuffer.wrap(buffer)) /** * Construct an 16 bit integer tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. 
* * <b>Usage</b> dtf.tensor_i16(1, 2, 3)(1, 2, 3, 4, 5, 6) * * */ def tensor_i16(shape: Int*)(buffer: Int*)(implicit ev: TensorConvertible[Int]) = Tensor(INT16, buffer.head, buffer.tail:_*).reshape(shape) /** * Construct an 32 bit integer tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_i32(1, 2, 3)(1, 2, 3, 4, 5, 6) * * */ def tensor_i32(shape: Int*)(buffer: Int*)(implicit ev: TensorConvertible[Int]) = Tensor(INT32, buffer.head, buffer.tail:_*).reshape(shape) /** * Construct an 64 bit integer tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_i64(1, 2, 3)(1, 2, 3, 4, 5, 6) * * */ def tensor_i64(shape: Int*)(buffer: Int*)(implicit ev: TensorConvertible[Int]) = Tensor(INT64, buffer.head, buffer.tail:_*).reshape(shape) /** * Construct an 16 bit floating point tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_f16(1, 2, 3)(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) * * */ def tensor_f16(shape: Int*)(buffer: Double*)(implicit ev: TensorConvertible[Double]) = Tensor(FLOAT16, buffer.head, buffer.tail:_*).reshape(shape) /** * Construct an 32 bit floating point tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_f32(1, 2, 3)(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) * * */ def tensor_f32(shape: Int*)(buffer: Double*)(implicit ev: TensorConvertible[Double]) = Tensor(FLOAT32, buffer.head, buffer.tail:_*).reshape(shape) /** * Construct an 64 bit floating point tensor from a list of elements. * * @param shape The shape of the tensor given as any number * of integer arguments. * * @param buffer The elements in row major format. * * @return A tensorflow [[Tensor]] of the appropriate data type * and shape. * * <b>Usage</b> dtf.tensor_f64(1, 2, 3)(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) * * */ def tensor_f64(shape: Int*)(buffer: Double*)(implicit ev: TensorConvertible[Double]) = Tensor(FLOAT64, buffer.head, buffer.tail:_*).reshape(shape) /** * Stack a list of tensors, the use must ensure that * the shapes of the tensors are appropriate for a stack * operation. * * @param inputs A sequence of tensors. * * @param axis The axis along which they should be stacked. * * @return The larger stacked tensor. * */ def stack(inputs: Seq[Tensor], axis: Int = 0): Tensor = tfi.stack(inputs, axis) /** * Split a tensor into a list of tensors. * */ def unstack(input: Tensor, number: Int = -1, axis: Int = 0): Seq[Tensor] = tfi.unstack(input, number, axis) def concatenate(inputs: Seq[Tensor], axis: Tensor = 0): Tensor = tfi.concatenate(inputs, axis) /** * Generate a random tensor with independent and * identically distributed elements drawn from a * [[RandomVariable]] instance. 
* */ def random[T](dtype: DataType.Aux[T], shape: Int*)(rv: RandomVariable[T])(implicit ev: TensorConvertible[T]) : Tensor = { val buffer = rv.iid(shape.product).draw Tensor(dtype, buffer.head, buffer.tail:_*).reshape(Shape(shape:_*)) } /** * Fill a tensor with a fixed value. * */ def fill[T](dataType: DataType.Aux[T], shape: Int*)(value: T)(implicit ev: SupportedType[T]): Tensor = Tensor.fill(dataType, Shape(shape:_*))(value) def fill[T](dataType: DataType.Aux[T], shape: Shape)(value: T)(implicit ev: SupportedType[T]): Tensor = Tensor.fill(dataType, shape)(value) }
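The doc comments above reach these constructors through a dtf alias; a compact sketch of that style of usage follows. The exact import path of dtf is an assumption, as is its re-export of the Api members.

// Sketch of the usage shown in the doc comments above; the `dtf` alias and its
// import path are assumed, not confirmed by this file.
import io.github.mandar2812.dynaml.tensorflow.dtf
import org.platanios.tensorflow.api.{FLOAT32, Shape}

// A 2 x 3 matrix of 32-bit floats, elements supplied in row major order.
val a = dtf.tensor_f32(2, 3)(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)

// The same shape built from an explicit data type and Shape value.
val b = dtf.tensor_from(FLOAT32, Shape(2, 3))(1f, 2f, 3f, 4f, 5f, 6f)

// Stack both along a new leading axis: the resulting shape is (2, 2, 3).
val stacked = dtf.stack(Seq(a, b), axis = 0)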
amitkumarj441/DynaML
dynaml-core/src/main/scala-2.11/io/github/mandar2812/dynaml/kernels/GenericMaternKernel.scala
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.kernels

import breeze.linalg.DenseMatrix
import breeze.numerics.{exp, lgamma, sqrt}
import spire.algebra.{Field, InnerProductSpace}
import io.github.mandar2812.dynaml.utils._

/**
  * <h3>Matern Half Integer Covariance Family</h3>
  *
  * Implementation of the half integer Matern
  * covariance function, for arbitrary domains.
  *
  * @author mandar2812 date: 27/01/2017.
  */
class GenericMaternKernel[T](private var l: Double, p: Int = 2)(
  implicit evInner: InnerProductSpace[T, Double], evField: Field[T])
  extends StationaryKernel[T, Double, DenseMatrix[Double]]
    with LocalScalarKernel[T] with Serializable { self =>

  override val hyper_parameters = List("p", "l")

  blocked_hyper_parameters = List("p")

  state = Map("p" -> p.toDouble, "l" -> l)

  override def setHyperParameters(h: Map[String, Double]) = {
    super.setHyperParameters(h)
    if (h contains "p")
      state += ("p" -> math.floor(math.abs(h("p"))))
    this
  }

  override def evalAt(config: Map[String, Double])(x: T) = {
    val r = math.sqrt(evInner.dot(x, x))
    val nu = config("p") + 0.5
    val lengthscale = config("l")
    val order = config("p").toInt

    val leadingTerm = exp(-sqrt(2*nu)*r/lengthscale)*exp(lgamma(order + 1) - lgamma(2*order + 1))

    val sumTerm = (0 to order).map(i => {
      math.pow(sqrt(8*nu)*r/lengthscale, order-i)*(factorial(order+i)/(factorial(i)*factorial(order-i)))
    }).sum

    leadingTerm*sumTerm
  }

  override def gradientAt(config: Map[String, Double])(x: T, y: T) = {
    val diff = evField.minus(x, y)
    val r = math.sqrt(evInner.dot(diff, diff))

    val nu = config("p") + 0.5
    val lengthscale = config("l")
    val order = config("p").toInt

    val leadingTerm = exp(-sqrt(2*nu)*r/lengthscale)*exp(lgamma(order + 1) - lgamma(2*order + 1))

    val sumTerm = (0 to order).map(i => {
      math.pow(sqrt(8*nu)*r/lengthscale, order-i)*(factorial(order+i)/(factorial(i)*factorial(order-i)))
    }).sum

    Map("l" -> {
      val diffLead = leadingTerm * sqrt(2*nu) * r/math.pow(lengthscale, 2.0)

      val diffSum = (0 to order).map(i => {
        -1*(order-i) *
          math.pow(sqrt(8*nu)* r/lengthscale, order-i) *
          (factorial(order+i)/(factorial(i)*factorial(order-i)))/lengthscale
      }).sum

      diffLead*sumTerm + diffSum*leadingTerm
    })
  }
}

/**
  * Implementation of the half integer Matern-ARD
  * covariance function
  *
  * @author mandar2812 date: 27/01/2017.
  */
abstract class GenericMaternARDKernel[T](private var l: T, private var p: Int = 3)(
  implicit evInner: InnerProductSpace[T, Double], evField: Field[T])
  extends StationaryKernel[T, Double, DenseMatrix[Double]]
    with LocalScalarKernel[T] with Serializable { self =>

  blocked_hyper_parameters = List("p")
}
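For reference, evalAt above computes the standard half-integer Matérn covariance with nu = p + 1/2. Writing r for the norm of its argument and l for the lengthscale, the value leadingTerm * sumTerm is

k_{p+1/2}(r) = \exp\!\left(-\frac{\sqrt{2\nu}\,r}{l}\right)\,\frac{p!}{(2p)!}\,
  \sum_{i=0}^{p} \frac{(p+i)!}{i!\,(p-i)!}\left(\frac{\sqrt{8\nu}\,r}{l}\right)^{p-i},
  \qquad \nu = p + \tfrac{1}{2},

and the "l" entry returned by gradientAt is the partial derivative of this expression with respect to the lengthscale l, split into the product-rule terms diffLead*sumTerm + diffSum*leadingTerm.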