repo_name | path | text
---|---|---|
dorely103/Raphtory | mainproject/src/main/scala/com/raphtory/core/actors/Router/RouterWorker.scala | <reponame>dorely103/Raphtory<gh_stars>0
package com.raphtory.core.actors.Router
import akka.actor.{Actor, ActorLogging}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import com.raphtory.core.actors.RaphtoryActor
import com.raphtory.core.actors.Router.RouterWorker.CommonMessage.TimeBroadcast
import com.raphtory.core.actors.Spout.SpoutAgent.CommonMessage.{NoWork, SpoutOnline, WorkPlease}
import com.raphtory.core.model.communication._
import kamon.Kamon
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
// TODO: add a val name that subclasses extending this trait must override,
// e.g. BlockChainRouter: val name = "Blockchain Router".
// log.debug messages that currently read 'Router' should then read 'Blockchain Router'.
class RouterWorker[T](val graphBuilder: GraphBuilder[T],val routerId: Int, val workerID: Int, val initialManagerCount: Int,val initialRouterCount:Int)
extends RaphtoryActor {
implicit val executionContext: ExecutionContext = context.system.dispatcher
//println(s"Router $routerId $workerID with $initialManagerCount $initialRouterCount")
private val messageIDs = ParTrieMap[String, Int]()
private val routerWorkerUpdates =
Kamon.counter("Raphtory_Router_Output").withTag("Router", routerId).withTag("Worker", workerID)
var update = 0
// todo: wvv let people know parseTuple will create a list of update messages
// and this trait will handle the logic to send them to the graph
final protected val mediator = DistributedPubSub(context.system).mediator
mediator ! DistributedPubSubMediator.Put(self)
//var timebroadcaster:Cancellable = _
override def preStart(): Unit = {
log.debug(s"RouterWorker [$routerId] is being started.")
context.system.scheduler
.scheduleOnce(delay = 5.seconds, receiver = self, message = TimeBroadcast)
}
override def receive: Receive = work(initialManagerCount, 0L, 0L)
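// The worker's behaviour carries three pieces of state via context.become: the current partition
// manager count, the wall-clock time of the last tracked tuple, and the newest message time seen.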
private def work(managerCount: Int, trackedTime: Long, newestTime: Long): Receive = {
case SpoutOnline => context.sender() ! WorkPlease
case NoWork => context.system.scheduler.scheduleOnce(delay = 1.second, receiver = context.sender(), message = WorkPlease)
case msg: UpdatedCounter =>
log.debug(s"RouterWorker [$routerId] received [$msg] request.")
if (managerCount < msg.newValue) context.become(work(msg.newValue, trackedTime, newestTime))
case AllocateTuple(record: T) => //todo: wvv AllocateTuple should hold type of record instead of using Any
log.debug(s"RouterWorker [$routerId] received AllocateTuple[$record] request.")
parseTupleAndSendGraph(record, managerCount, false, trackedTime).foreach(newNewestTime =>
if(newNewestTime>newestTime)
context.become(work(managerCount, trackedTime, newNewestTime))
)
context.sender() ! WorkPlease
case msg @ AllocateTrackedTuple(
wallClock,
record: T
) => //todo: wvv AllocateTrackedTuple should hold type of record instead of using Any
log.debug(s"RouterWorker [$routerId] received [$msg] request.")
val newNewestTime = parseTupleAndSendGraph(record, managerCount, true, wallClock).getOrElse(newestTime)
context.become(work(managerCount, wallClock, newNewestTime))
context.sender() ! WorkPlease
case TimeBroadcast => {
getAllWriterWorkers(managerCount).foreach { workerPath =>
mediator ! DistributedPubSubMediator.Send(
workerPath,
RouterWorkerTimeSync(newestTime, s"${routerId}_$workerID", getMessageIDForWriter(workerPath)),
false
)
}
context.system.scheduler
.scheduleOnce(delay = 5.seconds, receiver = self, message = TimeBroadcast)
//println(s"Router $routerId $workerID with $newestTime ${messageIDs.mkString("[",",","]")}")
}
case DataFinished => {
getAllRouterWorkers(initialRouterCount).foreach { workerPath =>
mediator ! DistributedPubSubMediator.Send(
workerPath,
DataFinishedSync(newestTime),
false
)
}
}
case DataFinishedSync(time) => {
if (time >= newestTime) {
//println(s"Router $routerId $workerID ${time}")
getAllWriterWorkers(managerCount).foreach { workerPath =>
mediator ! DistributedPubSubMediator.Send(
workerPath,
RouterWorkerTimeSync(time, s"${routerId}_$workerID", getMessageIDForWriter(workerPath)),
false
)
}
context.become(work(managerCount, trackedTime, time))
}
else {
getAllRouterWorkers(initialRouterCount).foreach { workerPath =>
mediator ! DistributedPubSubMediator.Send(
workerPath,
DataFinishedSync(newestTime),
false
)
}
}
}
case unhandled => log.warning(s"RouterWorker received unknown [$unhandled] message.")
}
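// Parses one raw tuple with the graph builder and ships the resulting updates to the partition
// managers. Returns the message time of the last update sent, or None when parsing throws or
// yields no updates (exceptions are silently swallowed here).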
private def parseTupleAndSendGraph(
record: T,
managerCount: Int,
trackedMessage: Boolean,
trackedTime: Long
): Option[Long] =try{
graphBuilder.parseTuple(record)
graphBuilder.getUpdates().map(update => sendGraphUpdate(update, managerCount, trackedMessage, trackedTime)).lastOption
}catch {case e:Exception => None}
private def sendGraphUpdate(
message: GraphUpdate,
managerCount: Int,
trackedMessage: Boolean,
trackedTime: Long
): Long = {
update+=1
routerWorkerUpdates.increment()
val path = getManager(message.srcID, managerCount)
val id = getMessageIDForWriter(path)
val trackedTimeToUse = if (trackedMessage) trackedTime else -1L
val sentMessage = message match {
case m: VertexAdd =>
TrackedVertexAdd(s"${routerId}_$workerID", id, trackedTimeToUse, m)
case m: VertexAddWithProperties =>
TrackedVertexAddWithProperties(s"${routerId}_$workerID", id, trackedTimeToUse, m)
case m: EdgeAdd =>
TrackedEdgeAdd(s"${routerId}_$workerID", id, trackedTimeToUse, m)
case m: EdgeAddWithProperties =>
TrackedEdgeAddWithProperties(s"${routerId}_$workerID", id, trackedTimeToUse, m)
case m: VertexDelete =>
TrackedVertexDelete(s"${routerId}_$workerID", id, trackedTimeToUse, m)
case m: EdgeDelete =>
TrackedEdgeDelete(s"${routerId}_$workerID", id, trackedTimeToUse, m)
}
log.debug(s"RouterWorker sending message [$sentMessage] to PubSub")
if (trackedMessage)
mediator ! DistributedPubSubMediator
.Send("/user/WatermarkManager", UpdateArrivalTime(trackedTime, message.msgTime), localAffinity = false)
mediator ! DistributedPubSubMediator.Send(path, sentMessage, localAffinity = false)
message.msgTime
}
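// Allocates a per-writer-path, monotonically increasing message id; the first message to a given
// path gets id 0. Presumably used downstream to detect gaps or reorderings.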
private def getMessageIDForWriter(path: String) =
messageIDs.get(path) match {
case Some(messageId) =>
messageIDs put (path, messageId + 1)
messageId
case None =>
messageIDs put (path, 1)
0
}
def getAllWriterWorkers(managerCount: Int): Array[String] = {
val workers = mutable.ArrayBuffer[String]()
for (i <- 0 until managerCount)
for (j <- 0 until totalWorkers)
workers += s"/user/Manager_${i}_child_$j"
workers.toArray
}
}
object RouterWorker {
object CommonMessage {
case object TimeBroadcast
}
}
|
dorely103/Raphtory | mainproject/src/main/scala/com/raphtory/core/model/communication/VertexMutliQueue.scala | <reponame>dorely103/Raphtory<filename>mainproject/src/main/scala/com/raphtory/core/model/communication/VertexMutliQueue.scala
package com.raphtory.core.model.communication
import com.raphtory.core.actors.PartitionManager.Workers.ViewJob
import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap
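// Double-buffered per-job message queues: a message received during superstep N is appended to
// the queue for superstep N + 1 (see receiveMessage), so even and odd supersteps read from
// alternating maps and can be cleared independently.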
class VertexMutliQueue {
val evenMessageQueueMap = ParTrieMap[ViewJob, mutable.ArrayBuffer[Any]]()
val oddMessageQueueMap = ParTrieMap[ViewJob, mutable.ArrayBuffer[Any]]()
def getMessageQueue(jobID: ViewJob, superStep: Int): mutable.ArrayBuffer[Any] = {
val queueMap = if (superStep % 2 == 0) evenMessageQueueMap else oddMessageQueueMap
queueMap.get(jobID) match {
case Some(stack) => stack
case None =>
val newStack = mutable.ArrayBuffer[Any]()
queueMap(jobID) = newStack
newStack
}
}
def clearQueue(jobID: ViewJob, superStep: Int) = {
val queueMap = if (superStep % 2 == 0) evenMessageQueueMap else oddMessageQueueMap
queueMap.update(jobID, mutable.ArrayBuffer[Any]())
}
def clearQueues(jobID: ViewJob) = {
evenMessageQueueMap.get(jobID) match {
case Some(_) => evenMessageQueueMap.update(jobID, mutable.ArrayBuffer[Any]())
case None =>
}
oddMessageQueueMap.get(jobID) match {
case Some(_) => oddMessageQueueMap.update(jobID, mutable.ArrayBuffer[Any]())
case None =>
}
}
def receiveMessage(jobID: ViewJob, superStep: Int, data: Any) = getMessageQueue(jobID, superStep + 1) += (data)
}
|
dorely103/Raphtory | mainproject/src/main/scala/com/raphtory/core/model/communication/AnalysisType.scala | package com.raphtory.core.model.communication
object AnalysisType extends Enumeration {
val live: Value = Value("live")
val view: Value = Value("view")
val range: Value = Value("range")
}
|
karateboy/slcems | app/models/Record.scala | package models
import play.api.Logging
import scalikejdbc.{AutoSession, DBSession, SQLSyntax, scalikejdbcSQLInterpolationImplicitDef}
import java.sql.Timestamp
import java.time.LocalDateTime
case class Stat(
avg: Option[Double],
min: Option[Double],
max: Option[Double],
count: Int,
total: Int,
overCount: Int) {
val effectPercent: Option[Double] = if (total != 0) Some(count.toFloat * 100 / total) else None
val overPercent: Option[Double] = if (total != 0) Some(overCount.toFloat * 100 / total) else None
}
case class MonitorTypeRecord(monitorType: String, dataList: List[(Long, Option[Float], Option[String])], stat: Stat)
case class PowerRecord(monitorID: Int, dateTime: LocalDateTime,
generating: Double, storing: Option[Double], consuming: Double)
object Record extends Logging {
implicit val session: DBSession = AutoSession
def getLatestMonitorRecordTime(tabType: TableType.Value, monitorID: Int): Option[LocalDateTime] = {
val tabName = Record.getTabName(tabType)
sql"""
SELECT TOP 1 [DateTime]
FROM ${tabName}
Where [MonitorID] = $monitorID
ORDER BY [DateTime] DESC
""".map { r => r.localDateTime(1) }.single().apply()
}
def getLatestMonitorRecord(tableType: TableType.Value, monitorID: Int) = {
val tabName = Record.getTabName(tableType)
sql"""
SELECT TOP 1 *
FROM ${tabName}
Where [MonitorID] = $monitorID
ORDER BY [DateTime] DESC
""".map { rs =>
PowerRecord(rs.int(1), rs.localDateTime(2), rs.double(3),
rs.doubleOpt(4), rs.double(5))
}.single().apply()
}
def recalculateHour(monitorID: Int, start: LocalDateTime): Unit = {
val minData = getRecord(TableType.Min)(monitorID, start, start.plusHours(1))
if (minData.nonEmpty) {
val size = minData.size
val generating = minData.map(_.generating).sum / size
val storing = minData.flatMap(_.storing).sum / size
val consuming = minData.map(_.consuming).sum / size
Record.upsertPowerRecord(TableType.Hour)(PowerRecord(monitorID, start, generating, Some(storing), consuming))
} else {
Record.upsertPowerRecord(TableType.Hour)(PowerRecord(monitorID, start, 0, Some(0), 0))
}
}
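// SQL Server style upsert: attempt an UPDATE first and, if no row was affected (@@ROWCOUNT = 0),
// INSERT a new row for the (MonitorID, DateTime) key.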
def upsertPowerRecord(tabType: TableType.Value)(pwrRecord: PowerRecord): Int = {
val tabName = Record.getTabName(tabType)
val timestamp: java.sql.Timestamp = Timestamp.valueOf(pwrRecord.dateTime)
sql"""
UPDATE $tabName
SET [generating] = ${pwrRecord.generating}
,[storing] = ${pwrRecord.storing}
,[consuming] = ${pwrRecord.consuming}
WHERE [MonitorID]=${pwrRecord.monitorID} and [DateTime] = ${timestamp}
IF(@@ROWCOUNT = 0)
BEGIN
INSERT INTO $tabName
([MonitorID]
,[DateTime]
,[generating]
,[storing]
,[consuming])
VALUES
(${pwrRecord.monitorID}
,${timestamp}
,${pwrRecord.generating}
,${pwrRecord.storing}
,${pwrRecord.consuming})
END
""".update().apply()
}
def getRecord(tabType: TableType.Value)(monitorID: Int, start: LocalDateTime, end: LocalDateTime): List[PowerRecord] = {
val tabName = Record.getTabName(tabType)
val startT: Timestamp = Timestamp.valueOf(start)
val endT: Timestamp = Timestamp.valueOf(end)
sql"""
Select *
From $tabName
Where [DateTime] >= $startT and [DateTime] < $endT and [MonitorID] = $monitorID
""".map(rs => PowerRecord(rs.int(1), rs.localDateTime(2),
rs.double(3), rs.doubleOpt(4), rs.double(5))).list().apply()
}
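// Builds a per-monitor-type time series from the raw records: generating/storing/consuming map
// directly to stored columns, while consumingPercent (against contract capacity) and greenPercent
// (generating over consuming) are derived on the fly.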
def getRecordMap(tabType: TableType.Value)(monitor: String, start: LocalDateTime, end: LocalDateTime): Map[String, Seq[(LocalDateTime, Option[Double])]] = {
val tabName = Record.getTabName(tabType)
val monitorID = Monitor.idMap(monitor)
val startT: Timestamp = Timestamp.valueOf(start)
val endT: Timestamp = Timestamp.valueOf(end)
val retList: Seq[PowerRecord] =
sql"""
Select *
From $tabName
Where [DateTime] >= $startT and [DateTime] < $endT and [MonitorID] = $monitorID
Order by [DateTime] ASC
""".map(rs => PowerRecord(rs.int(1), rs.localDateTime(2),
rs.double(3), rs.doubleOpt(4), rs.double(5))).list().apply()
val pairs =
for {
mt <- MonitorType.map.keys
} yield {
val list =
for {
doc <- retList
dt = doc.dateTime
} yield {
mt match {
case "generating"=>
(dt, Some(Math.abs(doc.generating)))
case "storing" =>
(dt, doc.storing)
case "consuming" =>
(dt, Some(doc.consuming))
case "consumingPercent" =>
(dt, Some(doc.consuming*100/Monitor.map(monitorID).contractCapacity))
case "greenPercent"=>
if(doc.consuming == 0)
(dt, Some(0d))
else
(dt, Some(doc.generating*100/doc.consuming))
}
}
mt -> list
}
pairs.toMap
}
def getTabName(tab: TableType.Value) = {
tab match {
case TableType.Hour =>
SQLSyntax.createUnsafely(s"[HourRecord]")
case TableType.Min =>
SQLSyntax.createUnsafely(s"[MinRecord]")
}
}
}
|
karateboy/slcems | app/controllers/JWTAuthentication.scala | package controllers
import com.auth0.jwt.JWT
import com.auth0.jwt.algorithms.Algorithm
import com.auth0.jwt.interfaces.DecodedJWT
import play.api.Logging
import play.api.mvc.Results.Unauthorized
import play.api.mvc.Security.AuthenticatedBuilder
import play.api.mvc.{ActionBuilder, Request, RequestHeader, Result, WrappedRequest}
import java.time.Instant
import java.util.Date
import scala.concurrent.ExecutionContext.Implicits.global
case class UserCertificate(username:String, token:String)
case class UserRequest[A](userCertificate:UserCertificate, request: Request[A]) extends WrappedRequest(request)
object JWTAuthentication extends Logging {
val secret = "<KEY>"
val issuer= "WECC"
val algorithm = Algorithm.HMAC256(secret)
val verifier = JWT.require(algorithm).withIssuer(issuer).build()
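// Issues an HMAC-SHA256 token for issuer WECC that expires 30 minutes after it is signed.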
def signToken(): String ={
try {
val now = Instant.now()
JWT.create()
.withIssuer(issuer)
.withIssuedAt(Date.from(now))
.withExpiresAt(Date.from(now.plusSeconds(60*30)))
.sign(algorithm)
} catch {
case ex: Exception =>
logger.error("Failed to sign token", ex)
throw ex
}
}
def verifyToken(token:String): Boolean = {
try{
val jwt: DecodedJWT = verifier.verify(token)
true
}catch {
case ex:Exception=>
logger.error("failed to verify token", ex)
false
}
}
def getUserinfo(request: RequestHeader):Option[UserCertificate] = {
None
}
def onUnauthorized(request: RequestHeader): Result = {
Unauthorized("")
}
}
|
karateboy/slcems | app/controllers/DataLogger.scala | <reponame>karateboy/slcems<filename>app/controllers/DataLogger.scala
package controllers
import models._
import play.api.Logging
import play.api.libs.json._
import play.api.mvc.{AbstractController, Action, AnyContent, ControllerComponents}
import java.time.ZoneId
import java.util.Date
import javax.inject.{Inject, Singleton}
case class LatestRecordTime(time: Long)
case class MtRecord(mtName: String, value: Double, status: String)
case class RecordList(time: Date, var mtDataList: Seq[MtRecord], monitor: String) {
def mtMap: Map[String, MtRecord] = {
val pairs =
mtDataList map { data => data.mtName -> data }
pairs.toMap
}
}
@Singleton
class DataLogger @Inject()(cc: ControllerComponents)
extends AbstractController(cc) with Logging {
implicit val latestRecordTimeWrite = Json.writes[LatestRecordTime]
implicit val mtRecordRead = Json.reads[MtRecord]
implicit val RecordListRead = Json.reads[RecordList]
def getRecordRange(tabType: TableType.Value)(monitorStr: String): Action[AnyContent] = Action {
val monitorID = Monitor.idMap(monitorStr)
val timeOpt = Record.getLatestMonitorRecordTime(tabType, monitorID)
val latestRecordTime = timeOpt.map {
time =>
LatestRecordTime(time.atZone(ZoneId.systemDefault()).toEpochSecond * 1000)
}.getOrElse(LatestRecordTime(0))
Ok(Json.toJson(latestRecordTime))
}
def getHourRecordRange: String => Action[AnyContent] = getRecordRange(TableType.Hour) _
def getMinRecordRange: String => Action[AnyContent] = getRecordRange(TableType.Min) _
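// Validates the JSON body as Seq[RecordList] and dispatches it to the handler for the named
// monitor; only "ITRI" is currently wired up, any other monitor name is logged and ignored.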
def insertDataRecord(tabType: TableType.Value)(monitorStr: String) = Action {
implicit request =>
val jsonBody: Option[JsValue] = request.body.asJson
jsonBody
.map { json =>
val paramRet = json.validate[Seq[RecordList]]
paramRet.fold(err => {
logger.error(JsError(err).toString())
BadRequest(Json.obj("ok" -> false, "msg" -> JsError(err).toString().toString()))
},
recordListSeq => {
monitorStr match {
case "ITRI" =>
ITRIhandler.dataHandler(tabType, recordListSeq)
case other:String =>
logger.warn(s"unexpected monitor ${other}")
}
Ok(Json.obj("ok" -> true))
})
}
.getOrElse {
BadRequest("Expecting application/json request body")
}
}
def insertHourRecord = insertDataRecord(TableType.Hour) _
def insertMinRecord = insertDataRecord(TableType.Min) _
}
|
karateboy/slcems | app/models/User.scala | <filename>app/models/User.scala
package models
import scalikejdbc._
case class User(username: String, password: String)
object User {
implicit val session: DBSession = AutoSession
def get(username: String) = {
sql"""
Select *
From User
Where username = ${username}
""".map(rs => User(rs.string(1), rs.string(2))).first().apply()
}
}
|
karateboy/slcems | build.sbt | <reponame>karateboy/slcems
lazy val root = (project in file("."))
.enablePlugins(PlayScala, SwaggerPlugin, LauncherJarPlugin)
.settings(
name := """slcems""",
organization := "com.wecc",
version := "1.0-3",
scalaVersion := "2.13.6",
libraryDependencies ++= Seq(
guice,
"org.scalatestplus.play" %% "scalatestplus-play" % "5.0.0" % Test
),
scalacOptions ++= Seq(
"-feature",
"-deprecation",
"-Xfatal-warnings"
)
)
libraryDependencies += ws
libraryDependencies ++= Seq(
"org.scalikejdbc" %% "scalikejdbc" % "3.5.0",
"org.scalikejdbc" %% "scalikejdbc-config" % "3.5.0",
"org.scalikejdbc" %% "scalikejdbc-play-initializer" % "2.8.0-scalikejdbc-3.5"
)
// https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc
libraryDependencies += "com.microsoft.sqlserver" % "mssql-jdbc" % "9.4.0.jre8"
// https://mvnrepository.com/artifact/org.scalikejdbc/scalikejdbc-play-fixture
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-play-fixture" % "2.8.0-scalikejdbc-3.5"
// https://mvnrepository.com/artifact/com.iheart/play-swagger
libraryDependencies += "com.iheart" %% "play-swagger" % "0.10.6-PLAY2.8"
libraryDependencies += "org.webjars" % "swagger-ui" % "3.43.0"
// https://mvnrepository.com/artifact/com.auth0/java-jwt
libraryDependencies += "com.auth0" % "java-jwt" % "3.18.2"
// https://mvnrepository.com/artifact/org.jsoup/jsoup
libraryDependencies += "org.jsoup" % "jsoup" % "1.14.3"
// https://mvnrepository.com/artifact/org.seleniumhq.selenium/selenium-java
libraryDependencies += "org.seleniumhq.selenium" % "selenium-java" % "4.1.1"
// https://mvnrepository.com/artifact/com.fasterxml.jackson.module/jackson-module-scala
libraryDependencies += "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.13.0"
swaggerDomainNameSpaces := Seq("models") |
karateboy/slcems | project/plugins.sbt | <filename>project/plugins.sbt
addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.8.11")
addSbtPlugin("com.iheart" % "sbt-play-swagger" % "0.10.6-PLAY2.8")
|
karateboy/slcems | app/models/RdCenterCollector.scala | <filename>app/models/RdCenterCollector.scala
package models
import akka.actor.{Actor, Cancellable, Props}
import play.api.Logging
import play.api.libs.json.{JsError, Json}
import play.api.libs.ws.WSClient
import java.time.{LocalDateTime, LocalTime}
import javax.inject.Inject
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.{FiniteDuration, MINUTES, SECONDS}
import scala.util.Failure
case class GeneralData(grid: String, pv: String, building: String, ess: String, ev: String)
case class GeneralStatus(status: Boolean, data: Seq[GeneralData])
object RdCenterCollector extends Logging {
def props(wsClient: WSClient) = Props(classOf[RdCenterCollector], wsClient)
implicit val r2 = Json.reads[GeneralData]
implicit val r1 = Json.reads[GeneralStatus]
case object ParseWebInfo
}
class RdCenterCollector @Inject()(wsClient: WSClient) extends Actor with Logging {
import RdCenterCollector._
val timer: Cancellable = {
val now = LocalTime.now()
val nextTime = now.withSecond(30).plusMinutes(1)
context.system.scheduler.scheduleWithFixedDelay(FiniteDuration(now.until(nextTime, java.time.temporal.ChronoUnit.SECONDS), SECONDS),
FiniteDuration(1, MINUTES), self, ParseWebInfo)
}
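// Once a minute: fetch the R&D centre status page, convert the pv/ess/building readings into
// generating/storing/consuming (non-numeric values fall back to 0 or None), upsert a minute record
// for monitor 1, and recalculate the previous hour whenever the minute rolls over to zero.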
override def receive: Receive = {
case ParseWebInfo =>
try {
val f = wsClient.url("https://stb.stpi.narl.org.tw/general").get()
f onComplete ({
case Failure(exception) =>
logger.error("failed to get", exception)
case _ =>
})
for (ret <- f) {
val statusRet = ret.json.validate[GeneralStatus]
statusRet.fold(
err => {
logger.error(JsError(err).toString)
},
status => {
if (status.status && status.data.nonEmpty) {
val generating: Double = try {
status.data(0).pv.toDouble
} catch {
case _: Exception =>
0d
}
val storing = try {
Some(status.data(0).ess.toDouble)
} catch {
case _: Exception =>
None
}
val consuming: Double = try {
status.data(0).building.toDouble
} catch {
case _: Exception =>
0
}
val now = LocalDateTime.now().withSecond(0).withNano(0)
Record.upsertPowerRecord(TableType.Min)(PowerRecord(1, now, generating, storing, consuming))
if(now.getMinute == 0){
val start = now.minusHours(1)
Record.recalculateHour(1, start)
}
}
})
}
} catch {
case ex: Exception =>
logger.error("error", ex)
}
}
override def postStop(): Unit = {
timer.cancel()
super.postStop()
}
} |
karateboy/slcems | app/models/MonitorType.scala | package models
case class MonitorType(id:String, desp:String, unit:String)
object MonitorType {
val list: List[MonitorType] = List(
MonitorType("generating", "發電量", "KW"),
MonitorType("storing", "儲能量", "KW"),
MonitorType("consuming", "用電量", "KW"),
MonitorType("consumingPercent", "契約容量占比", "%"),
MonitorType("greenPercent", "綠能滲透率", "%")
)
val map: Map[String, MonitorType] = list.map(mt=>mt.id->mt).toMap
}
|
karateboy/slcems | app/models/Monitor.scala | package models
import scalikejdbc.{AutoSession, DBSession, scalikejdbcSQLInterpolationImplicitDef}
case class Monitor(id:Int, name:String, displayName:String, contractCapacity:Double)
object Monitor {
implicit val session: DBSession = AutoSession
def getList: List[Monitor] = {
sql"""
Select *
From Monitor
""".map(rs=>Monitor(rs.int(1), rs.string(2), rs.string(3), rs.double(4))).list().apply()
}
val idMap: Map[String, Int] =
getList.map(m=>m.name -> m.id).toMap
val map: Map[Int, Monitor] = getList.map(m=>m.id -> m).toMap
}
|
karateboy/slcems | app/models/ITRIhandler.scala | <reponame>karateboy/slcems
package models
import controllers.RecordList
import java.time.{LocalDateTime, ZoneId}
object ITRIhandler {
def dataHandler(tableType: TableType.Value, dataList:Seq[RecordList]) = {
dataList.foreach(data=>{
def getGenerating(): Double = {
val generatingMT=Seq("A_SOLAR_RT", "B_SOLAR_RT", "C_SOLAR_RT", "D_SOLAR_RT", "E1_SOLAR_RT",
"E2_SOLAR_RT", "TRI_SOLAR_RT", "BUSSTOP_SOLAR_RT", "B2_SOLAR_RT")
val rtList =
for(mtData<-data.mtDataList if generatingMT.contains(mtData.mtName)) yield
mtData.value
rtList.sum
}
def getConsuming(): Double = {
val valueOpt = data.mtDataList.find(p=>p.mtName == "DHB01_KW").map(_.value)
valueOpt.getOrElse(0)
}
val dateTime = LocalDateTime.ofInstant(data.time.toInstant, ZoneId.systemDefault())
Record.upsertPowerRecord(tableType)(PowerRecord(2, dateTime, getGenerating(), None, getConsuming()))
})
}
}
|
karateboy/slcems | app/models/TableType.scala | package models
object TableType extends Enumeration {
val Min = Value("Min")
val Hour = Value("Hour")
val defaultMap = Map((Min -> "分鐘資料"), (Hour -> "小時資料"))
}
|
karateboy/slcems | app/controllers/HomeController.scala | <reponame>karateboy/slcems
package controllers
import akka.actor.ActorSystem
import models._
import play.api.Logging
import play.api.libs.json._
import play.api.libs.ws._
import play.api.mvc._
import java.time.{Duration, Instant, LocalDateTime, Period, ZoneId}
import javax.inject._
import scala.concurrent.ExecutionContext.Implicits.global
import Highchart._
import java.time.format.DateTimeFormatter
import java.time.temporal.TemporalAmount
case class PowerStatusSummary(summary: Seq[PowerRecord])
/**
* This controller creates an `Action` to handle HTTP requests to the
* application's home page.
*/
@Singleton
class HomeController @Inject()(cc: ControllerComponents, wsClient: WSClient, system: ActorSystem)
extends AbstractController(cc) with Logging {
val buildingCollector = system.actorOf(RdCenterCollector.props(wsClient), "rdbuildingCollector")
implicit val w1 = Json.writes[PowerRecord]
implicit val w2 = Json.writes[PowerStatusSummary]
/**
* Create an Action to render an HTML page.
*
* The configuration in the `routes` file means that this method
* will be called when the application receives a `GET` request with
* a path of `/`.
*/
def index(): Action[AnyContent] = Action { implicit request: Request[AnyContent] =>
Redirect("/assets/app/index.html")
}
def explore(): Action[AnyContent] = Action { implicit request: Request[AnyContent] =>
Ok(views.html.explore())
}
def tutorial(): Action[AnyContent] = Action { implicit request: Request[AnyContent] =>
Ok(views.html.tutorial())
}
case class LoginParam(username: String, password: String)
case class LoginResp(token: String)
def login(): Action[AnyContent] = Action { request: Request[AnyContent] =>
val body: AnyContent = request.body
val jsonBody: Option[JsValue] = body.asJson
implicit val reads: Reads[LoginParam] = Json.reads[LoginParam]
jsonBody
.map { json =>
val paramRet = json.validate[LoginParam]
paramRet.fold(
err => Ok(JsError.toJson(err)),
param => {
val ret: Option[Result] = {
for (user <- User.get(param.username) if user.password == param.password) yield {
val token = LoginResp("<PASSWORD>")
implicit val write: OWrites[LoginResp] = Json.writes[LoginResp]
Ok(Json.toJson(token))
}
}
ret.getOrElse(Unauthorized)
}
)
}
.getOrElse {
BadRequest("Expecting application/json request body")
}
}
case class ElementValue(value: String, measures: String)
case class TimeForecast(startTime: String, elementValue: Seq[ElementValue])
case class WeatherElement(elementName: String, time: Seq[TimeForecast])
case class LocationElement(locationName: String, weatherElement: Seq[WeatherElement])
case class LocationForecast(locationName: String, location: Seq[LocationElement])
case class WeatherForecastRecord(locations: Seq[LocationForecast])
case class WeatherForecastRecords(records: WeatherForecastRecord)
def weatherReport() = Action.async {
val f = wsClient.url(s"https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-D0047-077?format=JSON&locationName=歸仁區")
.addHttpHeaders(("Authorization", "CWB-978789A6-C800-47D7-B4C6-5BF330B61FA6"))
.get()
for (ret <- f) yield {
implicit val r7 = Json.reads[ElementValue]
implicit val r6 = Json.reads[TimeForecast]
implicit val r5 = Json.reads[WeatherElement]
implicit val r4 = Json.reads[LocationElement]
implicit val r3 = Json.reads[LocationForecast]
implicit val r2 = Json.reads[WeatherForecastRecord]
implicit val r1 = Json.reads[WeatherForecastRecords]
//ret.json.validate[WeatherForecastRecords]
Ok(Json.toJson(ret.json))
}
}
def realtimeStatus: Action[AnyContent] = Action {
val monitorIdList = Monitor.idMap.values.toList.sorted
val powerRecordList =
for(monitorId<-monitorIdList) yield
Record.getLatestMonitorRecord(TableType.Min, monitorId).getOrElse(PowerRecord(monitorId, LocalDateTime.now(), 0, None, 0))
Ok(Json.toJson(PowerStatusSummary(summary = powerRecordList)))
}
def getPeriods(start: LocalDateTime, endTime: LocalDateTime, d: TemporalAmount): List[LocalDateTime] = {
import scala.collection.mutable.ListBuffer
val buf = ListBuffer[LocalDateTime]()
var current = start
while (current.isBefore(endTime)) {
buf.append(current)
current = current.plus(d)
}
buf.toList
}
def getPeriodReportMap(monitor: String, mtList: Seq[String],
tabType: TableType.Value)
(start: LocalDateTime, end: LocalDateTime): Map[String, Map[LocalDateTime, Option[Double]]] = {
val mtRecordListMap = Record.getRecordMap(tabType)(monitor, start, end)
val mtRecordPairs =
for (mt <- mtList) yield {
val recordList = mtRecordListMap(mt)
val pairs =
recordList.map { r => r._1 -> r._2 }
mt -> pairs.toMap
}
mtRecordPairs.toMap
}
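// Assembles the Highcharts payload for a history trend: one time slot per minute or hour in the
// requested range, one series per (monitor, monitor type) pair, and y-axes grouped by unit so KW
// and % series do not share a scale.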
def trendHelper(monitors: Seq[String], monitorTypes: Seq[String], tabType: TableType.Value,
start: LocalDateTime, end: LocalDateTime, showActual: Boolean) = {
val period: Duration =
tabType match {
case TableType.Min =>
Duration.ofMinutes(1)
case TableType.Hour =>
Duration.ofHours(1)
}
val timeSeq = getPeriods(start, end, period)
val downloadFileName = {
val startName = start.format(DateTimeFormatter.ofPattern("yyMMdd"))
val mtNames = monitorTypes.map {
MonitorType.map(_).desp
}
startName + mtNames.mkString
}
val title = s"${start.format(DateTimeFormatter.ofPattern("yyyy年MM月dd日 HH:mm"))}~${end.format(DateTimeFormatter.ofPattern("yyyy年MM月dd日 HH:mm"))}"
def getAxisLines(mt: String) = {
val std_law_line = None
val lines = Seq(std_law_line, None).filter {
_.isDefined
}.map {
_.get
}
if (lines.length > 0)
Some(lines)
else
None
}
val yAxisGroup: Map[String, Seq[(String, Option[Seq[AxisLine]])]] = monitorTypes.map(mt => {
(MonitorType.map(mt).unit, getAxisLines(mt))
}).groupBy(_._1)
val yAxisGroupMap = yAxisGroup map {
kv =>
val lines: Seq[AxisLine] = kv._2.map(_._2).flatten.flatten
if (lines.nonEmpty)
kv._1 -> YAxis(None, AxisTitle(Some(Some(s"${kv._1}"))), Some(lines))
else
kv._1 -> YAxis(None, AxisTitle(Some(Some(s"${kv._1}"))), None)
}
val yAxisIndexList = yAxisGroupMap.toList.zipWithIndex
val yAxisUnitMap = yAxisIndexList.map(kv => kv._1._1 -> kv._2).toMap
val yAxisList = yAxisIndexList.map(_._1._2)
def getSeries(): Seq[seqData] = {
val monitorReportPairs =
for {
monitor <- monitors
} yield {
monitor -> getPeriodReportMap(monitor, monitorTypes, tabType)(start, end)
}
val monitorReportMap = monitorReportPairs.toMap
for {
m <- monitors
mt <- monitorTypes
valueMap = monitorReportMap(m)(mt)
} yield {
val timeData =
if (showActual) {
timeSeq.map { time =>
if (valueMap.contains(time))
(time.atZone(ZoneId.systemDefault()).toEpochSecond()*1000, valueMap(time))
else {
(time.atZone(ZoneId.systemDefault()).toEpochSecond()*1000, None)
}
}
} else {
for (time <- valueMap.keys.toList.sorted) yield {
(time.atZone(ZoneId.systemDefault()).toEpochSecond()*1000, valueMap(time))
}
}
val timeValues = timeData.map{t=>(t._1, t._2)}
val mID = Monitor.idMap(m)
seqData(name = s"${Monitor.map(mID).displayName}_${MonitorType.map(mt).desp}",
data = timeValues, yAxis = yAxisUnitMap(MonitorType.map(mt).unit))
}
}
val series = getSeries()
val xAxis = {
val duration = Duration.between(start, end)
if (duration.getSeconds > 2*86400)
XAxis(None, gridLineWidth = Some(1), None)
else
XAxis(None)
}
val chart =
if (monitorTypes.length == 1) {
val mt = monitorTypes(0)
val mtCase = MonitorType.map(monitorTypes(0))
HighchartData(
Map("type" -> "line"),
Map("text" -> title),
xAxis,
Seq(YAxis(None, AxisTitle(Some(Some(s"${mtCase.desp} (${mtCase.unit})"))), getAxisLines(mt))),
series,
Some(downloadFileName))
} else {
HighchartData(
Map("type" -> "line"),
Map("text" -> title),
xAxis,
yAxisList,
series,
Some(downloadFileName))
}
chart
}
def historyTrendChart(monitorStr: String, monitorTypeStr: String, tableTypeStr: String,
startNum: Long, endNum: Long) = Action {
implicit request =>
val monitors = monitorStr.split(':')
val monitorTypeStrArray = monitorTypeStr.split(':')
val monitorTypes = monitorTypeStrArray
val (tabType, start, end) =
(TableType.withName(tableTypeStr), Instant.ofEpochSecond(startNum/1000).atZone(ZoneId.systemDefault()).toLocalDateTime,
Instant.ofEpochSecond(endNum/1000).atZone(ZoneId.systemDefault()).toLocalDateTime)
val chart = trendHelper(monitors.toIndexedSeq, monitorTypes.toIndexedSeq, tabType, start, end, false)
Results.Ok(Json.toJson(chart))
}
}
|
karateboy/slcems | app/controllers/JwtAuthenticator.scala | package controllers
import com.auth0.jwt.JWT
import com.auth0.jwt.algorithms.Algorithm
import play.api.Logging
import java.time.Instant
import java.util.Date
class JwtAuthenticator extends Logging {
val secrets = "<KEY>"
val algorithm = Algorithm.HMAC256(secrets)
val issuer = "WECC"
def getToken() = {
import com.auth0.jwt.exceptions.JWTCreationException
try {
val now = Instant.now()
JWT.create
.withIssuedAt(Date.from(now))
.withExpiresAt(Date.from(now.plusSeconds(30 * 60)))
.withIssuer(issuer)
.sign(algorithm)
} catch {
case exception: JWTCreationException =>
logger.error("Failed at getToken", exception)
throw exception
}
}
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/code/steps/test/SelectiveMergeTests.scala | package com.yotpo.metorikku.code.steps.test
import com.yotpo.metorikku.code.steps.SelectiveMerge
import com.yotpo.metorikku.code.steps.SelectiveMerge.merge
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.log4j.{Level, LogManager, Logger}
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.scalatest.{FunSuite, _}
import scala.collection.mutable.ArrayBuffer
//noinspection ScalaStyle
class SelectiveMergeTests extends FunSuite with BeforeAndAfterEach {
private val log: Logger = LogManager.getLogger(this.getClass)
private var sparkSession : SparkSession = _
Logger.getLogger("org").setLevel(Level.WARN)
override def beforeEach() {
sparkSession = SparkSession.builder().appName("udf tests")
.master("local")
.config("", "")
.getOrCreate()
}
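// Compares the merged and expected frames independently of row and column order: both are sorted
// by employee_name and re-projected onto the same alphabetised column list before checking that
// except() yields no rows.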
def assertSuccess(df1: DataFrame, df2: DataFrame, isEqual: Boolean): Unit = {
val sortedSchemeArrBuff: ArrayBuffer[String] = ArrayBuffer[String]()
df1.schema.sortBy({f: StructField => f.name}).map({f: StructField => sortedSchemeArrBuff += f.name})
val sortedSchemeArr: Array[String] = sortedSchemeArrBuff.sortWith(_<_).toArray
val sortedMergedDf = df1.orderBy("employee_name").select("employee_name", sortedSchemeArr:_*)
val sortedExpectedDf = df2.orderBy("employee_name").select("employee_name", sortedSchemeArr:_*)
val equals = sortedMergedDf.except(sortedExpectedDf).isEmpty
if (equals != isEqual) {
if (!equals) {
log.error("Actual and expected differ:")
log.error("Actual:\n" + getDfAsStr(sortedMergedDf))
log.error("Expected:\n" + getDfAsStr(sortedExpectedDf))
assert(false)
}
else {
log.error("Actual and expected are equal (but expected to differ)")
assert(false)
}
}
}
def getDfAsStr(df: DataFrame): String = {
val showString = classOf[org.apache.spark.sql.DataFrame].getDeclaredMethod("showString", classOf[Int], classOf[Int], classOf[Boolean])
showString.setAccessible(true)
showString.invoke(df, 10.asInstanceOf[Object], 20.asInstanceOf[Object], false.asInstanceOf[Object]).asInstanceOf[String]
}
test("Selective merge") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val sqlContext= sparkSession.sqlContext
import sqlContext.implicits._
val employeeData1 = Seq(
("James", 1, 11, 111, 1111),
("Maria", 2, 22, 222, 2222)
)
val df1 = employeeData1.toDF("employee_name", "salary", "age", "fake", "fake2")
val employeeData2 = Seq(
("James", 1, 33, 333),
("Jen", 4, 44, 444),
("Jeff", 5, 55, 555)
)
val df2 = employeeData2.toDF("employee_name", "salary", "age", "bonus")
val simpleDataExpectedAfterMerge = Seq(
("James", Integer.valueOf(1) /* Salary */, Integer.valueOf(33) /* age */, Integer.valueOf(111) /* fake */,
Integer.valueOf(1111) /* fake2 */, Integer.valueOf(333) /* bonus */),
("Maria", null.asInstanceOf[Integer] /* Salary */, null.asInstanceOf[Integer] /* age */, Integer.valueOf(222) /* fake */,
Integer.valueOf(2222) /* fake2 */, null.asInstanceOf[Integer] /* bonus */),
("Jen", Integer.valueOf(4) /* Salary */, Integer.valueOf(44) /* age */, null.asInstanceOf[Integer] /* fake */,
null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(444) /* bonus */),
("Jeff", Integer.valueOf(5) /* Salary */, Integer.valueOf(55) /* age */, null.asInstanceOf[Integer] /* fake */,
null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(555) /* bonus */)
)
val expectedDf = simpleDataExpectedAfterMerge.toDF("employee_name", "salary", "age", "fake", "fake2", "bonus")
val simpleDataNotExpectedAfterMerge = Seq(
("James", Integer.valueOf(10) /* Salary */, Integer.valueOf(33) /* age */, Integer.valueOf(111) /* fake */,
Integer.valueOf(1111) /* fake2 */, Integer.valueOf(333) /* bonus */),
("Maria", Integer.valueOf(20) /* Salary */, Integer.valueOf(22) /* age */, Integer.valueOf(222) /* fake */,
Integer.valueOf(2222) /* fake2 */, null.asInstanceOf[Integer] /* bonus */),
("Jen", Integer.valueOf(40) /* Salary */, Integer.valueOf(44) /* age */, null.asInstanceOf[Integer] /* fake */,
null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(444) /* bonus */),
("Jeff", Integer.valueOf(50) /* Salary */, Integer.valueOf(55) /* age */, null.asInstanceOf[Integer] /* fake */,
null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(555) /* bonus */)
)
val notExpectedDf = simpleDataNotExpectedAfterMerge.toDF("employee_name", "salary", "age", "fake", "fake2", "bonus")
val mergedDf = merge(df1, df2, Seq("employee_name"))
assertSuccess(mergedDf, expectedDf, isEqual = true)
assertSuccess(mergedDf, notExpectedDf, isEqual = false)
}
test("String and numbers mixed fields") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val sqlContext= sparkSession.sqlContext
import sqlContext.implicits._
val employeeData1 = Seq(
("James", "Sharon", 11, 111, 1111),
("Maria", "Bob", 22, 222, 2222)
)
val df1 = employeeData1.toDF("employee_name", "last_name", "age", "fake", "fake2")
val employeeData2 = Seq(
("James", 1, 33, 333),
("Jen", 4, 44, 444),
("Jeff", 5, 55, 555)
)
val df2 = employeeData2.toDF("employee_name", "salary", "age", "bonus")
val simpleDataExpectedAfterMerge = Seq(
("James", "Sharon" /* Last Name */, Integer.valueOf(1) /* Salary */, Integer.valueOf(33) /* age */,
Integer.valueOf(111) /* fake */, Integer.valueOf(1111) /* fake2 */, Integer.valueOf(333) /* bonus */),
("Maria", "Bob" /* Last Name */, null.asInstanceOf[Integer] /* Salary */, null.asInstanceOf[Integer] /* age */,
Integer.valueOf(222) /* fake */, Integer.valueOf(2222) /* fake2 */, null.asInstanceOf[Integer] /* bonus */),
("Jen", null.asInstanceOf[String] /* Last Name */, Integer.valueOf(4) /* Salary */, Integer.valueOf(44) /* age */,
null.asInstanceOf[Integer] /* fake */, null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(444) /* bonus */),
("Jeff", null.asInstanceOf[String] /* Last Name */, Integer.valueOf(5) /* Salary */, Integer.valueOf(55) /* age */,
null.asInstanceOf[Integer] /* fake */, null.asInstanceOf[Integer] /* fake2 */, Integer.valueOf(555) /* bonus */)
)
val expectedDf = simpleDataExpectedAfterMerge.toDF("employee_name", "last_name", "salary", "age", "fake", "fake2", "bonus")
val mergedDf = merge(df1, df2, Seq("employee_name"))
assertSuccess(mergedDf, expectedDf, isEqual = true)
}
test("df2 has more columns") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val sqlContext= sparkSession.sqlContext
import sqlContext.implicits._
val employeeData1 = Seq(
("James", 1, 11),
("Maria", 2, 22),
("Albert", 3, 33)
)
val df1 = employeeData1.toDF("employee_name", "salary", "age")
val employeeData2 = Seq(
("James", 10, 33, 333, 3333),
("Jen", 4, 44, 444, 4444)
)
val df2 = employeeData2.toDF("employee_name", "salary", "age", "bonus", "fake")
val simpleDataExpectedAfterMerge = Seq(
("James", Integer.valueOf(10) /* Salary */, Integer.valueOf(33) /* age */,
Integer.valueOf(333) /* Bonus */, Integer.valueOf(3333) /* fake */),
("Maria", null.asInstanceOf[Integer] /* Salary */, null.asInstanceOf[Integer] /* age */,
null.asInstanceOf[Integer] /* Bonus */, null.asInstanceOf[Integer] /* fake */),
("Jen", Integer.valueOf(4) /* Salary */, Integer.valueOf(44) /* age */,
Integer.valueOf(444) /* Bonus */, Integer.valueOf(4444) /* fake */),
("Albert", null.asInstanceOf[Integer] /* Salary */, null.asInstanceOf[Integer] /* age */,
null.asInstanceOf[Integer] /* Bonus */, null.asInstanceOf[Integer] /* fake */)
)
val expectedDf = simpleDataExpectedAfterMerge.toDF("employee_name", "salary", "age", "bonus", "fake")
val mergedDf = merge(df1, df2, Seq("employee_name"))
assertSuccess(mergedDf, expectedDf, isEqual = true)
}
test("Empty params metorikku exception") {
val params: Option[Map[String, String]] = Option(Map("df1" -> "df1Name", "df2" -> "df2Name"))
assertThrows[MetorikkuException] {
SelectiveMerge.run(sparkSession, "MetricName", "DataFrameName", params)
}
}
override def afterEach() {
sparkSession.stop()
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/kafka/KafkaOutputWriter.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.output.writers.kafka
import com.yotpo.metorikku.configuration.job.Streaming
import com.yotpo.metorikku.configuration.job.output.Kafka
import com.yotpo.metorikku.exceptions.MetorikkuException
import com.yotpo.metorikku.output.Writer
import org.apache.log4j.{LogManager, Logger}
import org.apache.spark.sql.DataFrame
class KafkaOutputWriter(props: Map[String, String], config: Option[Kafka]) extends Writer {
case class KafkaOutputProperties(topic: String,
keyColumn: Option[String],
valueColumn: String,
outputMode: String,
triggerType: Option[String],
triggerDuration: String)
val log: Logger = LogManager.getLogger(this.getClass)
val topic: String = props.get("topic") match {
case Some(column) => column
case None => throw MetorikkuException("topic is mandatory of KafkaOutputWriter")
}
val valueColumn: String = props.get("valueColumn") match {
case Some(column) => column
case None => throw MetorikkuException("valueColumn is mandatory of KafkaOutputWriter")
}
val kafkaOptions = KafkaOutputProperties(topic,
props.get("keyColumn"),
valueColumn,
props.getOrElse("outputMode", "append"),
props.get("triggerType"),
props.getOrElse("triggerDuration", "10 seconds"))
override def write(dataFrame: DataFrame): Unit = {
config match {
case Some(kafkaConfig) =>
val bootstrapServers = kafkaConfig.servers.mkString(",")
log.info(s"Writing Dataframe to Kafka Topic ${kafkaOptions.topic}")
val df: DataFrame = selectedColumnsDataframe(dataFrame)
df.write.format("kafka")
.option("kafka.bootstrap.servers", bootstrapServers)
.option("topic", kafkaOptions.topic)
.save()
case None =>
}
}
private def selectedColumnsDataframe(dataFrame: DataFrame) = {
val selectExpression = kafkaOptions.keyColumn match {
case None =>
dataFrame.selectExpr(s"${kafkaOptions.valueColumn} as value")
case Some(column) =>
dataFrame.selectExpr(s"CAST($column AS STRING) AS key", s"${kafkaOptions.valueColumn} as value")
}
selectExpression
}
override def writeStream(dataFrame: DataFrame, streamingConfig: Option[Streaming]): Unit = {
config match {
case Some(kafkaConfig) =>
val bootstrapServers = kafkaConfig.servers.mkString(",")
log.info(s"Writing Dataframe to Kafka Topic ${kafkaOptions.topic}")
val df: DataFrame = selectedColumnsDataframe(dataFrame)
val kafkaOutputStream = df.writeStream.format("kafka")
kafkaOutputStream
.option("kafka.bootstrap.servers", bootstrapServers)
.option("topic", kafkaOptions.topic)
kafkaConfig.compressionType match {
case Some(compressionType) => kafkaOutputStream.option("kafka.compression.type",compressionType)
case None =>
}
val deprecatedStreamingConfig = Option(
Streaming(triggerMode=kafkaOptions.triggerType,
triggerDuration=Option(kafkaOptions.triggerDuration),
outputMode=Option(kafkaOptions.outputMode),
checkpointLocation=kafkaConfig.checkpointLocation,
batchMode=None,
extraOptions=None)
)
streamingConfig.orElse(deprecatedStreamingConfig) match {
case Some(config) => config.applyOptions(kafkaOutputStream)
case None =>
}
val query = kafkaOutputStream.start()
query.awaitTermination()
case None =>
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/file/CSVOutputWriter.scala | package com.yotpo.metorikku.output.writers.file
import com.yotpo.metorikku.configuration.job.Streaming
import com.yotpo.metorikku.configuration.job.output.File
import com.yotpo.metorikku.output.Writer
import org.apache.spark.sql.DataFrame
class CSVOutputWriter(var props: Map[String, Any], outputFile: Option[File]) extends Writer {
props = Option(props).getOrElse(Map())
val defaultCSVOptions = Map("escape" -> "\"", "quoteAll" -> "true", "header" -> "true")
// Backward compatibility
val csvOptions = props.getOrElse("csvOptions", Map.empty).asInstanceOf[Map[String, String]]
val extraOptions = props.getOrElse("extraOptions", Map.empty).asInstanceOf[Map[String, String]]
val options = defaultCSVOptions ++ csvOptions ++ extraOptions
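// Option precedence (lowest to highest): the CSV defaults, then the legacy csvOptions block, then
// extraOptions; the merged map is passed to the generic FileOutputWriter with format "csv".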
val fileOutputWriter = new FileOutputWriter(props + (
"extraOptions"-> options,
"format" -> "csv"), outputFile)
override def write(dataFrame: DataFrame): Unit = {
fileOutputWriter.write(dataFrame)
}
override def writeStream(dataFrame: DataFrame, streamingConfig: Option[Streaming]): Unit = {
fileOutputWriter.writeStream(dataFrame, streamingConfig)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/kafka/KafkaInput.scala | package com.yotpo.metorikku.input.readers.kafka
import java.util.Properties
import com.yotpo.metorikku.input.Reader
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.spark.sql.{DataFrame, SparkSession}
import za.co.absa.abris.avro.read.confluent.SchemaManager
import za.co.absa.abris.avro.functions.from_confluent_avro
import org.apache.spark.sql.functions.col
case class KafkaInput(name: String, servers: Seq[String], topic: Option[String], topicPattern: Option[String], consumerGroup: Option[String],
options: Option[Map[String, String]], schemaRegistryUrl: Option[String], schemaSubject: Option[String],
schemaId: Option[String]) extends Reader {
@transient lazy val log = org.apache.log4j.LogManager.getLogger(this.getClass)
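// Builds a structured-streaming Kafka source: optionally registers a lag-reporting listener for
// the consumer group, subscribes by topic or topic pattern, and, when a schema registry URL is
// configured, decodes the Avro value column with ABRiS' from_confluent_avro.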
def read(sparkSession: SparkSession): DataFrame = {
consumerGroup match {
case Some(group) =>
log.info(s"creating consumer group with id $group")
val consumer = createKafkaConsumer(group)
val chosen_topic = topic.getOrElse(topicPattern.getOrElse(""))
val lagWriter = new KafkaLagWriter(consumer, chosen_topic)
sparkSession.streams.addListener(lagWriter)
case _ =>
}
val bootstrapServers = servers.mkString(",")
val inputStream = sparkSession.readStream.format("kafka")
.option("kafka.bootstrap.servers", bootstrapServers)
topic match {
case Some(regular_topic) =>
inputStream.option("subscribe", regular_topic)
case _ =>
}
topicPattern match {
case Some(regex_topic) =>
inputStream.option("subscribePattern", regex_topic)
case _ =>
}
if (options.nonEmpty) {
inputStream.options(options.get)
}
val kafkaDataFrame = inputStream.load()
schemaRegistryUrl match {
case Some(url) => {
val schemaRegistryMap = createSchemaRegistryConfig(url, schemaSubject.getOrElse(topic.get) ,schemaId)
kafkaDataFrame.select(from_confluent_avro(col("value"), schemaRegistryMap) as "result").select("result.*")
}
case None => kafkaDataFrame
}
}
private def createSchemaRegistryConfig(schemaRegistryUrl: String, schemaRegistryTopic: String, schemaId: Option[String]): Map[String,String] = {
val schemaIdValue = schemaId.getOrElse("latest")
val schemaRegistryConfig = Map(
SchemaManager.PARAM_SCHEMA_REGISTRY_URL -> schemaRegistryUrl,
SchemaManager.PARAM_SCHEMA_REGISTRY_TOPIC -> schemaRegistryTopic,
SchemaManager.PARAM_VALUE_SCHEMA_NAMING_STRATEGY -> SchemaManager.SchemaStorageNamingStrategies.TOPIC_NAME,
SchemaManager.PARAM_VALUE_SCHEMA_ID -> schemaIdValue
)
schemaRegistryConfig
}
private def createKafkaConsumer(consumerGroupID: String) = {
val props = new Properties()
props.put("bootstrap.servers", servers.mkString(","))
props.put("group.id", consumerGroupID)
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
new KafkaConsumer[String, String](props)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/RemoveDuplicates.scala | package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.log4j.LogManager
object RemoveDuplicates {
val tableParameterName = "table"
val columnsParameterName = "columns"
val message = "You need to send a 'table' parameter containing the table name to change"
val missingOptionalParameterMsg = s"Didn't supply index columns (using '${columnsParameterName}' parameter), so comparing entire row"
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params match {
case Some(parameters) => {
val table = parameters.getOrElse(tableParameterName, throw MetorikkuException(message))
val getRowColumnNames = () => {
LogManager.getLogger(RemoveDuplicates.getClass.getCanonicalName).warn(missingOptionalParameterMsg)
ss.table(table).columns.mkString(",")
}
val columnNames = parameters.getOrElse(columnsParameterName, getRowColumnNames())
ss.table(table).dropDuplicates(columnNames.split(",")).createOrReplaceTempView(dataFrameName)
}
case None => throw MetorikkuException(message)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/HasSize.scala | <gh_stars>100-1000
package com.yotpo.metorikku.metric.stepActions.dataQuality.operators
import com.amazon.deequ.checks.Check
import com.yotpo.metorikku.metric.stepActions.dataQuality.Operator
class HasSize(level: Option[String], size: String, operator: String) extends Operator(level = level) {
override def getCheck(level: String): Check = {
new Check(getLevel(level), "Size check for data frame size: %s".format(size)).hasSize(Evaluator().dqAssertion(operator, size.toLong))
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/instrumentation/NullInstrumentation.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.instrumentation
class NullInstrumentationFactory extends InstrumentationFactory {
override def create(): InstrumentationProvider = {
new NullInstrumentation()
}
}
class NullInstrumentation extends InstrumentationProvider {
override def count(name: String, value: Long, tags: Map[String, String], time: Long): Unit = None
override def gauge(name: String, value: Long, tags: Map[String, String], time: Long): Unit = None
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/SelectiveMerge.scala | <gh_stars>0
package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.log4j.{LogManager, Logger}
import org.apache.spark.sql.catalyst.expressions.NamedExpression
import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions._
object SelectiveMerge {
private val message = "You need to send 3 parameters with the names of the dataframes to merge and the key(s) to merge on" +
"(merged df1 into df2 favoring values from df2): df1, df2, Seq[String]"
private val log: Logger = LogManager.getLogger(this.getClass)
private val colRenameSuffixLength = 10000 // random prefix drawn from [0, 10000), i.e. at most 4 digits
private val colRenamePrefix = scala.util.Random.nextInt(colRenameSuffixLength).toString
private class InputMatcher[K](ks: K*) {
def unapplySeq[V](m: Map[K, V]): Option[Seq[V]] = if (ks.forall(m.contains)) Some(ks.map(m)) else None
}
private val InputMatcher = new InputMatcher("df1", "df2", "joinKeys")
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params.get match {
case InputMatcher(df1Name, df2Name, joinKeysStr) => {
log.info(s"Selective merging $df1Name into $df2Name using keys $joinKeysStr")
val df1 = ss.table(df1Name)
val df2 = ss.table(df2Name)
val joinKeys = joinKeysStr.split(" ").toSeq
if (df1.isEmpty) {
log.error("DF1 is empty")
throw MetorikkuException("DF1 is empty")
}
if (df2.isEmpty) {
log.warn("DF2 is empty.")
df1.createOrReplaceTempView(dataFrameName)
}
else {
logOverrides(df1, df2, joinKeys)
merge(df1, df2, joinKeys).createOrReplaceTempView(dataFrameName)
}
}
case _ => throw MetorikkuException(message)
}
}
def logOverrides(df1: DataFrame, df2: DataFrame, joinKeys: Seq[String]): Unit = {
val df1SchemaTitles = df1.schema.map(f => f.name).toList
val df2SchemaTitles = df2.schema.map(f => f.name).toList
val overridenColumns = df2SchemaTitles.filter(p => df1SchemaTitles.contains(p) && !joinKeys.contains(p))
val df1OnlyColumns = df1SchemaTitles diff df2SchemaTitles
val df2OnlyColumns = df2SchemaTitles diff df1SchemaTitles
log.info("DF1 columns which will be overridden: " + overridenColumns)
log.info("DF1 columns which are not found in DF2: " + df1OnlyColumns)
log.info("DF2 columns which are not found in DF1: " + df2OnlyColumns)
}
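// Merge strategy: alias every non-key df2 column with a random numeric prefix, outer-join the
// result with df1 on the join keys, then project the union of both schemas, preferring the
// prefixed (df2) value wherever a column exists in both frames.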
def merge(df1: DataFrame, df2: DataFrame, joinKeys: Seq[String]): DataFrame = {
val mergedDf = outerJoinWithAliases(df1, df2, joinKeys)
overrideConflictingValues(df1, df2, mergedDf, joinKeys)
}
def outerJoinWithAliases(df1: DataFrame, df2: DataFrame, joinKeys: Seq[String]): DataFrame = {
val columns = df2.schema.map(f => col(f.name)).collect({ case name: Column => name }).toArray
val columnsRenamed = columns.map(column => if (joinKeys.contains(s"$column")) s"$column" else s"$colRenamePrefix$column")
df2.select(
columns.zip(columnsRenamed).map{
case (x: Column, y: String) => {
x.alias(y)
}
}: _*
).join(df1, joinKeys,"outer")
}
def getMergedSchema(df1: DataFrame, df2: DataFrame, joinKeys: Seq[String]): Seq[Column] = {
val mergedSchemaNames = (df1.schema.map(f => f.name) ++ df2.schema.map(f => f.name)).distinct
val mergedSchema = mergedSchemaNames.map(s =>
if (df2.columns.contains(s) && !joinKeys.contains(s)) {
col(colRenamePrefix + s)
}
else {
col(s)
}
)
mergedSchema
}
def overrideConflictingValues(df1: DataFrame, df2: DataFrame, mergedDf: DataFrame, joinKeys: Seq[String]): DataFrame = {
val mergedSchema = getMergedSchema(df1, df2, joinKeys)
mergedDf.select(
mergedSchema.map{
case (currColumn: Column) => {
val colName = currColumn.expr.asInstanceOf[NamedExpression].name
val colNameArr = colName.split(colRenamePrefix)
val colNameOrig = if (colNameArr.size > 1) colNameArr(1) else colName
// Belongs to DF2, override.
if (colNameArr.size > 1) {
mergedDf(colName).alias(colNameOrig)
}
// Is the join key(s)
else if (joinKeys.contains(colName)) {
mergedDf(colName)
}
// Only exists in DF1.
else {
df1(colName)
}
}
}: _*
)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/StepAction.scala | package com.yotpo.metorikku.metric
import org.apache.spark.sql.SparkSession
trait StepAction[A] {
def dataFrameName: String
def run(sparkSession: SparkSession): A
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/Output.scala | package com.yotpo.metorikku.configuration.job
import com.yotpo.metorikku.configuration.job.output._
case class Output(cassandra: Option[Cassandra] = None,
redshift: Option[Redshift] = None,
redis: Option[Redis] = None,
segment: Option[Segment] = None,
jdbc: Option[JDBC] = None,
jdbcquery: Option[JDBC] = None,
file: Option[File] = None,
kafka: Option[Kafka] = None,
elasticsearch: Option[Elasticsearch] = None,
hudi: Option[Hudi] = None)
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/DropColumns.scala | package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
object DropColumns {
val message = "You need to send 2 parameters with the names of the dropped columns and the table to change: columns, table"
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params match {
case Some(parameters) => {
val columnNames = parameters.get("columns").get
val table = parameters.get("table").get
ss.table(table).drop(columnNames.split(","): _*).createOrReplaceTempView(dataFrameName)
}
case None => throw MetorikkuException(message)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Kafka.scala | package com.yotpo.metorikku.configuration.job.output
case class Kafka(servers: Seq[String],
checkpointLocation: Option[String],
compressionType: Option[String]
) {
require(Option(servers).isDefined, "Kafka connection: servers are mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/metric/Output.scala | <reponame>rluta/metorikku<gh_stars>100-1000
package com.yotpo.metorikku.configuration.metric
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala.JsonScalaEnumeration
case class Output(name: Option[String],
dataFrameName: String,
@JsonScalaEnumeration(classOf[OutputTypeReference]) outputType: OutputType.OutputType,
reportLag: Option[Boolean],
reportLagTimeColumn: Option[String],
reportLagTimeColumnUnits: Option[String],
repartition: Option[Int],
coalesce: Option[Boolean],
protectFromEmptyOutput: Option[Boolean],
outputOptions: Map[String, Any])
object OutputType extends Enumeration {
type OutputType = Value
val Parquet,
Cassandra,
CSV,
JSON,
Redshift,
Redis,
Segment,
Instrumentation,
JDBC,
JDBCQuery,
Elasticsearch,
File,
Kafka,
Catalog,
Hudi = Value
}
class OutputTypeReference extends TypeReference[OutputType.type]
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/Elasticsearch.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.elasticsearch.ElasticsearchInput
case class Elasticsearch(nodes: String,
user: Option[String],
password: Option[String],
index: String,
options: Option[Map[String, String]]
) extends InputConfig {
require(Option(nodes).isDefined, "Elasticsearch input: nodes is mandatory")
require(Option(index).isDefined, "Elasticsearch input: index is mandatory")
override def getReader(name: String): Reader = ElasticsearchInput(name=name,
nodes=nodes, user=user, password=password, index=index, options=options)
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/File.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.file.{FileInput, FileStreamInput}
case class File(path: String,
options: Option[Map[String, String]],
schemaPath: Option[String],
format: Option[String],
isStream: Option[Boolean]) extends InputConfig {
override def getReader(name: String): Reader = {
isStream match {
case Some(true) => FileStreamInput(name, path, options, schemaPath, format)
case _ => FileInput(name, path, options, schemaPath, format)
}
}
}
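
// Usage sketch (the path below is hypothetical and must exist for the read to succeed):
// builds a batch FileInput via this config and loads it with Spark's CSV reader.
object FileInputConfigExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("FileInputConfigExample").getOrCreate()
    val input = File(
      path = "/tmp/example/users.csv", // hypothetical path
      options = Some(Map("header" -> "true")),
      schemaPath = None,
      format = Some("csv"),
      isStream = None)
    input.getReader("users").read(spark).printSchema()
  }
}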
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/redshift/RedshiftOutputWriter.scala | <reponame>rluta/metorikku<filename>src/main/scala/com/yotpo/metorikku/output/writers/redshift/RedshiftOutputWriter.scala
package com.yotpo.metorikku.output.writers.redshift
import com.yotpo.metorikku.configuration.job.output.Redshift
import com.yotpo.metorikku.output.Writer
import org.apache.log4j.LogManager
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, SaveMode}
class RedshiftOutputWriter(props: Map[String, String], redshiftDBConf: Option[Redshift]) extends Writer {
case class RedshiftOutputProperties(saveMode: SaveMode,
dbTable: String,
extraCopyOptions: String,
preActions: String,
postActions: String,
maxStringSize: String,
extraOptions: Option[Map[String, String]])
val log = LogManager.getLogger(this.getClass)
val dbOptions = RedshiftOutputProperties(SaveMode.valueOf(props("saveMode")),
props("dbTable"),
props.getOrElse("extraCopyOptions",""),
props.getOrElse("preActions",""),
props.getOrElse("postActions",""),
props.getOrElse("maxStringSize",""),
props.get("extraOptions").asInstanceOf[Option[Map[String, String]]])
override def write(dataFrame: DataFrame): Unit = {
redshiftDBConf match {
case Some(redshiftDBConf) =>
import dataFrame.sparkSession.implicits._
var df = dataFrame
df.schema.fields.filter(f => f.dataType.isInstanceOf[StringType]).foreach(f => {
val maxlength = dbOptions match {
case _ if !dbOptions.maxStringSize.isEmpty => dbOptions.maxStringSize.toInt
case _ => df.agg(max(length(df(f.name)))).as[Int].first
}
val varcharMetaData = new MetadataBuilder().putLong("maxlength", maxlength).build()
df = df.withColumn(f.name, df(f.name).as(f.name, varcharMetaData))
})
log.info(s"Writing dataframe to Redshift' table ${props("dbTable")}")
val writer = df.write.format("io.github.spark_redshift_community.spark.redshift")
.option("url", redshiftDBConf.jdbcURL)
.option("forward_spark_s3_credentials", true)
.option("tempdir", redshiftDBConf.tempS3Dir)
.option("dbtable", dbOptions.dbTable)
.mode(dbOptions.saveMode)
if (!dbOptions.preActions.isEmpty) {
writer.option("preActions", dbOptions.preActions)
}
if (!dbOptions.postActions.isEmpty) {
writer.option("postActions", dbOptions.postActions)
}
if (!dbOptions.extraCopyOptions.isEmpty) {
writer.option("extracopyoptions", dbOptions.extraCopyOptions)
}
dbOptions.extraOptions match {
case Some(options) => writer.options(options)
case None =>
}
writer.save()
case None => log.error(s"Redshift DB configuration isn't provided")
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/exceptions/MetorikkuException.scala | package com.yotpo.metorikku.exceptions
case class MetorikkuException(private val message: String = "",
private val cause: Throwable = None.orNull)
extends Exception(message, cause)
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/code/steps/test/AlignTablesTests.scala | package com.yotpo.metorikku.code.steps.test
import com.holdenkarau.spark.testing.DataFrameSuiteBase
import com.yotpo.metorikku.code.steps.AlignTables
import org.apache.spark.sql.SparkSession
import org.scalatest.{FunSuite, _}
//noinspection ScalaStyle
class AlignTablesTests extends FunSuite with DataFrameSuiteBase with BeforeAndAfterEach {
private var sparkSession: SparkSession = _
override def beforeEach() {
sparkSession = SparkSession.builder().appName("udf tests")
.master("local")
.config("", "")
.getOrCreate()
}
test("Align Tables") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
import sparkSession.implicits._
val employeeData = Seq(
("James", 1, 11, 111, 1111),
("Maria", 2, 22, 222, 2222)
)
employeeData.toDF("employee_name", "salary", "age", "fake", "fake2").createOrReplaceTempView("src")
val employeeDataExpected = Seq(
("James",1, 11, null),
("Maria",2, 22, null)
)
employeeDataExpected.toDF("employee_name", "salary", "age", "bonus").createOrReplaceTempView("employeeDataExpected")
AlignTables.run(sparkSession, "", "resultFrame", Option(Map("from" -> "src", "to" -> "employeeDataExpected")))
assertResult(Array("employee_name", "salary", "age", "bonus"))(sparkSession.table("resultFrame").columns)
assertDataFrameEquals(sparkSession.table("resultFrame"), sparkSession.table("employeeDataExpected"))
}
override def afterEach() {
sparkSession.stop()
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/Instrumentation.scala | <filename>src/main/scala/com/yotpo/metorikku/configuration/job/Instrumentation.scala
package com.yotpo.metorikku.configuration.job
import com.yotpo.metorikku.configuration.job.instrumentation.InfluxDBConfig
case class Instrumentation(influxdb: Option[InfluxDBConfig])
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/test/ConfigurationParser.scala | package com.yotpo.metorikku.configuration.test
import java.io.File
import java.nio.file.{Files, Paths}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.yotpo.metorikku.exceptions.{MetorikkuException, MetorikkuInvalidMetricFileException}
import com.yotpo.metorikku.utils.FileUtils
import org.apache.log4j.{LogManager, Logger}
import scopt.OptionParser
object ConfigurationParser {
val log: Logger = LogManager.getLogger(this.getClass)
val NumberOfPreviewLines = 10
case class TesterArgs(settings: Seq[String] = Seq(), preview: Int = NumberOfPreviewLines)
case class TesterConfig(test: Configuration, basePath: File, preview: Int)
val CLIparser: OptionParser[TesterArgs] = new scopt.OptionParser[TesterArgs]("MetorikkuTester") {
head("MetorikkuTester", "1.0")
opt[Seq[String]]('t', "test-settings")
.valueName("<test-setting1>,<test-setting2>...")
.action((x, c) => c.copy(settings = x))
.text("test settings for each metric set")
.validate(x => {
if (x.exists(f => !Files.exists(Paths.get(f)))) {
failure("One of the file is not found")
}
else {
success
}
})
.required()
opt[Int]('p', "preview").action((x, c) =>
c.copy(preview = x)).text("number of preview lines for each step")
help("help") text "use command line arguments to specify the settings for each metric set"
}
def parse(args: Array[String]): Seq[TesterConfig] = {
log.info("Starting Metorikku - Parsing configuration")
CLIparser.parse(args, TesterArgs()) match {
case Some(arguments) =>
arguments.settings.map(fileName => {
TesterConfig(parseConfigurationFile(fileName), new File(fileName).getParentFile, arguments.preview)
})
case None => throw new MetorikkuException("Failed to parse config file")
}
}
def parseConfigurationFile(fileName: String): Configuration = {
FileUtils.getObjectMapperByExtension(fileName) match {
case Some(mapper) => {
mapper.registerModule(DefaultScalaModule)
mapper.readValue(FileUtils.readConfigurationFile(fileName), classOf[Configuration])
}
case None => throw MetorikkuInvalidMetricFileException(s"Unknown extension for file $fileName")
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/LoadIfExists.scala | <reponame>rluta/metorikku<filename>src/main/scala/com/yotpo/metorikku/code/steps/LoadIfExists.scala
package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.log4j.{LogManager, Logger}
import org.apache.spark.sql.{Row}
object LoadIfExists {
val message = "You need to send 2 parameters with the names of a df and a name of a table to try to load: dfName, tableName"
private val log: Logger = LogManager.getLogger(this.getClass)
private class InputMatcher[K](ks: K*) {
def unapplySeq[V](m: Map[K, V]): Option[Seq[V]] = if (ks.forall(m.contains)) Some(ks.map(m)) else None
}
private val InputMatcher = new InputMatcher("dfName", "tableName")
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
    params.getOrElse(Map()) match {
case InputMatcher(dfName, tableName) => {
log.info(s"Attempting to load $tableName")
if (ss.catalog.tableExists(tableName)) {
ss.table(tableName).createOrReplaceTempView(dataFrameName)
}
else {
val df = ss.table(dfName)
ss.createDataFrame(ss.sparkContext.emptyRDD[Row], df.schema).createOrReplaceTempView(dataFrameName)
}
}
case _ => throw MetorikkuException(message)
}
}
}
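
// Usage sketch (hypothetical names), assuming a local SparkSession:
// "maybe_missing_table" is not registered, so the step exposes an empty frame with fallback_df's schema.
object LoadIfExistsExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("LoadIfExistsExample").getOrCreate()
    import spark.implicits._
    Seq((1, "a")).toDF("id", "name").createOrReplaceTempView("fallback_df")
    LoadIfExists.run(spark, "exampleMetric", "resolved_df", Some(Map("dfName" -> "fallback_df", "tableName" -> "maybe_missing_table")))
    spark.table("resolved_df").printSchema()
  }
}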
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/file/FileInput.scala | package com.yotpo.metorikku.input.readers.file
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.{DataFrame, SparkSession}
case class FileInput(val name: String,
path: String,
options: Option[Map[String, String]],
schemaPath: Option[String],
format: Option[String]) extends Reader {
def read(sparkSession: SparkSession): DataFrame = FilesInput(name,
path.split(","),
options,
schemaPath,
format).read(sparkSession)
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/Cassandra.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.cassandra.CassandraInput
case class Cassandra(host: String,
user: Option[String],
password: Option[String],
table: String,
keySpace: String,
options: Option[Map[String, String]]
) extends InputConfig {
require(Option(host).isDefined, "Cassandra input: host is mandatory")
require(Option(keySpace).isDefined, "Cassandra input: keySpace is mandatory")
require(Option(table).isDefined, "Cassandra input: table is mandatory")
override def getReader(name: String): Reader = CassandraInput(name=name,
host=host, user=user, password=password,
keySpace=keySpace, table=table, options=options)
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/Operator.scala | package com.yotpo.metorikku.metric.stepActions.dataQuality
import com.amazon.deequ.checks.{Check, CheckLevel}
abstract case class Operator(level: Option[String]) {
def getCheck(level: String): Check
def getLevel(level: String): CheckLevel.Value = {
level match {
case "error" => CheckLevel.Error
case "warn" | "warning" => CheckLevel.Warning
      case _ => throw new IllegalArgumentException(s"Illegal DQ level: $level")
}
}
}
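
// Minimal sketch of a concrete operator (hypothetical class, mirroring the ones under the operators package),
// assuming Deequ's Check.isComplete constraint:
class IsCompleteSketch(level: Option[String], column: String) extends Operator(level = level) {
  override def getCheck(level: String): Check =
    new Check(getLevel(level), "Completeness check for column: %s".format(column)).isComplete(column)
}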
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/Streaming.scala | package com.yotpo.metorikku.configuration.job
import com.yotpo.metorikku.exceptions.MetorikkuWriteFailedException
import org.apache.spark.sql.streaming.{DataStreamWriter, Trigger}
case class Streaming(triggerMode: Option[String],
triggerDuration: Option[String],
outputMode: Option[String],
checkpointLocation: Option[String],
batchMode: Option[Boolean],
extraOptions: Option[Map[String, String]]) {
@transient lazy val log = org.apache.log4j.LogManager.getLogger(this.getClass)
def applyOptions(writer: DataStreamWriter[_]): Unit = {
checkpointLocation match {
case Some(location) => writer.option("checkpointLocation", location)
case None =>
}
outputMode match {
case Some(outputMode) => writer.outputMode(outputMode)
case None =>
}
(triggerMode, triggerDuration) match {
case (Some("ProcessingTime"), Some(duration)) =>
writer.trigger(Trigger.ProcessingTime(duration))
case (Some("Once"), _) =>
writer.trigger(Trigger.Once())
case (Some("Continuous"), Some(duration)) =>
writer.trigger(Trigger.Continuous(duration))
case _ =>
log.warn("no triggerMode was passed or trigger sent is invalid. writer will be returned with default trigger mode")
writer
}
extraOptions match {
case Some(options) => writer.options(options)
case None =>
}
}
}
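
// Usage sketch (hypothetical values), assuming Spark's built-in "rate" source and "console" sink:
object StreamingConfigExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("StreamingConfigExample").getOrCreate()
    val conf = Streaming(
      triggerMode = Some("ProcessingTime"),
      triggerDuration = Some("10 seconds"),
      outputMode = Some("append"),
      checkpointLocation = Some("/tmp/metorikku-example-checkpoint"), // hypothetical location
      batchMode = Some(false),
      extraOptions = None)
    val writer = spark.readStream.format("rate").load().writeStream.format("console")
    conf.applyOptions(writer)
    // writer.start().awaitTermination() // uncomment to actually run the stream
  }
}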
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/Watermark.scala | <filename>src/main/scala/com/yotpo/metorikku/code/steps/Watermark.scala<gh_stars>100-1000
package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
object Watermark {
val message = "You need to send 3 parameters: table, eventTime, delayThreshold"
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params match {
      case Some(parameters) => {
        val table = parameters.get("table").get
        val eventTime = parameters.get("eventTime").get
        val delayThreshold = parameters.get("delayThreshold").get
ss.table(table).withWatermark(eventTime, delayThreshold).createOrReplaceTempView(dataFrameName)
}
case None => throw MetorikkuException(message)
}
}
}
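
// Usage sketch (hypothetical view/column names), assuming an event-time column of TimestampType;
// on a batch frame the watermark is a no-op, but the step wiring is the same for streaming inputs.
object WatermarkExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("WatermarkExample").getOrCreate()
    import spark.implicits._
    Seq((1, java.sql.Timestamp.valueOf("2020-01-01 00:00:00")))
      .toDF("id", "event_time").createOrReplaceTempView("events")
    Watermark.run(spark, "exampleMetric", "events_with_watermark",
      Some(Map("table" -> "events", "eventTime" -> "event_time", "delayThreshold" -> "10 minutes")))
    spark.table("events_with_watermark").printSchema()
  }
}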
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/utils/TableUtils.scala | package com.yotpo.metorikku.utils
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.spark.sql.catalog.Catalog
case class TableInfo(database: String, tableName: String)
object TableUtils {
def getTableInfo(tableFullName: String, catalog: Catalog): TableInfo = {
tableFullName.count(_ == '.') match {
case 0 => TableInfo(catalog.currentDatabase, tableFullName)
case 1 => {
val tablePathArr = tableFullName.split("\\.")
TableInfo(tablePathArr(0), tablePathArr(1))
}
case _ => throw MetorikkuException(s"Table name ${tableFullName} is in invalid format")
}
}
}
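
// Usage sketch, assuming a local SparkSession (the "analytics" database name is hypothetical;
// getTableInfo only parses the name, it does not verify that the table exists):
object TableUtilsExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("TableUtilsExample").getOrCreate()
    println(TableUtils.getTableInfo("events", spark.catalog))           // resolves against the current database
    println(TableUtils.getTableInfo("analytics.events", spark.catalog)) // TableInfo(analytics, events)
  }
}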
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/MongoDB.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.elasticsearch.MongoDBInput
case class MongoDB(uri: String,
database: String,
collection: String,
options: Option[Map[String, String]]
) extends InputConfig {
require(Option(uri).isDefined, "MongoDB input: uri is mandatory")
require(Option(database).isDefined, "MongoDB input: database is mandatory")
require(Option(collection).isDefined, "MongoDB input: collection is mandatory")
  override def getReader(name: String): Reader = MongoDBInput(name=name, uri=uri, database=database, collection=collection, options=options)
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/code/steps/functions/test/UserDefinedFunctionsTests.scala | package com.yotpo.metorikku.code.steps.functions.test
import java.sql.Timestamp
import org.scalatest.{FunSuite, Suites}
import com.yotpo.metorikku.code.steps.functions.UserDefinedFunctions._
class UserDefinedFunctionsTests extends Suites (
new EpochMilliToTimestampTest
)
class EpochMilliToTimestampTest extends FunSuite {
test("Given a Long representing Epoch Milli returns a Timestamp") {
assert(epochMilliToTimestamp(1584176754000L) === new Timestamp(1584176754000L))
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/HasUniqueness.scala | <filename>src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/HasUniqueness.scala
package com.yotpo.metorikku.metric.stepActions.dataQuality.operators
import com.amazon.deequ.checks.Check
import com.yotpo.metorikku.metric.stepActions.dataQuality.Operator
class HasUniqueness(level: Option[String], columns: Seq[String],
fraction: Option[String], operator: Option[String]) extends Operator(level = level) {
override def getCheck(level: String): Check = {
new Check(getLevel(level), "Uniqueness test for a single or combined set of key columns: %s".format(columns)).
hasUniqueness(columns, Evaluator().dqAssertion(operator getOrElse "==", (fraction getOrElse "1.0").toDouble))
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/instrumentation/influxdb/InfluxDBInstrumentation.scala | package com.yotpo.metorikku.instrumentation.influxdb
import java.util.concurrent.TimeUnit
import com.yotpo.metorikku.configuration.job.instrumentation.InfluxDBConfig
import com.yotpo.metorikku.instrumentation.{InstrumentationFactory, InstrumentationProvider}
import org.influxdb.dto.Point
import org.influxdb.{BatchOptions, InfluxDB, InfluxDBFactory}
import scala.collection.JavaConverters.mapAsJavaMapConverter
class InfluxDBInstrumentation(val influxDB: InfluxDB, val measurement: String) extends InstrumentationProvider {
override def count(name: String, value: Long, tags: Map[String, String] = Map(), time: Long): Unit = {
writeToInflux(time, name, value, tags)
}
override def gauge(name: String, value: Long, tags: Map[String, String] = Map(), time: Long): Unit = {
writeToInflux(time, name, value, tags)
}
private def writeToInflux(time: Long, name: String, value: Long, tags: Map[String, String] = Map()): Unit = {
influxDB.write(Point.measurement(measurement)
.time(time, TimeUnit.MILLISECONDS)
.addField(name, value)
.tag(tags.asJava)
.build())
}
override def close(): Unit = {
influxDB.close()
}
}
class InfluxDBInstrumentationFactory(val measurement: String, val config: InfluxDBConfig) extends InstrumentationFactory {
val JITTER_DURATION = 500
override def create(): InstrumentationProvider = {
// scalastyle:off null
var influxDB: InfluxDB = null
config.username match {
case Some(username) => influxDB = InfluxDBFactory.connect(config.url, username, config.password.getOrElse(null))
case None => influxDB = InfluxDBFactory.connect(config.url)
}
// scalastyle:on null
influxDB
.setDatabase(config.dbName)
.enableBatch(BatchOptions.DEFAULTS.jitterDuration(JITTER_DURATION))
.enableGzip()
new InfluxDBInstrumentation(influxDB, measurement)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/WriterSessionRegistration.scala | package com.yotpo.metorikku.output
import org.apache.spark.sql.SparkSession
trait WriterSessionRegistration {
def addToSparkSession(sparkSession: SparkSession): Unit = {}
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/code/steps/test/RemoveDuplicatesTests.scala | <filename>src/test/scala/com/yotpo/metorikku/code/steps/test/RemoveDuplicatesTests.scala
package com.yotpo.metorikku.code.steps.test
import com.holdenkarau.spark.testing.DataFrameSuiteBase
import com.yotpo.metorikku.code.steps.RemoveDuplicates
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.log4j.{Level, LogManager, Logger}
import org.apache.spark.sql.SparkSession
import org.scalatest.{FunSuite, _}
//noinspection ScalaStyle
class RemoveDuplicatesTests extends FunSuite with DataFrameSuiteBase with BeforeAndAfterEach {
private val log: Logger = LogManager.getLogger(this.getClass)
private var sparkSession: SparkSession = _
Logger.getLogger("org").setLevel(Level.WARN)
override def beforeEach() {
sparkSession = SparkSession.builder().appName("udf tests")
.master("local")
.config("", "")
.getOrCreate()
}
test("RemoveDuplicates by comparing an index column") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
import sparkSession.implicits._
val employeeDataSrc = Seq(
(1, "James"),
(1, "Maria")
)
val employeeDataExpected = Seq(
(1, "James")
)
employeeDataSrc.toDF("id", "employee_name").createOrReplaceTempView("employeeDataActual")
employeeDataExpected.toDF("id", "employee_name").createOrReplaceTempView("employeeDataExpected")
RemoveDuplicates.run(sparkSession, "", "employeeDataExpectedResult", Some(Map("table" -> "employeeDataActual", "columns"->"id")))
assertDataFrameEquals(sparkSession.table("employeeDataExpected"), sparkSession.table("employeeDataExpectedResult"))
}
test("RemoveDuplicates by comparing entire row") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
import sparkSession.implicits._
val employeeDataSrc = Seq(
(2, "James"),
(2, "James"),
(1, "Maria")
)
val employeeDataExpected = Seq(
(2, "James"),
(1, "Maria")
)
employeeDataSrc.toDF("id", "employee_name").createOrReplaceTempView("employeeDataActual")
employeeDataExpected.toDF("id", "employee_name").createOrReplaceTempView("employeeDataExpected")
RemoveDuplicates.run(sparkSession, "", "employeeDataExpectedResult", Some(Map("table" -> "employeeDataActual")))
assertDataFrameEquals(sparkSession.table("employeeDataExpected"), sparkSession.table("employeeDataExpectedResult"))
}
test("RemoveDuplicates fails if no table is provided") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
assertThrows[MetorikkuException] {
RemoveDuplicates.run(sparkSession, "", "employeeDataExpectedResult", Some(Map()))
}
}
test("RemoveDuplicates fails if no parameters provided") {
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
assertThrows[MetorikkuException] {
RemoveDuplicates.run(sparkSession, "", "employeeDataExpectedResult", None)
}
}
override def afterEach() {
sparkSession.stop()
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/test/StreamMockInput.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.test
import com.yotpo.metorikku.configuration.job.input.File
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
class StreamMockInput(fileInput: File) extends File("", None, None, None, None) {
override def getReader(name: String): Reader = StreamMockInputReader(name, fileInput)
}
case class StreamMockInputReader(val name: String, fileInput: File) extends Reader {
def read(sparkSession: SparkSession): DataFrame = {
val df = fileInput.getReader(name).read(sparkSession)
implicit val encoder = RowEncoder(df.schema)
implicit val sqlContext = sparkSession.sqlContext
val stream = MemoryStream[Row]
stream.addData(df.collect())
stream.toDF()
}
}
|
rluta/metorikku | build.sbt | <reponame>rluta/metorikku
name := "metorikku"
organization := "com.yotpo"
homepage := Some(url("https://github.com/YotpoLtd/metorikku"))
licenses := Seq("MIT License" -> url("http://www.opensource.org/licenses/mit-license.html"))
scmInfo := Some(
ScmInfo(url("https://github.com/YotpoLtd/metorikku"),
"scm:git:<EMAIL>:YotpoLtd/metorikku.git"))
developers := List(
Developer(id="Yotpo", name="Yotpo", email="", url=url("http://www.yotpo.com"))
)
crossScalaVersions := Seq(
Option(System.getenv("SCALA_VERSION")).getOrElse("2.12.10"),
Option(System.getenv("SPARK2_SCALA_VERSION")).getOrElse("2.11.12"))
scalaVersion := Option(System.getenv("SCALA_VERSION")).getOrElse("2.12.10")
val sparkVersion: Def.Initialize[String] = Def.setting {
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, scalaMajor)) if scalaMajor >= 12 => Option(System.getenv("SPARK_VERSION")).getOrElse("3.0.0")
case _ => Option(System.getenv("SPARK2_VERSION")).getOrElse("2.4.6")
}
}
val jacksonVersion: Def.Initialize[String] = Def.setting {
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, scalaMajor)) if scalaMajor >= 12 => Option(System.getenv("JACKSON_VERSION")).getOrElse("2.10.0")
case _ => Option(System.getenv("SPARK2_JACKSON_VERSION")).getOrElse("2.9.9")
}
}
val sparkRedshiftVersion: Def.Initialize[String] = Def.setting {
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, scalaMajor)) if scalaMajor >= 12 => "4.2.0"
case _ => "4.1.1"
}
}
val deequVersion: Def.Initialize[String] = Def.setting {
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, scalaMajor)) if scalaMajor >= 12 => "1.1.0_spark-3.0-scala-2.12"
case _ => "1.1.0_spark-2.4-scala-2.11"
}
}
testOptions in Test := {
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, scalaMajor)) if scalaMajor >= 12 => Seq(Tests.Argument("-l","com.yotpo.metorikku.tags.UnsupportedInCurrentVersion"))
case _ => Seq()
}
}
lazy val excludeJpountz = ExclusionRule(organization = "net.jpountz.lz4", name = "lz4")
lazy val excludeNetty = ExclusionRule(organization = "io.netty", name = "netty")
lazy val excludeNettyAll = ExclusionRule(organization = "io.netty", name = "netty-all")
lazy val excludeAvro = ExclusionRule(organization = "org.apache.avro", name = "avro")
lazy val excludeSpark = ExclusionRule(organization = "org.apache.spark")
lazy val excludeLog4j = ExclusionRule(organization = "org.apache.logging.log4j")
lazy val excludeParquet = ExclusionRule(organization = "org.apache.parquet")
lazy val excludeScalanlp = ExclusionRule(organization = "org.scalanlp")
libraryDependencies ++= Seq(
"org.apache.spark" %% "spark-core" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-sql" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-mllib" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-hive" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-sql-kafka-0-10" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-streaming" % sparkVersion.value % "provided",
"org.apache.spark" %% "spark-avro" % sparkVersion.value % "provided",
"com.holdenkarau" %% "spark-testing-base" % "2.4.5_0.14.0" % "test" excludeAll excludeSpark,
"com.github.scopt" %% "scopt" % "3.7.1",
"org.scala-lang" % "scala-library" % scalaVersion.value,
"com.typesafe.play" %% "play-json" % "2.7.4",
"com.fasterxml.jackson.module" %% "jackson-module-scala" % jacksonVersion.value,
"com.fasterxml.jackson.dataformat" % "jackson-dataformat-cbor" % jacksonVersion.value,
"com.fasterxml.jackson.core" % "jackson-core" % jacksonVersion.value,
"com.fasterxml.jackson.core" % "jackson-annotations" % jacksonVersion.value,
"com.fasterxml.jackson.core" % "jackson-databind" % jacksonVersion.value,
"com.fasterxml.jackson.dataformat" % "jackson-dataformat-yaml" % jacksonVersion.value,
"org.apache.commons" % "commons-text" % "1.8",
"org.influxdb" % "influxdb-java" % "2.14",
"io.github.spark-redshift-community" %% "spark-redshift" % sparkRedshiftVersion.value,
"com.segment.analytics.java" % "analytics" % "2.1.1" % "provided",
"com.datastax.spark" %% "spark-cassandra-connector" % "3.0.0-alpha2" % "provided",
"com.redislabs" %% "spark-redis" % "2.5.0" % "provided",
"org.apache.kafka" %% "kafka" % "2.2.0" % "provided",
"za.co.absa" %% "abris" % "3.2.1" % "provided" excludeAll(excludeAvro, excludeSpark),
"org.apache.hudi" %% "hudi-spark-bundle" % "0.5.3" % "provided",
"org.apache.parquet" % "parquet-avro" % "1.10.1" % "provided",
"com.amazon.deequ" % "deequ" % deequVersion.value excludeAll(excludeSpark, excludeScalanlp),
"org.apache.avro" % "avro" % "1.8.2" % "provided",
"com.databricks" %% "spark-xml" % "0.11.0"
)
resolvers ++= Seq(
Resolver.sonatypeRepo("public"),
Resolver.bintrayRepo("spark-packages", "maven"),
"confluent" at "https://packages.confluent.io/maven/"
)
fork := true
javaOptions in Test ++= Seq("-Dspark.master=local[*]", "-Dspark.sql.session.timeZone=UTC", "-Duser.timezone=UTC")
// Assembly settings
Project.inConfig(Test)(baseAssemblySettings)
assemblyMergeStrategy in (Test, assembly) := {
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case PathList("LICENSE", xs@_*) => MergeStrategy.discard
case PathList("META-INF", "services", xs@_*) => MergeStrategy.filterDistinctLines
case PathList("META-INF", xs@_*) => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.first
case _ => MergeStrategy.first
}
assemblyMergeStrategy in assembly := {
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case PathList("LICENSE", xs@_*) => MergeStrategy.discard
case PathList("META-INF", "services", xs@_*) => MergeStrategy.filterDistinctLines
case PathList("META-INF", xs@_*) => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.first
case _ => MergeStrategy.first
}
assemblyShadeRules in (Test, assembly) := Seq(
ShadeRule.rename("com.google.**" -> "shadeio.@1").inAll
)
assemblyJarName in assembly := s"metorikku_${scalaBinaryVersion.value}.jar"
assemblyJarName in (Test, assembly) := s"${name.value}-standalone_${scalaBinaryVersion.value}.jar"
assemblyOption in assembly := (assemblyOption in assembly).value.copy(cacheOutput = false)
assemblyOption in assembly := (assemblyOption in assembly).value.copy(cacheUnzip = false)
assemblyOption in (Test, assembly) := (assemblyOption in (Test, assembly)).value.copy(cacheOutput = false)
assemblyOption in (Test, assembly) := (assemblyOption in (Test, assembly)).value.copy(cacheUnzip = false)
logLevel in assembly := Level.Error
logLevel in (Test, assembly) := Level.Error
// Publish settings
publishMavenStyle := true
credentials += Credentials("Sonatype Nexus Repository Manager",
"oss.sonatype.org",
sys.env.getOrElse("REPO_USER", ""),
sys.env.getOrElse("REPO_PASSWORD", ""))
// Add sonatype repository settings
publishTo := Some(
if (isSnapshot.value){
Opts.resolver.sonatypeSnapshots
}
else {
Opts.resolver.sonatypeStaging
}
)
useGpg := false
pgpPublicRing := baseDirectory.value / "project" / ".gnupg" / "pubring.asc"
pgpSecretRing := baseDirectory.value / "project" / ".gnupg" / "secring.asc"
pgpPassphrase := sys.env.get("PGP_PASS").map(_.toArray)
// Release settings (don't automatically publish upon release)
import ReleaseTransformations._
releaseProcess := Seq[ReleaseStep](
checkSnapshotDependencies,
inquireVersions,
runClean,
runTest,
setReleaseVersion,
commitReleaseVersion,
tagRelease,
// publishArtifacts,
setNextVersion,
commitNextVersion,
pushChanges
)
artifact in (Compile, assembly) := {
val art = (artifact in (Compile, assembly)).value
art.withClassifier(Some("assembly"))
}
addArtifact(artifact in (Compile, assembly), assembly)
artifact in (Test, assembly) := {
val art = (artifact in (Test, assembly)).value
art.withClassifier(Some("standalone"))
}
addArtifact(artifact in (Test, assembly), assembly in Test)
// Fix for SBT run to include the provided at runtime
run in Compile := Defaults.runTask(fullClasspath in Compile, mainClass in (Compile, run), runner in (Compile, run)).evaluated
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/IsContainedIn.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.metric.stepActions.dataQuality.operators
import com.amazon.deequ.checks.Check
import com.yotpo.metorikku.metric.stepActions.dataQuality.Operator
class IsContainedIn(level: Option[String], column: String, allowedValues: Array[String]) extends Operator(level = level) {
override def getCheck(level: String): Check = {
new Check(getLevel(level), "Is contained check for column: %s".format(column)).isContainedIn(column, allowedValues)
}
}
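
// Usage sketch (hypothetical column and values): builds the Deequ check that flags values outside the allowed set.
object IsContainedInExample {
  def main(args: Array[String]): Unit = {
    val check = new IsContainedIn(level = Some("warn"), column = "status", allowedValues = Array("active", "inactive"))
      .getCheck("warn")
    println(check.description)
  }
}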
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/Registrator.scala | <reponame>rluta/metorikku<gh_stars>100-1000
package com.yotpo.metorikku.code.steps
import java.sql.Timestamp
import com.yotpo.metorikku.code.steps.functions.UserDefinedFunctions
import org.apache.spark.sql.functions.udf
object Registrator {
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
ss.udf.register("EPOCH_MILLI_TO_TIMESTAMP", udf[Timestamp, Long](UserDefinedFunctions.epochMilliToTimestamp))
}
}
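
// Usage sketch (hypothetical view/column names): once the step runs, the UDF is available to SQL steps.
object RegistratorExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder().master("local[*]").appName("RegistratorExample").getOrCreate()
    import spark.implicits._
    Registrator.run(spark, "exampleMetric", "unused", None)
    Seq(1584176754000L).toDF("ts_millis").createOrReplaceTempView("events")
    spark.sql("SELECT EPOCH_MILLI_TO_TIMESTAMP(ts_millis) AS ts FROM events").show(false)
  }
}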
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/file/JSONOutputWriter.scala | <filename>src/main/scala/com/yotpo/metorikku/output/writers/file/JSONOutputWriter.scala
package com.yotpo.metorikku.output.writers.file
import com.yotpo.metorikku.configuration.job.output.File
class JSONOutputWriter(props: Map[String, String], outputFile: Option[File])
extends FileOutputWriter(Option(props).getOrElse(Map()) + ("format" -> "json"), outputFile)
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Redshift.scala | <gh_stars>100-1000
package com.yotpo.metorikku.configuration.job.output
case class Redshift(jdbcURL: String,
tempS3Dir: String) {
require(Option(jdbcURL).isDefined, "Redshift Database arguments: jdbcURL is mandatory.")
require(Option(tempS3Dir).isDefined, "Redshift Database arguments: tempS3Dir is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/ValidationRunner.scala | <filename>src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/ValidationRunner.scala
package com.yotpo.metorikku.metric.stepActions.dataQuality
import com.amazon.deequ.checks.{CheckResult, CheckStatus}
import com.amazon.deequ.metrics.DoubleMetric
import com.amazon.deequ.{VerificationResult, VerificationSuite}
import com.yotpo.metorikku.output.writers.file.ParquetOutputWriter
import org.apache.log4j.LogManager
import org.apache.spark.sql.{DataFrame, SparkSession}
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import scala.util.{Success, Try}
case class ValidationRunner() {
type FailedDFHandler = (String, DataFrame, Option[String]) => Unit
private val executingVerificationsMsg = s"Executing verification checks over dataframe %s"
private val validationsPassedMsg = s"The data passed the validations, everything is fine!"
private val validationsFailedMsg = s"There were validation errors in the data, the following constraints were not satisfied:"
private val validationsFailedExceptionMsg = s"Verifications failed over dataframe: %s"
private val cachingDataframeMsg = s"Caching dataframe: %s"
private val log = LogManager.getLogger(this.getClass)
def runChecks(dfName: String,
checks: List[DataQualityCheck],
level: Option[String],
cacheDf: Option[Boolean],
failedDfLocation: Option[String],
failedDFHandler: FailedDFHandler = storeFailedDataFrame): Unit = {
val dqChecks = checks.map {
dq => dq.getCheck(level.getOrElse("warn"))
}
val df = SparkSession.builder().getOrCreate().table(dfName)
cacheDf match {
case Some(false) =>
case _ => {
log.info(cachingDataframeMsg.format(dfName))
df.cache()
}
}
val verificationRunBuilder = VerificationSuite().onData(df).addChecks(dqChecks)
log.info(executingVerificationsMsg.format(dfName))
val verificationResult = verificationRunBuilder.run()
verificationResult.status match {
case CheckStatus.Success =>
log.info(validationsPassedMsg)
case CheckStatus.Error | CheckStatus.Warning =>
Try(failedDFHandler(dfName, df, failedDfLocation)).recover({ case e => log.error("Failed to handle failed dataframe", e) })
logFailedValidations(verificationResult)
case _ =>
}
if (verificationResult.status == CheckStatus.Error) {
throw DataQualityVerificationException(validationsFailedExceptionMsg.format(dfName))
}
}
private def storeFailedDataFrame(dfName: String, df: DataFrame, failedDfLocation: Option[String]) = {
failedDfLocation match {
case None =>
log.warn("Didn't find where to store failed data frame. skipping.")
case Some(prefix) =>
val uniqueName = s"${dfName}_${
LocalDateTime.now().format(
DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmssSSS"))
}"
val writer = new ParquetOutputWriter(Map[String, Any](
"path" -> s"${prefix}/${uniqueName}"
), None)
writer.write(df)
log.warn(s"Failed data frame was written to: ${uniqueName}")
}
}
private def logFailedValidations(verificationResult: VerificationResult) = {
logByLevel(verificationResult.status, validationsFailedMsg)
val results = verificationResult.checkResults
.map { case (_, checkResult) => checkResult }
results
.filter(_.status != CheckStatus.Success)
.foreach { checkResult => logFailedValidationCheck(verificationResult, checkResult) }
}
private def logFailedValidationCheck(verificationResult: VerificationResult, checkResult: CheckResult) = {
val validationCheckFailedMsg = s"${checkResult.check.description} failed"
val doubleMetricColumnConstrainFailedMsg = s"%.1f%% of rows failed to meet the constraint: %s"
val doubleMetricDataSetConstrainFailedMsg = s"Actual value: %f rows of data set failed to meet the constraint: %s"
logByLevel(verificationResult.status, validationCheckFailedMsg)
checkResult.constraintResults.foreach { constraintResult =>
constraintResult.metric match {
case Some(metric: DoubleMetric) =>
metric.value match {
case Success(value) =>
metric.entity.toString match {
case "Column" => logByLevel(verificationResult.status, doubleMetricColumnConstrainFailedMsg.format((100 - (value * 100)), metric.name))
case "Dataset" => logByLevel(verificationResult.status, doubleMetricDataSetConstrainFailedMsg.format(value, metric.name))
case "Mutlicolumn" => logByLevel(verificationResult.status, doubleMetricColumnConstrainFailedMsg.format((100 - (value * 100)), metric.name))
}
case _ =>
}
case _ =>
}
}
}
private def logByLevel(level: CheckStatus.Value, msg: String): Unit = {
level match {
case CheckStatus.Warning => log.warn(msg)
case CheckStatus.Error => log.error(msg)
case _ =>
}
}
case class DataQualityVerificationException(private val message: String = "",
private val cause: Throwable = None.orNull)
extends Exception(message, cause)
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/test/TesterSortData.scala | package com.yotpo.metorikku.test
case class TesterSortData(keys: List[String]) {
def sortEnrichedRows(a: EnrichedRow, b: EnrichedRow): Boolean = {
for (colName <- keys) {
if (a.row.get(colName) != b.row.get(colName)) {
return a.row.getOrElse(colName, 0).toString().hashCode() < b.row.getOrElse(colName, 0).toString().hashCode()
}
}
false
}
def sortStringRows(a: Map[String, String], b: Map[String, String]): Boolean = {
if (a.size != b.size) return false
for (key <- a.keys) {
if (a(key) != b(key)) {
return a(key)< b(key)
}
}
true
}
}
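
// Usage sketch (hypothetical rows): sortStringRows orders stringified rows by their first differing column value.
object TesterSortDataExample {
  def main(args: Array[String]): Unit = {
    val sorter = TesterSortData(List("id"))
    val rows = List(Map("id" -> "2", "name" -> "b"), Map("id" -> "1", "name" -> "a"))
    println(rows.sortWith(sorter.sortStringRows)) // List(Map(id -> 1, name -> a), Map(id -> 2, name -> b))
  }
}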
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/test/Params.scala | <reponame>rluta/metorikku<filename>src/main/scala/com/yotpo/metorikku/configuration/test/Params.scala
package com.yotpo.metorikku.configuration.test
case class Params(variables: Option[Map[String, String]])
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/FailedDFHandlerTest.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.metric.stepActions.dataQuality
import com.yotpo.metorikku.metric.stepActions.dataQuality.operators.HasUniqueness
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterEach, FunSuite}
class FailedDFHandlerTest extends FunSuite with BeforeAndAfterEach {
private var sparkSession: SparkSession = _
Logger.getLogger("org").setLevel(Level.WARN)
override def beforeEach() {
sparkSession = SparkSession.builder().appName("dq tests")
.master("local")
.config("", "")
.getOrCreate()
}
override def afterEach() {
sparkSession.stop()
}
test("given a successful dq check then does not invoke failure handler") {
executeDq(true, false)
}
test("given a failed dq check then invokes failure handler") {
executeDq(false, true)
}
test("given a failed dq check then invokes failure handler even if log is set to warning") {
executeDq(false, true, "warn")
}
test("if handler fails then it doesnt interrupt normal flow and throws correct exception"){
failedHandler()
}
private def failedHandler(): Unit = {
val employeeData = Seq(
("Maria", 1, "Smith", 111, 1111),
("Josh", 1, "Smith", 222, 2222)
)
val sqlContext = sparkSession.sqlContext
import sqlContext.implicits._
val fraction = Some("1.0")
val hasUniquenessCheck = new HasUniqueness(level = Some("error"), columns = Seq("id", "name"), fraction, Some("=="))
val dfName = "employee_data"
val df = employeeData.toDF(dfName, "id", "name", "fake", "fake2")
df.createOrReplaceTempView(dfName)
val exception = intercept[Exception] {
ValidationRunner().runChecks(dfName, List(DataQualityCheck(None, None, hasUniqueness = Some(hasUniquenessCheck))), Some("error"), None, None,
(_, _, _) => {
throw new Exception("Dump error")
})
}
    assert(exception.getMessage().startsWith("Verifications failed over dataframe"))
}
private def executeDq(shouldPass: Boolean, shouldInvoke: Boolean, logLevel:String = "error"): Unit = {
val employeeData = Seq(
("Maria", 1, "Smith", 111, 1111),
("Josh", 1, "Smith", 222, 2222)
)
val sqlContext = sparkSession.sqlContext
import sqlContext.implicits._
val fraction = shouldPass match {
case true => Some("0.0")
case false => Some("1.0")
}
val hasUniquenessCheck = new HasUniqueness(level = Some(logLevel), columns = Seq("id", "name"), fraction, Some("=="))
val dfName = "employee_data"
val df = employeeData.toDF(dfName, "id", "name", "fake", "fake2")
df.createOrReplaceTempView(dfName)
var wasInvoked = false
val runDq = () => ValidationRunner().runChecks(dfName, List(DataQualityCheck(None, None, hasUniqueness = Some(hasUniquenessCheck))), Some(logLevel), None, None,
(_, _, _) => {
wasInvoked = true
})
(shouldPass, logLevel) match {
case (false, "error") =>
intercept[Exception] {
runDq()
}
case (_,_) =>
runDq()
}
assert(wasInvoked == shouldInvoke)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/Catalog.scala | package com.yotpo.metorikku.configuration.job
case class Catalog(database: Option[String])
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/test/TestUtil.scala | <filename>src/main/scala/com/yotpo/metorikku/test/TestUtil.scala
package com.yotpo.metorikku.test
import org.apache.log4j.LogManager
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.{col, when}
import scala.collection.mutable.ArrayBuffer
object TestUtil {
val log = LogManager.getLogger(this.getClass)
def getDuplicatedRowToIndexes(keys: Array[Map[String, String]]): Map[Map[String, String], List[Int]] = {
keys.zipWithIndex.groupBy(s => s._1).filter(x => x._2.length > 1).
mapValues(arrayOfTuples => arrayOfTuples.map(tupleIn => tupleIn._2).toList)
}
def flattenWithoutDuplications(array: Array[List[Int]]): List[Int] = array.flatten.groupBy(identity).keys.toList.sorted
def getRowsFromDf(df: DataFrame): List[Map[String, Any]] = {
df.rdd.map {
dfRow =>
dfRow.getValuesMap[Any](dfRow.schema.fieldNames)
}.collect().toList
}
def getColToMaxLengthValue(rows: List[Map[String, Any]]): Map[String, Int] = {
// the keys of head result should be from the expected format
// (actual results might have fields that are missing in the expected results (those fields need to be ignored)
rows.head.keys.map(colName => {
val valMaxLength = rows.maxBy(c => {
if (c(colName) == null) {
0
} else {
c(colName).toString.length
}
})
colName -> valMaxLength.get(colName).toString.length
}
).toMap
}
def getMismatchedVals(expectedRow: Map[String, Any], actualRow: Map[String, Any], mismatchingCols: ArrayBuffer[String]): ArrayBuffer[String] = {
var res = ArrayBuffer[String]()
for (mismatchCol <- mismatchingCols) {
res +:= s"${mismatchCol} - Expected = ${expectedRow(mismatchCol)}, Actual = ${actualRow(mismatchCol)}"
}
res
}
def getMismatchingColumns(actualRow: Map[String, Any], expectedRowCandidate: Map[String, Any]): ArrayBuffer[String] = {
var mismatchingCols = ArrayBuffer[String]()
for (key <- expectedRowCandidate.keys) {
val expectedValue = Option(expectedRowCandidate.get(key))
val actualValue = Option(actualRow.get(key))
// TODO: support nested Objects and Arrays
if (expectedValue.toString != actualValue.toString) {
mismatchingCols += key
}
}
mismatchingCols
}
def replaceColVal(df: DataFrame, colName: String, currValStr: String, newValStr: String): DataFrame = {
df.withColumn(colName, when(col(colName).equalTo(currValStr), newValStr)
.otherwise(col(colName)))
}
def dfToString(df: DataFrame, size: Int, truncate: Boolean): String = {
val outCapture = new java.io.ByteArrayOutputStream
Console.withOut(outCapture) {
df.show(size, truncate)
}
"\n" + new String(outCapture.toByteArray)
}
}
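
// Usage sketch (hypothetical rows): locating duplicated keys and mismatching columns between expected and actual rows.
object TestUtilExample {
  def main(args: Array[String]): Unit = {
    val keys = Array(Map("id" -> "1"), Map("id" -> "2"), Map("id" -> "1"))
    println(TestUtil.getDuplicatedRowToIndexes(keys)) // Map(Map(id -> 1) -> List(0, 2))
    val expected = Map[String, Any]("id" -> 1, "name" -> "a")
    val actual = Map[String, Any]("id" -> 1, "name" -> "b")
    println(TestUtil.getMismatchingColumns(actual, expected)) // ArrayBuffer(name)
  }
}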
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/WriterFactory.scala | package com.yotpo.metorikku.output
import com.yotpo.metorikku.Job
import com.yotpo.metorikku.configuration.job.Configuration
import com.yotpo.metorikku.configuration.metric.{Output, OutputType}
import com.yotpo.metorikku.exceptions.MetorikkuException
import com.yotpo.metorikku.output.writers.cassandra.CassandraOutputWriter
import com.yotpo.metorikku.output.writers.file._
import com.yotpo.metorikku.output.writers.instrumentation.InstrumentationOutputWriter
import com.yotpo.metorikku.output.writers.jdbc.{JDBCOutputWriter, JDBCQueryWriter}
import com.yotpo.metorikku.output.writers.kafka.KafkaOutputWriter
import com.yotpo.metorikku.output.writers.redis.RedisOutputWriter
import com.yotpo.metorikku.output.writers.redshift.RedshiftOutputWriter
import com.yotpo.metorikku.output.writers.segment.SegmentOutputWriter
import com.yotpo.metorikku.output.writers.elasticsearch.ElasticsearchOutputWriter
object WriterFactory {
// scalastyle:off cyclomatic.complexity
def get(outputConfig: Output, metricName: String, configuration: Configuration, job: Job): Writer = {
val output = outputConfig.name match {
case Some(name) => configuration.outputs.get.get(name).get
case None => configuration.output.getOrElse(com.yotpo.metorikku.configuration.job.Output())
}
val metricOutputOptions = outputConfig.outputOptions.asInstanceOf[Map[String, String]]
val metricOutputWriter = outputConfig.outputType match {
case OutputType.Cassandra => new CassandraOutputWriter(metricOutputOptions, job.sparkSession) //TODO add here cassandra from session
case OutputType.Redshift => new RedshiftOutputWriter(metricOutputOptions, output.redshift)
case OutputType.Redis => new RedisOutputWriter(metricOutputOptions, job.sparkSession) //TODO add here redis from session
case OutputType.Segment => new SegmentOutputWriter(metricOutputOptions, output.segment, job.instrumentationFactory)
case OutputType.File => new FileOutputWriter(metricOutputOptions, output.file)
case OutputType.CSV => new CSVOutputWriter(metricOutputOptions, output.file)
case OutputType.JSON => new JSONOutputWriter(metricOutputOptions, output.file)
case OutputType.Parquet => new ParquetOutputWriter(metricOutputOptions, output.file)
case OutputType.Hudi => new HudiOutputWriter(metricOutputOptions, output.hudi)
case OutputType.Instrumentation => new InstrumentationOutputWriter(
metricOutputOptions,
outputConfig.dataFrameName, metricName, job.instrumentationFactory)
case OutputType.JDBC => new JDBCOutputWriter(metricOutputOptions, output.jdbc)
case OutputType.JDBCQuery => new JDBCQueryWriter(metricOutputOptions, output.jdbc)
case OutputType.Kafka => new KafkaOutputWriter(metricOutputOptions, output.kafka)
case OutputType.Elasticsearch => new ElasticsearchOutputWriter(metricOutputOptions, output.elasticsearch.get)
case OutputType.Catalog => new CatalogWriter(metricOutputOptions)
case _ => throw new MetorikkuException(s"Not Supported Writer ${outputConfig.outputType}")
}
metricOutputWriter.validateMandatoryArguments(metricOutputOptions.asInstanceOf[Map[String, String]])
metricOutputWriter
}
// scalastyle:on cyclomatic.complexity
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/file/FilesInput.scala | package com.yotpo.metorikku.input.readers.file
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.{DataFrame, SparkSession}
case class FilesInput(name: String,
paths: Seq[String],
options: Option[Map[String, String]],
schemaPath: Option[String],
format: Option[String]) extends Reader with FileInputBase {
def read(sparkSession: SparkSession): DataFrame = {
val readFormat = getFormat(format, paths.head)
val reader = sparkSession.read.format(readFormat)
val readOptions = getOptions(readFormat, options)
val schema = getSchemaStruct(schemaPath, sparkSession)
readOptions match {
case Some(opts) => reader.options(opts)
case None =>
}
schema match {
case Some(schemaStruct) => reader.schema(schemaStruct)
case None =>
}
val df = reader.load(paths: _*)
processDF(df, readFormat)
}
}
|
rluta/metorikku | version.sbt | version in ThisBuild := "0.0.127-SNAPSHOT"
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/metric/Configuration.scala | <gh_stars>100-1000
package com.yotpo.metorikku.configuration.metric
case class Configuration(steps: List[Step], output: Option[List[Output]])
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/DataQualityCheck.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.metric.stepActions.dataQuality
import com.amazon.deequ.checks.Check
import com.yotpo.metorikku.metric.stepActions.dataQuality.operators.{HasSize, HasUniqueness, IsComplete, IsUnique, IsContainedIn}
import org.apache.log4j.LogManager
case class DataQualityCheck(
isComplete: Option[IsComplete] = None,
isUnique: Option[IsUnique] = None,
hasSize: Option[HasSize] = None,
hasUniqueness: Option[HasUniqueness] = None,
isContainedIn: Option[IsContainedIn] = None
) {
private val log = LogManager.getLogger(this.getClass)
def getCheck(level: String): Check = {
val operator = Seq(isComplete, isUnique, hasSize, hasUniqueness, isContainedIn).find(
x => x.isDefined
).get.get
try {
operator.getCheck(operator.level.getOrElse(level))
} catch {
case e: Exception => {
log.error("Failed to retrieve check, verify operator usage arguments")
throw e
}
}
}
}
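
// Usage sketch (hypothetical column names), reusing the HasUniqueness constructor shown in the tests;
// exactly one operator should be set per DataQualityCheck instance.
object DataQualityCheckExample {
  def main(args: Array[String]): Unit = {
    val hasUniqueness = new HasUniqueness(level = Some("warn"), columns = Seq("id", "name"),
      fraction = Some("1.0"), operator = Some("=="))
    val check = DataQualityCheck(hasUniqueness = Some(hasUniqueness)).getCheck("warn")
    println(check.description)
  }
}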
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Elasticsearch.scala | package com.yotpo.metorikku.configuration.job.output
case class Elasticsearch(nodes: String, user: Option[String], password: Option[String])
{
require(Option(nodes).isDefined, "Elasticsearch connection: nodes is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/MetorikkuTester.scala | package com.yotpo.metorikku
import com.yotpo.metorikku.configuration.test.ConfigurationParser
import com.yotpo.metorikku.test.Tester
import org.apache.log4j.LogManager
object MetorikkuTester extends App {
lazy val log = LogManager.getLogger(this.getClass)
val configs = ConfigurationParser.parse(args)
configs.foreach(config => {
Tester(config).run
})
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/functions/UserDefinedFunctions.scala | <filename>src/main/scala/com/yotpo/metorikku/code/steps/functions/UserDefinedFunctions.scala
package com.yotpo.metorikku.code.steps.functions
import java.sql.Timestamp
import java.time.Instant
object UserDefinedFunctions {
def epochMilliToTimestamp(timestamp_epoch: Long): Timestamp = {
val instant: Instant = Instant.ofEpochMilli(timestamp_epoch)
Timestamp.from(instant)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/IsUnique.scala | package com.yotpo.metorikku.metric.stepActions.dataQuality.operators
import com.amazon.deequ.checks.Check
import com.yotpo.metorikku.metric.stepActions.dataQuality.Operator
class IsUnique(level: Option[String], column: String) extends Operator(level = level) {
override def getCheck(level: String): Check = {
new Check(getLevel(level), "Uniqueness check for column: %s".format(column)).isUnique(column)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Segment.scala | package com.yotpo.metorikku.configuration.job.output
case class Segment(apiKey: String) {
require(Option(apiKey).isDefined, "Segment API Key is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/test/ErrorMessage.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.test
import org.apache.log4j.LogManager
import org.apache.spark.sql.SparkSession
object ResultsType extends Enumeration {
val expected = Value("Expected")
val actual = Value("Actual")
}
case class MismatchData(expectedIndex: Int, actualIndex: Int,
mismatchingCols: List[String], mismatchingVals: List[String],
keyDataStr: String)
case class InvalidSchemaData(rowIndex: Int, invalidColumnsMissing: List[String], invalidColumnsUnexpected: List[String])
trait ErrorMessage {
val log = LogManager.getLogger(this.getClass)
def toString(): String
def logError(sparkSession: Option[SparkSession] = None): Unit
}
class InvalidKeysNonExistingErrorMessage(tableName: String, invalidCols: List[String], allCols: List[String]) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {}
override def toString(): String = {
s"Defined non existing columns as keys for table ${tableName}: " +
s"The invalid defined keys: ${invalidCols.sortWith(_ < _).mkString(", ")}. " +
s"All columns defined for ${tableName} table: ${allCols.sortWith(_ < _).mkString(", ")}"
}
}
class InvalidSchemaResultsErrorMessage(tableToInvalidSchemaData: Map[String, List[InvalidSchemaData]]) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {}
override def toString: String = {
val invalidTableSchemaMessage = tableToInvalidSchemaData.map { case (tableName, listOfSchemaErrData) =>
s"Table Name = ${tableName} \n" +
listOfSchemaErrData.map(schemaErrData => {
val invalidColumnsUnexpected = schemaErrData.invalidColumnsUnexpected.nonEmpty match {
case true => s"\tExpected row number ${schemaErrData.rowIndex} had the following unexpected columns: " +
s"[${schemaErrData.invalidColumnsUnexpected.mkString(", ")}]\n"
case _ => ""
}
val invalidColumnsMissing = schemaErrData.invalidColumnsMissing.nonEmpty match {
case true => s"\tExpected row number ${schemaErrData.rowIndex} is missing the following expected columns: " +
s"[${schemaErrData.invalidColumnsMissing.mkString(", ")}]\n"
case _ => ""
}
invalidColumnsMissing + invalidColumnsUnexpected
}).mkString("\n")
}
"\nError: Failed while validating the schema of the expected results. \n" +
"All expected results must have an identical structure - same as the columns defined for the first expected result\n" +
s"The following tables had invalid schema: \n${invalidTableSchemaMessage.mkString("\n")}"
}
}
class DuplicatedHeaderErrorMessage() extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {}
override def toString(): String = {
"Error: Found duplications in the results"
}
}
class DuplicationsErrorMessage(resultType: ResultsType.Value, duplicatedRowsToIndexes: Map[Map[String, String], List[Int]],
results: Option[EnrichedRows], tableName: String, keyColumns: KeyColumns) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession]): Unit = {
if (sparkSession.isDefined) {
log.error(toString)
}
log.warn(s"***************** $tableName $resultType results with Duplications *******************")
val indexes = duplicatedRowsToIndexes.flatMap(_._2).toList
val subExpectedError = results.get.getSubTable(indexes :+ results.get.size() - 1)
val expectedKeys = results.get.getHeadRowKeys()
val dfWithId = subExpectedError.toDF(resultType, expectedKeys, sparkSession.get)
log.warn(TestUtil.dfToString(TestUtil.replaceColVal(dfWithId, "row_number", results.get.size().toString, " "), subExpectedError.size, truncate = false))
}
override def toString: String = {
s"$tableName Duplications - ${duplicatedRowsToIndexes.map{case (row, indexes) =>
s"The key [${keyColumns.getRowKeyStr(row)}] was found in the ${resultType} results rows: " +
s"${indexes.map(_ + 1).sortWith(_ < _).mkString(", ")}"}.mkString("\n")}"
}
}
class MismatchedKeyResultsErrorMessage(expectedErrorIndexes: List[Int], actualErrorIndexes: List[Int],
expectedResults: EnrichedRows, actualResults: EnrichedRows,
keyColumns: KeyColumns, tableName: String) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {
val alignmentRowIndexExpected = expectedResults.size()-1
val alignmentRowIndexActual = actualResults.size()-1
EnrichedRows.logSubtableErrors(expectedResults, actualResults,
expectedErrorIndexes :+ alignmentRowIndexExpected, actualErrorIndexes :+ alignmentRowIndexActual, true, sparkSession.get, tableName)
}
override def toString: String = {
expectedErrorIndexes.map(errorRowindex => {
val keyToOutput = keyColumns.getRowKeyStr(expectedResults.getEnrichedRowByIndex(errorRowindex).getRow())
s"Error: Missing expected " +
s"row with the key [${keyToOutput}] - (expected row_number = ${errorRowindex + 1})" }).mkString(",\n") + "\n\n" +
actualErrorIndexes.map(errorRowindex => {
val keyToOutput = keyColumns.getRowKeyStr(actualResults.getEnrichedRowByIndex(errorRowindex).getRow)
s"Error: Got unexpected result - didn't expect to find " +
s"a row with the key [${keyToOutput}] (printed row_number in actual results = ${errorRowindex + 1})"}).mkString(",\n")
}
}
class MismatchedResultsAllColsErrorMsg(expectedResults: EnrichedRows, actualResults: EnrichedRows,
mismatchData: List[MismatchData], tableName: String) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {
val alignmentRowIndexExpected = expectedResults.size()-1
val alignmentRowIndexActual = actualResults.size()-1
EnrichedRows.logSubtableErrors(expectedResults, actualResults,
mismatchData.map(_.expectedIndex) :+ alignmentRowIndexExpected, mismatchData.map(_.actualIndex) :+ alignmentRowIndexActual,
true, sparkSession.get, tableName)
}
override def toString(): String = mismatchData.map(errData => {
s"Error: Failed on expected row number ${errData.expectedIndex + 1} with key " +
s"[${errData.keyDataStr}] - \n" +
s"Column values mismatch on [${errData.mismatchingCols.sortWith(_ < _).mkString(", ")}] fields " +
s"with the values [${errData.mismatchingVals.sortWith(_ < _).mkString(", ")}].\n" +
s"The actual result row with the same key is number ${errData.actualIndex + 1}\n "
}).mkString(",\n")
}
class MismatchedResultsKeysErrMsgMock(errorIndexes: (ResultsType.Value, Int), expectedResult: Map[String, Any],
actualResult: Map[String, Any], keyColumns: KeyColumns) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {}
override def toString: String = {
errorIndexes._1 match {
case ResultsType.expected =>
val keyToOutput = keyColumns.getRowKeyStr(expectedResult)
s"Error: Missing expected " +
s"row with the key [${keyToOutput}] - (expected row_number = ${errorIndexes._2})"
case _ =>
val keyToOutput = keyColumns.getRowKeyStr(actualResult)
s"Error: Got unexpected result - didn't expect to find " +
s"a row with the key [${keyToOutput}] (printed row_number in actual results = ${errorIndexes._2})"
}
}
}
class MismatchedResultsColsErrMsgMock(rowKeyStr: String, expectedRowIndex: Int, actualRowIndex: Int,
mismatchingCols: List[String], mismatchingVals: List[String],
keyColumns: KeyColumns) extends ErrorMessage {
override def logError(sparkSession: Option[SparkSession] = None): Unit = {}
override def toString(): String = {
s"Error: Failed on expected row number ${expectedRowIndex} with key " +
s"[${rowKeyStr}] - \n" +
s"Column values mismatch on [${mismatchingCols.sortWith(_ < _).mkString(", ")}] fields " +
s"with the values [${mismatchingVals.sortWith(_ < _).mkString(", ")}].\n" +
s"The actual result row with the same key is number $actualRowIndex\n "
}
}
object ErrorMessage {
def getErrorMessagesByDuplications(resType: ResultsType.Value, duplicatedRowToIndexes: Map[Map[String, String], List[Int]],
results: EnrichedRows, tableName: String, keyColumns: KeyColumns): Array[ErrorMessage] = {
if (duplicatedRowToIndexes.nonEmpty) {
Array[ErrorMessage](new DuplicationsErrorMessage(resType, duplicatedRowToIndexes, Option(results), tableName, keyColumns))
} else {
Array[ErrorMessage]()
}
}
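  // Sorts the expected and actual rows by the table keys, compares them pairwise and emits an
  // error message for every row whose column values differ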
def getErrorMessagesByMismatchedAllCols(tableKeys: List[String], expectedEnrichedRows: EnrichedRows, actualEnrichedRows: EnrichedRows,
sparkSession: SparkSession, tableName: String): Array[ErrorMessage] = {
val sorter = TesterSortData(tableKeys)
val (sortedExpectedEnrichedRows, sortedActualEnrichedRows) = (expectedEnrichedRows.sortWith(sorter.sortEnrichedRows),
actualEnrichedRows.sortWith(sorter.sortEnrichedRows))
sortedExpectedEnrichedRows.zipWithIndex.flatMap { case (expectedResult, sortedIndex) =>
val expectedIndex = expectedResult.index
val actualIndex = sortedActualEnrichedRows.getEnrichedRowByIndex(sortedIndex).index
val actualResultRow = sortedActualEnrichedRows.getEnrichedRowByIndex(sortedIndex).getRow()
val mismatchingCols = TestUtil.getMismatchingColumns(actualResultRow, expectedResult.row)
if (mismatchingCols.nonEmpty) {
getMismatchedAllColsErrorMsg(List[(Int, Int)]() :+ (expectedIndex, actualIndex), expectedEnrichedRows, actualEnrichedRows,
tableKeys, sparkSession, tableName).map(Some(_))
} else {
None
}
}.flatten.toArray
}
def getMismatchedAllColsErrorMsg(expectedMismatchedActualIndexesMap: List[(Int, Int)], expectedResults: EnrichedRows,
actualResults: EnrichedRows, tableKeys: List[String],
sparkSession: SparkSession, tableName: String): Array[ErrorMessage] = {
val mismatchDataArr = expectedMismatchedActualIndexesMap.map {
case (expIndex, actIndex) => {
val expRow = expectedResults.getEnrichedRowByIndex(expIndex)
val actRow = actualResults.getEnrichedRowByIndex(actIndex)
val mismatchingCols = TestUtil.getMismatchingColumns(actRow.getRow(), expRow.getRow())
val mismatchingVals = TestUtil.getMismatchedVals(expRow.getRow(), actRow.getRow(), mismatchingCols).toList
val keyColumns = KeyColumns(tableKeys)
val tableKeysVal = keyColumns.getKeysMapFromRow(expRow.getRow())
val keyDataStr = tableKeysVal.mkString(", ")
MismatchData(expIndex, actIndex, mismatchingCols.toList, mismatchingVals, keyDataStr)
}
}
Array[ErrorMessage](new MismatchedResultsAllColsErrorMsg(expectedResults, actualResults, mismatchDataArr, tableName))
}
def getErrorMessageByMismatchedKeys(expectedResults: EnrichedRows, actualResults: EnrichedRows,
expErrorIndexes: List[Int], actErrorIndexes: List[Int],
keyColumns: KeyColumns, tableName: String): Array[ErrorMessage] = {
Array[ErrorMessage](new MismatchedKeyResultsErrorMessage(expErrorIndexes, actErrorIndexes, expectedResults, actualResults, keyColumns, tableName))
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/ToAvro.scala | package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.struct
import za.co.absa.abris.avro.functions.to_confluent_avro
import za.co.absa.abris.avro.read.confluent.SchemaManager
object ToAvro {
val message = "You need to send the following parameters to output to Avro format:" +
"table, schema.registry.url, schema.registry.topic, schema.name, schema.namespace " +
"Will create an entry in the schema registry under: <schema.registry.topic>-value or <schema.registry.topic>-key"
private class InputMatcher[K](ks: K*) {
def unapplySeq[V](m: Map[K, V]): Option[Seq[V]] = if (ks.forall(m.contains)) Some(ks.map(m)) else None
}
private val InputMatcher = new InputMatcher("table", "schema.registry.url", "schema.registry.topic", "schema.name", "schema.namespace")
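  // Reads the configured table, encodes its "key" column (when present) and "value" column to
  // Confluent Avro using the schema registry settings from the parameters, and registers the
  // result as a temp view under the step's dataFrameName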
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params.getOrElse(Map.empty[String, String]) match {
case InputMatcher(tableName, schemaRegistryUrl, schemaRegistryTopic, schemaName, schemaNamespace) => {
val dataFrame = ss.table(tableName)
val commonRegistryConfig = Map(
SchemaManager.PARAM_SCHEMA_REGISTRY_URL -> schemaRegistryUrl,
SchemaManager.PARAM_SCHEMA_REGISTRY_TOPIC -> schemaRegistryTopic,
SchemaManager.PARAM_VALUE_SCHEMA_NAME_FOR_RECORD_STRATEGY -> schemaName,
SchemaManager.PARAM_VALUE_SCHEMA_NAMESPACE_FOR_RECORD_STRATEGY -> schemaNamespace,
SchemaManager.PARAM_KEY_SCHEMA_NAME_FOR_RECORD_STRATEGY -> schemaName,
SchemaManager.PARAM_KEY_SCHEMA_NAMESPACE_FOR_RECORD_STRATEGY -> schemaNamespace
)
val keyRegistryConfig = commonRegistryConfig +
(SchemaManager.PARAM_KEY_SCHEMA_NAMING_STRATEGY -> SchemaManager.SchemaStorageNamingStrategies.TOPIC_NAME)
val valueRegistryConfig = commonRegistryConfig +
(SchemaManager.PARAM_VALUE_SCHEMA_NAMING_STRATEGY -> SchemaManager.SchemaStorageNamingStrategies.TOPIC_NAME)
val avroDf: DataFrame =
  if (dataFrame.columns.contains("key")) {
    dataFrame.select(
      to_confluent_avro(col("key"), keyRegistryConfig) as 'key,
      to_confluent_avro(col("value"), valueRegistryConfig) as 'value)
  } else {
    dataFrame.select(
      to_confluent_avro(col("value"), valueRegistryConfig) as 'value)
  }
avroDf.createOrReplaceTempView(dataFrameName)
}
case _ => throw MetorikkuException(message)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/CamelCaseColumnNames.scala | package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
object CamelCaseColumnNames {
val message = "You need to send 1 parameters with the names of the table to change: table"
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params match {
case Some(parameters) =>
val table = parameters("table")
val df = ss.table(table)
val camelCaseColumnNames = df.columns.map(underscoreToCamel)
df.toDF(camelCaseColumnNames: _*).createOrReplaceTempView(dataFrameName)
case None => throw MetorikkuException(message)
}
}
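  // Converts a snake_case column name to camelCase, e.g. "created_at" -> "createdAt"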
private def underscoreToCamel(name: String): String = "_([a-z\\d])".r.replaceAllIn(name, { m =>
m.group(1).toUpperCase()
})
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/elasticsearch/ElasticsearchInput.scala | package com.yotpo.metorikku.input.readers.elasticsearch
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.{DataFrame, SparkSession}
case class ElasticsearchInput(name: String, nodes: String, user: Option[String],
password: Option[String], index: String,
options: Option[Map[String, String]]) extends Reader {
def read(sparkSession: SparkSession): DataFrame = {
var elasticsearchOptions = Map("es.nodes" -> nodes)
if (user.nonEmpty) {
elasticsearchOptions += ("es.net.http.auth.user" -> user.get)
}
if (password.nonEmpty) {
elasticsearchOptions += ("es.net.http.auth.pass" -> password.get)
}
elasticsearchOptions ++= options.getOrElse(Map())
val dbTable = sparkSession.read.format("org.elasticsearch.spark.sql").options(elasticsearchOptions)
dbTable.load(index)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/jdbc/JDBCInput.scala | package com.yotpo.metorikku.input.readers.jdbc
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.{DataFrame, SparkSession}
case class JDBCInput(val name: String, connectionUrl: String, user: String,
password: String, table: String,
options: Option[Map[String, String]]) extends Reader {
def read(sparkSession: SparkSession): DataFrame = {
val url = connectionUrl
val baseDBOptions = Map(
"url" -> url,
"user" -> user,
"password" -> password,
"dbTable" -> table)
val DBOptions = baseDBOptions ++ options.getOrElse(Map())
val dbTable = sparkSession.read.format("jdbc").options(DBOptions)
dbTable.load()
}
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/metric/test/MetricReporterTester.scala | <filename>src/test/scala/com/yotpo/metorikku/metric/test/MetricReporterTester.scala
package com.yotpo.metorikku.metric.test
import com.yotpo.metorikku.metric.MetricReporting
import org.apache.log4j.{Level, LogManager, Logger}
import org.apache.spark.sql.SparkSession
import org.scalatest.{FunSuite, _}
import org.apache.spark.sql.types._
//noinspection ScalaStyle
class MetricReporterTester extends FunSuite with BeforeAndAfterEach {
private val log: Logger = LogManager.getLogger(this.getClass)
private var sparkSession : SparkSession = _
Logger.getLogger("org").setLevel(Level.WARN)
override def beforeEach() {
sparkSession = SparkSession.builder().appName("udf tests")
.master("local")
.config("", "")
.getOrCreate()
}
test("Test getMaxDataframeTime") {
val schema = StructType(Array(
StructField("userId", IntegerType, true),
StructField("movieId", IntegerType, true),
StructField("rating", DoubleType, true),
StructField("created_at", LongType, true),
StructField("updated_at", TimestampType, true)))
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val df = sparkSession.read.format("csv").option("header", "true").schema(schema).load("src/test/configurations/mocks/ratings_time.csv")
val metricReport = new MetricReporting()
val maxUpdatedAt = metricReport.getMaxDataframeTime(df, Option("updated_at"), None)
val maxCreatedAt = metricReport.getMaxDataframeTime(df, Option("created_at"), Option("SECONDS"))
df.cache
assert(maxUpdatedAt == 1462488216000L)
assert(maxCreatedAt == 1260759205000L)
}
test("Test getMaxDataframeTime FAILs with invalid reportLagTimeColumn Units specified") {
val schema = StructType(Array(
StructField("userId", IntegerType, true),
StructField("movieId", IntegerType, true),
StructField("rating", DoubleType, true),
StructField("created_at", LongType, true),
StructField("updated_at", TimestampType, true)))
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val sqlContext = sparkSession.sqlContext
val df = sparkSession.read.format("csv").option("header", "true").schema(schema).load("src/test/configurations/mocks/ratings_time.csv")
val metricReport = new MetricReporting()
val thrown = intercept[Exception] {
metricReport.getMaxDataframeTime(df, Option("created_at"), Option("HOUR"))
}
assert(thrown.getMessage.startsWith("Some(HOUR) is not a legal argument for units, use one of the following: [SECONDS,MILLISECONDS]"))
}
test("Test getMaxDataframeTime FAILs reportLagTimeColumn is not defined") {
val schema = StructType(Array(
StructField("userId", IntegerType, true),
StructField("movieId", IntegerType, true),
StructField("rating", DoubleType, true),
StructField("created_at", LongType, true),
StructField("updated_at", TimestampType, true)))
val sparkSession = SparkSession.builder.appName("test").getOrCreate()
val df = sparkSession.read.format("csv").option("header", "true").schema(schema).load("src/test/configurations/mocks/ratings_time.csv")
val metricReport = new MetricReporting()
val thrown = intercept[Exception] {
metricReport.getMaxDataframeTime(df, None, None)
}
assert(thrown.getMessage.startsWith("Failed to report lag time, reportLagTimeColumn is not defined"))
}
override def afterEach() {
sparkSession.stop()
}
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/tags/UnsupportedInCurrentVersion.scala | package com.yotpo.metorikku.tags
import org.scalatest.Tag
object UnsupportedInCurrentVersion extends Tag("com.yotpo.metorikku.tags.UnsupportedInCurrentVersion")
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/operators/Evaluator.scala | package com.yotpo.metorikku.metric.stepActions.dataQuality.operators
case class Evaluator() {
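  // Returns a predicate that compares its input against `evaluatee` using the given comparison operator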
def dqAssertion[N<%Ordered[N]](operator: String, evaluatee: N): N => Boolean = operator match {
case "==" => {_ == evaluatee}
case "!=" => {_ != evaluatee}
case ">=" => {_ >= evaluatee}
case ">" => {_ > evaluatee}
case "<=" => {_ <= evaluatee}
case "<" => {_ < evaluatee}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/Sql.scala | package com.yotpo.metorikku.metric.stepActions
import com.yotpo.metorikku.metric.StepAction
import com.yotpo.metorikku.metric.stepActions.dataQuality.DataQualityCheckList
import org.apache.log4j.LogManager
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
* Represents the SQL query to run
*/
case class Sql(query: String, dataFrameName: String, showPreviewLines: Int,
cacheOnPreview: Option[Boolean],
showQuery: Option[Boolean],
dq: Option[DataQualityCheckList]
) extends StepAction[DataFrame] {
val log = LogManager.getLogger(this.getClass)
override def run(sparkSession: SparkSession): DataFrame = {
showQuery match {
case Some(true) => log.info(s"Query for step ${dataFrameName}:\n${query}")
case _ =>
}
val newDf = sparkSession.sqlContext.sql(query)
newDf.createOrReplaceTempView(dataFrameName)
printStep(newDf, dataFrameName)
runDQValidation(dataFrameName, dq)
newDf
}
private def printStep(stepResult: DataFrame, stepName: String): Unit = {
if (showPreviewLines > 0) {
log.info(s"Previewing step: ${stepName}")
stepResult.printSchema()
cacheOnPreview match {
case Some(true) => {
log.info(s"Caching step: ${stepName}")
stepResult.cache()
}
case _ =>
}
stepResult.isStreaming match {
case true => log.warn("Can't show preview when using a streaming source")
case false => stepResult.show(showPreviewLines, truncate = false)
}
}
}
private def runDQValidation(dfName: String, dqDef: Option[DataQualityCheckList]): Unit = {
dqDef match {
case Some(dq) => dq.runChecks(dfName)
case _ =>
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/instrumentation/InfluxDBConfig.scala | <filename>src/main/scala/com/yotpo/metorikku/configuration/job/instrumentation/InfluxDBConfig.scala<gh_stars>100-1000
package com.yotpo.metorikku.configuration.job.instrumentation
case class InfluxDBConfig(url: String,
username: Option[String],
password: Option[String],
dbName: String)
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/code/steps/AlignTables.scala | package com.yotpo.metorikku.code.steps
import com.yotpo.metorikku.exceptions.MetorikkuException
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.{col, lit}
object AlignTables {
val message = "You need to send 2 parameters with the names of the dataframes to align: from, to"
private def align(fromCols: Array[String], toCols: Array[String]): Array[Column] = {
toCols.map( {
case x if fromCols.contains(x) => col(x)
// scalastyle:off null
case y => lit(null).as(y)
// scalastyle:on null
})
}
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
params match {
case Some(parameters) =>
val fromName = parameters("from")
val toName = parameters("to")
val from = ss.table(fromName)
val to = ss.table(toName)
val aligned = from.select(align(from.columns, to.columns): _*)
aligned.createOrReplaceTempView(dataFrameName)
case None => throw MetorikkuException(message)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Hudi.scala | package com.yotpo.metorikku.configuration.job.output
case class Hudi(dir: String,
parallelism: Option[String],
maxFileSize: Option[String],
operation: Option[String],
storageType: Option[String],
maxVersions: Option[String],
hiveDB: Option[String],
hiveJDBCURL: Option[String],
hiveUserName: Option[String],
hivePassword: Option[String],
hiveSync: Option[Boolean],
manualHiveSync: Option[Boolean],
manualHiveSyncPartitions: Option[Map[String,String]],
deletePendingCompactions: Option[Boolean],
options: Option[Map[String, String]]
) {
require(Option(dir).isDefined, "Hudi file directory: dir is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/redis/RedisOutputWriter.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.output.writers.redis
import com.redislabs.provider.redis._
import com.yotpo.metorikku.Job
import com.yotpo.metorikku.configuration.job.output.Redis
import com.yotpo.metorikku.output.{WriterSessionRegistration, Writer}
import org.apache.log4j.LogManager
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.util.parsing.json.JSONObject
object RedisOutputWriter extends WriterSessionRegistration {
def addConfToSparkSession(sparkSessionBuilder: SparkSession.Builder, redisConf: Redis): Unit = {
sparkSessionBuilder.config(s"redis.host", redisConf.host)
redisConf.port.foreach(_port => sparkSessionBuilder.config(s"redis.port", _port))
redisConf.auth.foreach(_auth => sparkSessionBuilder.config(s"redis.auth", _auth))
redisConf.db.foreach(_db => sparkSessionBuilder.config(s"redis.db", _db))
}
}
class RedisOutputWriter(props: Map[String, String], sparkSession: SparkSession) extends Writer {
case class RedisOutputProperties(keyColumn: String)
val log = LogManager.getLogger(this.getClass)
val redisOutputOptions = RedisOutputProperties(props("keyColumn"))
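  // Writes every row as a Redis key/value pair: the key column's value becomes the Redis key and
  // the remaining columns are serialized into a JSON document as the value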
override def write(dataFrame: DataFrame): Unit = {
if (isRedisConfExist()) {
val columns = dataFrame.columns.filter(_ != redisOutputOptions.keyColumn)
import dataFrame.sparkSession.implicits._
val redisDF = dataFrame.na.fill(0).na.fill("")
.map(row => row.getAs[Any](redisOutputOptions.keyColumn).toString ->
JSONObject(row.getValuesMap(columns)).toString()
)
log.info(s"Writting Dataframe into redis with key ${redisOutputOptions.keyColumn}")
redisDF.sparkSession.sparkContext.toRedisKV(redisDF.toJavaRDD)
} else {
log.error(s"Redis Configuration does not exists")
}
}
private def isRedisConfExist(): Boolean = sparkSession.conf.getOption(s"redis.host").isDefined
}
|
rluta/metorikku | src/test/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/IsCompleteTest.scala | package com.yotpo.metorikku.metric.stepActions.dataQuality
import com.yotpo.metorikku.metric.stepActions.dataQuality.operators.IsComplete
import com.yotpo.metorikku.tags.UnsupportedInCurrentVersion
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterEach, FunSuite}
class IsCompleteTest extends FunSuite with BeforeAndAfterEach {
private var sparkSession : SparkSession = _
Logger.getLogger("org").setLevel(Level.WARN)
override def beforeEach() {
sparkSession = SparkSession.builder().appName("dq tests")
.master("local")
.config("", "")
.getOrCreate()
}
private def validateIsCompleteOverDf(employeeData: Seq[(String, Int, Integer, Int, Int)], level: String): Unit = {
val sqlContext = sparkSession.sqlContext
val isCompleteCheck = new IsComplete(level = Some(level),column = "salary")
val dqCheckDefinitionList = DataQualityCheckList(List[DataQualityCheck](DataQualityCheck(isComplete = Some(isCompleteCheck))), None, None)
import sqlContext.implicits._
val dfName = "employee_data"
val df = employeeData.toDF(dfName, "id", "salary", "fake", "fake2")
df.createOrReplaceTempView(dfName)
dqCheckDefinitionList.runChecks(dfName)
}
test("is_complete on a non-unique field with level error should raise exception",UnsupportedInCurrentVersion) {
val employeeData = Seq(
("James", 1, null.asInstanceOf[Integer], 111, 1111),
("Maria", 2, Integer.valueOf(22), 222, 2222)
)
val level = "error"
val thrown = intercept[Exception] {
validateIsCompleteOverDf(employeeData, level)
}
assert(thrown.getMessage.startsWith("Verifications failed over dataframe: employee_data"))
}
test("is_complete on a unique field with level error should not raise exception",UnsupportedInCurrentVersion) {
val employeeData = Seq(
("James", 1, Integer.valueOf(11), 111, 1111),
("Maria", 2, Integer.valueOf(22), 222, 2222)
)
val level = "error"
validateIsCompleteOverDf(employeeData, level)
}
test("is_complete on a non-unique field with level warn should not raise exception",UnsupportedInCurrentVersion) {
val employeeData = Seq(
("James", 1, null.asInstanceOf[Integer], 111, 1111),
("Maria", 2, Integer.valueOf(22), 222, 2222)
)
val level = "warn"
validateIsCompleteOverDf(employeeData, level)
}
override def afterEach() {
sparkSession.stop()
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Cassandra.scala | package com.yotpo.metorikku.configuration.job.output
case class Cassandra(host: String,
username: Option[String],
password: Option[String]) {
require(Option(host).isDefined, "Cassandra database connection: host is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/metric/ConfigurationParser.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.configuration.metric
import java.io.{File, FileNotFoundException}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.yotpo.metorikku.exceptions.MetorikkuInvalidMetricFileException
import com.yotpo.metorikku.metric.Metric
import com.yotpo.metorikku.utils.FileUtils
import org.apache.commons.io.FilenameUtils
import org.apache.log4j.{LogManager, Logger}
object ConfigurationParser {
val log: Logger = LogManager.getLogger(this.getClass)
val validExtensions = Seq("json", "yaml", "yml")
def isValidFile(path: File): Boolean = {
val fileName = path.getName
val extension = FilenameUtils.getExtension(fileName)
validExtensions.contains(extension)
}
def parse(path: String): Metric = {
val hadoopPath = FileUtils.getHadoopPath(path)
val fileName = hadoopPath.getName
val metricDir = FileUtils.isLocalFile(path) match {
case true => Option(new File(path).getParentFile)
case false => None
}
log.info(s"Initializing Metric file $fileName")
try {
val metricConfig = parseFile(path)
Metric(metricConfig, metricDir, FilenameUtils.removeExtension(fileName))
} catch {
case e: FileNotFoundException => throw e
case e: Exception => throw MetorikkuInvalidMetricFileException(s"Failed to parse metric file $fileName", e)
}
}
private def parseFile(fileName: String): Configuration = {
FileUtils.getObjectMapperByExtension(fileName) match {
case Some(mapper) => {
mapper.registerModule(DefaultScalaModule)
mapper.readValue(FileUtils.readConfigurationFile(fileName), classOf[Configuration])
}
case None => throw MetorikkuInvalidMetricFileException(s"Unknown extension for file $fileName")
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/test/Mock.scala | package com.yotpo.metorikku.configuration.test
case class Mock(name: String, path: String, var streaming: Option[Boolean]) {
streaming = Option(streaming.getOrElse(false))
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/FileDateRange.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.configuration.job.input
import com.fasterxml.jackson.annotation.JsonProperty
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.file.FilesInput
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
import org.joda.time.{DateTime, Period}
import scala.collection.mutable
case class FileDateRange(template: String,
@JsonProperty("date_range") dateRange: DateRange,
options: Option[Map[String, String]],
schemaPath: Option[String],
format: Option[String]) extends InputConfig {
override def getReader(name: String): Reader = FilesInput(name,
dateRange.replace(template),
options,
schemaPath,
format)
}
class DateRange(@JsonProperty("format") _format: String,
@JsonProperty("startDate") _startDate: String,
@JsonProperty("endDate") _endDate: String) {
var format: DateTimeFormatter = DateTimeFormat.forPattern(_format)
var startDate: DateTime = format.parseDateTime(_startDate)
var endDate: DateTime = format.parseDateTime(_endDate)
require(startDate.isBefore(endDate), s"startDate:${startDate} must be earlier than endDate:${endDate}")
/**
* Generate a sequence of strings
*
* @param templateString example: "/analytics/user_agg/%s/"
* @return
*/
def replace(templateString: String): Seq[String] = {
val stringSequence = mutable.ArrayBuffer.empty[String]
val range = dateRange(startDate, endDate, Period.days(1))
range.foreach(dateTime => {
stringSequence.append(templateString.format(dateTime.toString(format)))
})
stringSequence
}
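  // Generates all dates from `from` up to and including `to`, stepping by `step`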
def dateRange(from: DateTime, to: DateTime, step: Period): Iterator[DateTime] =
Iterator.iterate(from)(_.plus(step)).takeWhile(!_.isAfter(to))
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/Redis.scala | <reponame>rluta/metorikku
package com.yotpo.metorikku.configuration.job.output
case class Redis(host: String,
port: Option[String],
auth: Option[String],
db: Option[String]) {
require(Option(host).isDefined, "Redis database connection: host is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/output/File.scala | package com.yotpo.metorikku.configuration.job.output
case class File(dir: String,
checkpointLocation: Option[String]) {
require(Option(dir).isDefined, "Output file directory: dir is mandatory.")
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/Periodic.scala | package com.yotpo.metorikku.configuration.job
import com.yotpo.metorikku.exceptions.MetorikkuException
import scala.concurrent.duration.Duration
case class Periodic(triggerDuration: Option[String]) {
def getTriggerDurationInSeconds(): Long = {
try {
Duration(triggerDuration.get).toSeconds
} catch {
case e: Exception => throw MetorikkuException("Invaiid periodic trigger duration", e)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/DeequFactory.scala | package com.yotpo.metorikku.metric
import com.yotpo.metorikku.metric.stepActions.dataQuality.DataQualityCheckList
import org.apache.log4j.Logger
class DeequFactory(log: Logger,
failedDFLocation: Option[String] = None,
ignoreDeequeValidations: Option[Boolean] = None) {
def generateDeequeList(dq: Option[DataQualityCheckList]
): Option[DataQualityCheckList] = {
ignoreDeequeValidations match {
case Some(true) => {
log.info("Skipping Deequ validations for this metric")
None
}
case _ => {
dq match {
case Some(value) => {
Option(value.copy(failedDfLocation = value.failedDfLocation.orElse(failedDFLocation)))
}
case None => None
}
}
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/input/readers/cassandra/CassandraInput.scala | <filename>src/main/scala/com/yotpo/metorikku/input/readers/cassandra/CassandraInput.scala
package com.yotpo.metorikku.input.readers.cassandra
import com.yotpo.metorikku.input.Reader
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.cassandra._
case class CassandraInput(name: String, host: String, user: Option[String],
password: Option[String], table: String, keySpace: String,
options: Option[Map[String, String]]) extends Reader {
def read(sparkSession: SparkSession): DataFrame = {
var cassandraOptions = Map("spark.cassandra.connection.host" -> host)
if (user.nonEmpty) {
cassandraOptions += ("spark.cassandra.auth.username" -> user.get)
}
if (password.nonEmpty) {
cassandraOptions += ("spark.cassandra.auth.password" -> password.get)
}
cassandraOptions ++= options.getOrElse(Map())
sparkSession.setCassandraConf(name, keySpace, cassandraOptions)
val dbTable = sparkSession.read.format("org.apache.spark.sql.cassandra").options(Map(
"table" -> table,
"keyspace" -> keySpace,
"cluster" -> name
))
dbTable.load()
}
}
|
rluta/metorikku | examples/udf/Example.scala | <filename>examples/udf/Example.scala
package com.yotpo.udf.test
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf
object TestUDF {
def addZPrefix(s: String): String = {
"Z" + s
}
def run(ss: org.apache.spark.sql.SparkSession, metricName: String, dataFrameName: String, params: Option[Map[String, String]]): Unit = {
ss.udf.register(dataFrameName, udf[String, String](addZPrefix))
}
def main(args: Array[String]): Unit = {
println("Metorikku UDF example")
}
} |
rluta/metorikku | src/main/scala/com/yotpo/metorikku/output/writers/jdbc/JDBCQueryWriter.scala | <filename>src/main/scala/com/yotpo/metorikku/output/writers/jdbc/JDBCQueryWriter.scala
package com.yotpo.metorikku.output.writers.jdbc
import java.sql.{Date, DriverManager, PreparedStatement, Timestamp}
import com.yotpo.metorikku.configuration.job.output.JDBC
import com.yotpo.metorikku.output.Writer
import org.apache.log4j.LogManager
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types.{ArrayType, BinaryType, MapType, StructType}
class JDBCQueryWriter(props: Map[String, String], config: Option[JDBC]) extends Writer {
case class JDBCQueryProperties(query: String, maxBatchSize: Int, minPartitions: Option[Int], maxPartitions: Option[Int])
@transient lazy val log = LogManager.getLogger(this.getClass)
val defaultMaxBatchSize = 500
val options = JDBCQueryProperties(props("query"),
props.getOrElse("maxBatchSize", defaultMaxBatchSize).asInstanceOf[Int],
props.get("minPartitions").asInstanceOf[Option[Int]],
props.get("maxPartitions").asInstanceOf[Option[Int]])
override def write(dataFrame: DataFrame): Unit = {
config match {
case Some(config) =>
alignPartitions(dataFrame, options.minPartitions, options.maxPartitions).
foreachPartition{ partition: Iterator[Row] =>
val conn = DriverManager.getConnection(config.connectionUrl, config.user, config.password)
val stmt = conn.prepareStatement(options.query)
partition.grouped(options.maxBatchSize).foreach(batch => {
batch.foreach(row => {
for (i <- 1 to row.size) {
addValueToStatement(row.get(i-1), stmt, i)
}
stmt.addBatch()
})
stmt.executeBatch()
})
stmt.close()
conn.close()
}
case None => log.error("JDBC QUERY file configuration were not provided")
}
}
// scalastyle:off cyclomatic.complexity
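  // Binds a single Spark Row value to the PreparedStatement parameter at (1-based) index i,
  // dispatching on the runtime type of the value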
def addValueToStatement(v: Any, stmt: PreparedStatement, i: Int): Unit = {
v match {
case v: Boolean => stmt.setBoolean(i, v.asInstanceOf[Boolean])
case v: Byte => stmt.setByte(i, v.asInstanceOf[Byte])
case v: Short => stmt.setShort(i, v.asInstanceOf[Short])
case v: Int => stmt.setInt(i, v.asInstanceOf[Int])
case v: Float => stmt.setFloat(i, v.asInstanceOf[Float])
case v: Double => stmt.setDouble(i, v.asInstanceOf[Double])
case v: String => stmt.setString(i, v.asInstanceOf[String])
case v: BigDecimal => stmt.setBigDecimal(i, v.asInstanceOf[java.math.BigDecimal])
case v: Date => stmt.setDate(i, v.asInstanceOf[Date])
case v: Timestamp => stmt.setTimestamp(i, v.asInstanceOf[Timestamp])
case v: BinaryType => stmt.setBytes(i, v.asInstanceOf[Array[Byte]])
case v: ArrayType => stmt.setString(i, v.asInstanceOf[ArrayType].json)
case v: MapType => stmt.setString(i, v.asInstanceOf[MapType].json)
case v: StructType => stmt.setString(i, v.asInstanceOf[StructType].json)
// NULLs
case _ => stmt.setObject(i, v)
}
}
// scalastyle:on cyclomatic.complexity
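  // Keeps the DataFrame's partition count within the configured bounds: repartitions up to
  // minPartitions or coalesces down to maxPartitions when needed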
def alignPartitions(dataFrame: DataFrame, minPartitions: Option[Int], maxPartitions: Option[Int] ): DataFrame = {
val current = dataFrame.rdd.getNumPartitions
if (minPartitions.isDefined && current < minPartitions.get) {
dataFrame.repartition(minPartitions.get)
} else if (maxPartitions.isDefined && current > maxPartitions.get) {
dataFrame.coalesce(maxPartitions.get)
} else {
dataFrame
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/test/EnrichedRow.scala | <filename>src/main/scala/com/yotpo/metorikku/test/EnrichedRow.scala
package com.yotpo.metorikku.test
import com.yotpo.metorikku.test.TestUtil.log
import org.apache.spark
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField}
import scala.collection.JavaConverters._
import scala.collection.Seq
import scala.collection.immutable.ListMap
import scala.reflect.runtime.universe._
case class EnrichedRow(row: Map[String, Any], index: Int) {
def getRowSubsetByKeys(wantedKeys: Iterable[String]): EnrichedRow = {
if (row.keys != wantedKeys) {
val wantedRow = wantedKeys.map(key => key -> row(key)).toMap
EnrichedRow(wantedRow, index)
}
else {
this
}
}
def getRow(): Map[String, Any] = { row }
}
case class EnrichedRows(enrichedRows: List[EnrichedRow]) {
def size(): Int = {
enrichedRows.size
}
def sortWith(lt: (EnrichedRow, EnrichedRow) => Boolean): EnrichedRows = {
EnrichedRows(enrichedRows.sortWith(lt))
}
def zipWithIndex: List[(EnrichedRow, Int)] = {
enrichedRows.zipWithIndex
}
def getHeadRowKeys(): List[String] = {
enrichedRows.head.row.keys.toList
}
def getEnrichedRowByIndex(index: Int): EnrichedRow = {
enrichedRows.lift(index).get
}
def getSubTable(indexesToCollect: Seq[Int]): EnrichedRows = {
assert(indexesToCollect.nonEmpty)
EnrichedRows(indexesToCollect.map(index => enrichedRows(index)).toList)
}
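  // Appends a whitespace-only row (one entry per column, as wide as that column's longest value)
  // so expected/actual tables are rendered with the same column widths when logged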
def addAlignmentRow(columnToMaxLengthValueMap: Map[String, Int]): EnrichedRows = {
val whitespacesRow = columnToMaxLengthValueMap.map { case (columnName, maxColValLength) =>
val sb = new StringBuilder
for (_ <- 0 to maxColValLength) {
sb.append(" ")
}
columnName -> sb.toString
}
val alignmentEnrichedRow = EnrichedRow(whitespacesRow, enrichedRows.size)
EnrichedRows(enrichedRows :+ alignmentEnrichedRow)
}
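  // Converts the enriched rows into a string-typed DataFrame with a leading "row_number" column;
  // for actual results, columns not declared in the expected schema are dropped first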
def toDF(resultsType: ResultsType.Value,
schemaColumns: List[String], sparkSession: SparkSession): DataFrame = {
val reducedEnrichedRows = resultsType match {
case ResultsType.actual =>
enrichedRows.map(mapRes => mapRes.getRowSubsetByKeys(schemaColumns)) //remove undeclared columns
case _ => enrichedRows
}
val rowIdField = "row_number"
val mapWithIndexes = reducedEnrichedRows.map(enrichedRow => {
ListMap[String, String](rowIdField -> (enrichedRow.index + 1).toString) ++
enrichedRow.row.mapValues { v => if (v == null) "" else v.toString }
})
val allSchemaKeys = (rowIdField +: schemaColumns)
val rowsOrdered = mapWithIndexes.map(m => allSchemaKeys.map(column => m(column)))
val rows = rowsOrdered.map(m => spark.sql.Row(m: _*))
val x: java.util.List[Row] = rows.asJava
val schema = org.apache.spark.sql.types.StructType(allSchemaKeys.map(fieldName => StructField(fieldName, StringType, nullable = true)))
sparkSession.createDataFrame(x, schema)
}
def logErrorByResType(resType: ResultsType.Value, indexesOfErroredRows: Seq[Int],
columns: List[String], sparkSession: SparkSession, tableName: String): Unit = {
log.warn(s"********************** $tableName $resType results with Mismatches ************************")
val indexesToCollect = indexesOfErroredRows.sorted
val subtableErrored = getSubTable(indexesToCollect)
val subDF = subtableErrored.toDF(resType, columns, sparkSession)
log.warn(TestUtil.dfToString(TestUtil.replaceColVal(subDF, "row_number", size().toString, " "), indexesOfErroredRows.size + 1, truncate = false))
}
}
object EnrichedRows {
def apply(allRows: List[Map[String, Any]])(implicit tag: TypeTag[Any]): EnrichedRows = {
EnrichedRows(allRows.zipWithIndex.map { case (row, index) => EnrichedRow(row, index) })
}
def logSubtableErrors(sortedExpectedResults: EnrichedRows, sortedActualResults: EnrichedRows,
errorsIndexArrExpected: Seq[Int], errorsIndexArrActual: Seq[Int], redirectDfShowToLogger: Boolean,
sparkSession: SparkSession, tableName: String): Unit = {
val expectedCols = sortedExpectedResults.getHeadRowKeys()
if (errorsIndexArrExpected.nonEmpty) {
sortedExpectedResults.logErrorByResType(ResultsType.expected, errorsIndexArrExpected, expectedCols, sparkSession, tableName)
if (errorsIndexArrActual.nonEmpty) {
sortedActualResults.logErrorByResType(ResultsType.actual, errorsIndexArrActual, expectedCols, sparkSession, tableName)
}
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/MetricReporting.scala | package com.yotpo.metorikku.metric
import java.util.concurrent.TimeUnit
import com.yotpo.metorikku.exceptions.{MetorikkuException, MetorikkuWriteFailedException}
import com.yotpo.metorikku.instrumentation.InstrumentationProvider
import org.apache.log4j.LogManager
import org.apache.spark.sql.DataFrame
class MetricReporting {
val log = LogManager.getLogger(this.getClass)
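  // Returns the maximum value of the lag column in epoch milliseconds: when units are provided the
  // column is read as a Long and converted to milliseconds, otherwise it is read as a Timestamp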
def getMaxDataframeTime(dataFrame: DataFrame, reportLagTimeColumn: Option[String],
reportLagTimeColumnUnits:Option[String]): Long ={
reportLagTimeColumn match {
case Some(timeColumn) => {
dataFrame.cache()
try {
reportLagTimeColumnUnits match {
case Some(units) => TimeUnit.valueOf(units) match {
case TimeUnit.MILLISECONDS => TimeUnit.MILLISECONDS.toMillis(dataFrame.agg({timeColumn -> "max"}).collect()(0).getLong(0))
case TimeUnit.SECONDS => TimeUnit.SECONDS.toMillis(dataFrame.agg({timeColumn -> "max"}).collect()(0).getLong(0))
case _ => throw MetorikkuException("Unsupported time unit type " + TimeUnit.valueOf(units))
}
case _ => dataFrame.agg({timeColumn -> "max"}).collect()(0).getTimestamp(0).getTime()
}
} catch {
case e: ClassCastException => throw new ClassCastException(s"Lag instrumentation column -${timeColumn} " +
s"cannot be cast to spark.sql.Timestamp or spark.sql.Long")
case e: IllegalArgumentException => throw new MetorikkuWriteFailedException(
s"${reportLagTimeColumnUnits} is not a legal argument for units, use one of the following: [SECONDS,MILLISECONDS]")
}
}
case _=> throw MetorikkuWriteFailedException("Failed to report lag time, reportLagTimeColumn is not defined")
}
}
def reportLagTime(dataFrame: DataFrame, reportLagTimeColumn: Option[String],
reportLagTimeColumnUnits:Option[String],
instrumentationProvider: InstrumentationProvider) : Unit ={
dataFrame.isEmpty match {
case false => {
val maxDataframeTime = getMaxDataframeTime(dataFrame, reportLagTimeColumn, reportLagTimeColumnUnits)
log.info(s"Max column ${reportLagTimeColumn} value is ${maxDataframeTime} for ${dataFrame}")
val lag = System.currentTimeMillis - maxDataframeTime
log.info(s"Reporting lag value: ${lag} for ${dataFrame}")
instrumentationProvider.gauge(name = "lag", lag)
}
case true => instrumentationProvider.gauge(name = "lag", 0)
}
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/instrumentation/StreamingQueryMetricsListener.scala | <filename>src/main/scala/com/yotpo/metorikku/instrumentation/StreamingQueryMetricsListener.scala
package com.yotpo.metorikku.instrumentation
import org.apache.log4j.{LogManager, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener._
object StreamingQueryMetricsListener {
val log: Logger = LogManager.getLogger(this.getClass)
def init(sparkSession: SparkSession, instrumentationProvider: InstrumentationProvider): Unit = {
val listener = new StreamingQueryMetricsListener(instrumentationProvider)
sparkSession.streams.addListener(listener)
log.info(s"Initialize stream listener")
}
}
class StreamingQueryMetricsListener(instrumentationProvider: InstrumentationProvider) extends StreamingQueryListener {
@transient lazy val log = org.apache.log4j.LogManager.getLogger(this.getClass)
def onQueryStarted(event: QueryStartedEvent): Unit = {
}
def onQueryTerminated(event: QueryTerminatedEvent): Unit = {
event.exception match {
case Some(e) =>
instrumentationProvider.count(name = "QueryExceptionCounter", value = 1)
log.error("Query failed with exception: " + e)
case None =>
instrumentationProvider.count(name = "QueryStopCounter", value = 1)
}
}
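  // Reports per-batch throughput metrics: the number of input rows and the processed rows per second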
def onQueryProgress(event: QueryProgressEvent): Unit = {
val numInputRows = event.progress.numInputRows
instrumentationProvider.gauge(name = "InputEventsCount", value = numInputRows)
val processedRowsPerSecond = event.progress.processedRowsPerSecond
instrumentationProvider.gauge(name = "ProcessedEventsPerSecond", value = processedRowsPerSecond.toLong)
}
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/utils/HudiUtils.scala | package com.yotpo.metorikku.utils
import org.apache.hudi.avro.model.HoodieCompactionPlan
import org.apache.hudi.common.table.timeline.HoodieInstant
import org.apache.hudi.common.table.timeline.HoodieInstant.State
import org.apache.hudi.common.table.{HoodieTableMetaClient, HoodieTimeline}
import org.apache.hudi.common.util.CompactionUtils
import org.apache.hudi.common.util.collection.ImmutablePair
import org.apache.hudi.config.HoodieWriteConfig
import org.apache.hudi.exception.TableNotFoundException
import org.apache.hudi.table.HoodieTable
import org.apache.log4j.LogManager
import org.apache.spark.SparkContext
import org.apache.spark.api.java.JavaSparkContext
object HudiUtils {
val log = LogManager.getLogger(this.getClass)
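  // Removes all pending compactions from the Hudi table's active timeline by deleting both the
  // inflight instant and its matching compaction-requested instant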
def deletePendingCompactions(sparkContext: SparkContext, basePath: String): Unit = {
try {
val jsc = JavaSparkContext.fromSparkContext(sparkContext)
val hudiMetaclient = new HoodieTableMetaClient(sparkContext.hadoopConfiguration, basePath)
val writerConfig = HoodieWriteConfig.newBuilder().withPath(basePath).build()
val hudiTable = HoodieTable.getHoodieTable(hudiMetaclient, writerConfig, jsc)
val pendingCompactionPlans = CompactionUtils.getAllPendingCompactionPlans(hudiMetaclient)
val activeTimeline = hudiTable.getActiveTimeline()
pendingCompactionPlans.toArray().foreach({ pendingCompactionPlan => {
val inflightInstant = pendingCompactionPlan.asInstanceOf[ImmutablePair[HoodieInstant, HoodieCompactionPlan]].getLeft
log.info(s"Deleting pending inflight compaction: ${inflightInstant.getFileName}")
activeTimeline.deleteInflight(inflightInstant)
val compactionRequestedInstant = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, inflightInstant.getTimestamp);
log.info(s"Deleting pending compaction requested: ${compactionRequestedInstant.getFileName}")
activeTimeline.deleteCompactionRequested(compactionRequestedInstant)
}
})
}
catch {
case e: TableNotFoundException => log.info(s"Cannot delete pending compaction: table has yet been created", e)
case e: Throwable => throw e
}
}
}
|