repo_name | path | text
---|---|---|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/Kafka.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.kafka.KafkaInput
case class Kafka(servers: Seq[String],
topic: Option[String],
topicPattern: Option[String],
consumerGroup: Option[String],
options: Option[Map[String, String]],
schemaRegistryUrl: Option[String],
schemaSubject: Option[String],
schemaId: Option[String]
) extends InputConfig {
require(Option(servers).isDefined, "Servers must be defined")
require((topic.isDefined && topicPattern.isEmpty) ||
(topic.isEmpty && topicPattern.isDefined && schemaSubject.isDefined),
"Exactly one of (topic, topicPattern) must be defined (a topicPattern also requires a schemaSubject)")
override def getReader(name: String): Reader = KafkaInput(name, servers, topic, topicPattern, consumerGroup, options,
schemaRegistryUrl, schemaSubject, schemaId)
}
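// Usage sketch (illustrative, not part of the original file; broker address and
// topic name are hypothetical): the require above accepts either a plain topic,
// or a topicPattern combined with a schemaSubject.
object KafkaConfigSketch {
  val byTopic = Kafka(
    servers = Seq("localhost:9092"),
    topic = Some("events"),
    topicPattern = None,
    consumerGroup = None,
    options = None,
    schemaRegistryUrl = None,
    schemaSubject = None,
    schemaId = None
  ) // passes the check: topic is defined and topicPattern is empty
}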
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/configuration/job/input/JDBC.scala | package com.yotpo.metorikku.configuration.job.input
import com.yotpo.metorikku.configuration.job.InputConfig
import com.yotpo.metorikku.input.Reader
import com.yotpo.metorikku.input.readers.jdbc.JDBCInput
case class JDBC(connectionUrl: String,
user: String,
password: String,
table: String,
options: Option[Map[String, String]]
) extends InputConfig {
require(Option(connectionUrl).isDefined, "JDBC connection: connection url is mandatory")
require(Option(user).isDefined, "JDBC connection: user is mandatory")
require(Option(password).isDefined, "JDBC connection: password is mandatory")
require(Option(table).isDefined, "JDBC connection: table is mandatory")
override def getReader(name: String): Reader = JDBCInput(name, connectionUrl, user, password, table, options)
}
|
rluta/metorikku | src/main/scala/com/yotpo/metorikku/metric/stepActions/dataQuality/DataQualityCheckList.scala | package com.yotpo.metorikku.metric.stepActions.dataQuality
case class DataQualityCheckList(checks: List[DataQualityCheck],
level: Option[String],
cacheDf: Option[Boolean],
failedDfLocation: Option[String] = None) {
def runChecks(dfName: String): Unit = {
ValidationRunner().runChecks(dfName, checks, level, cacheDf, failedDfLocation)
}
}
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/IncomingMessage.scala | package fr.hmil.roshttp.node.http
import fr.hmil.roshttp.node.events.EventEmitter
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobal
@js.native
@JSGlobal
private[roshttp] class IncomingMessage extends EventEmitter {
val headers: js.Dictionary[String] = js.native
val httpVersion: String = js.native
val method: String = js.native
val rawHeaders: js.Dictionary[String] = js.native
val rawTrailers: js.Dictionary[String] = js.native
def setTimeout(msecs: Int, callback: js.Function): IncomingMessage = js.native
val statusCode: Int = js.native
val statusMessage: String = js.native
// message.socket -- not facaded here
val trailers: js.Dictionary[String] = js.native
val url: String = js.native
def pause(): Unit = js.native
def resume(): Unit = js.native
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/DriverTrait.scala | package fr.hmil.roshttp
import fr.hmil.roshttp.response.{HttpResponse, HttpResponseFactory}
import monix.execution.Scheduler
import scala.concurrent.Future
private trait DriverTrait {
def send[T <: HttpResponse](req: HttpRequest, factory: HttpResponseFactory[T])(implicit scheduler: Scheduler):
Future[T]
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/util/Utils.scala | package fr.hmil.roshttp.util
import java.nio.ByteBuffer
import fr.hmil.roshttp.CrossPlatformUtils
object Utils {
/**
* Extracts the charset from a content-type header string
* @param input content-type header value
* @return the charset contained in the content-type, or a default
* charset when none is specified (so as not to tamper with binary buffers).
*/
def charsetFromContentType(input: String): String = {
if (input == null) {
oneByteCharset
} else {
// From W3C spec:
// Content-Type := type "/" subtype *[";" parameter]
// eg: text/html; charset=UTF-8
input.split(';').toStream.drop(1).foldLeft(oneByteCharset)((acc, s) => {
if (s.matches("^\\s*charset=.+$")) {
s.substring(s.indexOf("charset") + 8)
} else {
acc
}
})
}
}
/** urlencodes a query string by preserving key-value pairs. */
def encodeQueryString(queryString: String): String = {
queryString
.split("&")
.map(_
.split("=")
.map(encodeURIComponent)
.mkString("="))
.mkString("&")
}
def encodeURIComponent(input: String): String = CrossPlatformUtils.encodeURIComponent(input)
def getStringFromBuffer(byteBuffer: ByteBuffer, charset: String): String = {
if (byteBuffer.hasArray) {
new String(byteBuffer.array(), 0, byteBuffer.limit, charset)
} else {
val tmp = new Array[Byte](byteBuffer.limit)
byteBuffer.get(tmp)
new String(tmp, charset)
}
}
private val oneByteCharset = "utf-8"
}
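// Usage sketch (illustrative, not part of the original file):
object UtilsSketch {
  val cs1 = Utils.charsetFromContentType("text/html; charset=UTF-8") // "UTF-8"
  val cs2 = Utils.charsetFromContentType(null)                       // falls back to "utf-8"
  val qs = Utils.encodeQueryString("key=hello world&other=1")        // "key=hello%20world&other=1"
}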
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/response/HttpResponseFactory.scala | package fr.hmil.roshttp.response
import java.nio.ByteBuffer
import fr.hmil.roshttp.BackendConfig
import fr.hmil.roshttp.util.HeaderMap
import monix.execution.Scheduler
import monix.reactive.Observable
import scala.concurrent.Future
private[roshttp] trait HttpResponseFactory[T <: HttpResponse] {
def apply(
header: HttpResponseHeader,
bodyStream: Observable[ByteBuffer],
config: BackendConfig)
(implicit scheduler: Scheduler): Future[T]
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/exceptions/HttpException.scala | package fr.hmil.roshttp.exceptions
import java.io.IOException
import fr.hmil.roshttp.response.{HttpResponse, SimpleHttpResponse, StreamHttpResponse}
/** Exception in the HTTP application layer.
*
* In other words, this exception occurs when a bad HTTP status code (>= 400) is received.
*/
case class HttpException[+T <: HttpResponse] private(response: T)(message: String = null)
extends IOException(message)
object HttpException {
def badStatus[T <: HttpResponse](response: T): HttpException[T] =
new HttpException[T](response)(s"Server responded with status ${response.statusCode}")
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/response/HttpResponse.scala | package fr.hmil.roshttp.response
import fr.hmil.roshttp.util.HeaderMap
private[roshttp] trait HttpResponse {
val statusCode: Int
val headers: HeaderMap[String]
val body: Any
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/exceptions/UploadStreamException.scala | package fr.hmil.roshttp.exceptions
import java.io.IOException
/** Captures errors in the request body stream.
*
* This exception means that the stream which feeds request body data into the request broke.
*/
case class UploadStreamException(cause: Throwable)
extends IOException("An error occurred upstream while sending request data.", cause) |
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/NodeDriver.scala | package fr.hmil.roshttp
import java.io.IOException
import java.nio.ByteBuffer
import fr.hmil.roshttp.ByteBufferQueue.Feeder
import fr.hmil.roshttp.exceptions.{HttpException, RequestException, UploadStreamException}
import fr.hmil.roshttp.node.Modules.{http, https}
import fr.hmil.roshttp.node.buffer.Buffer
import fr.hmil.roshttp.node.http.{IncomingMessage, RequestOptions}
import fr.hmil.roshttp.response.{HttpResponse, HttpResponseFactory, HttpResponseHeader}
import fr.hmil.roshttp.util.HeaderMap
import monix.execution.Ack.Continue
import monix.execution.{Ack, Scheduler}
import monix.reactive.Observer
import scala.concurrent.{Future, Promise}
import scala.scalajs.js
import scala.scalajs.js.JSConverters._
import scala.scalajs.js.JavaScriptException
private object NodeDriver extends DriverTrait {
def makeRequest[T <: HttpResponse](req: HttpRequest, factory: HttpResponseFactory[T], p: Promise[T])
(implicit scheduler: Scheduler): Unit = {
val module = {
if (req.protocol == Protocol.HTTP)
http
else
https
}
var headers = req.headers
if (!req.backendConfig.allowChunkedRequestBody) {
headers += "Transfer-Encoding" -> ""
}
val nodeRequest = module.request(RequestOptions(
hostname = req.host,
port = req.port.orUndefined,
method = req.method.toString,
headers = js.Dictionary(headers.toSeq: _*),
path = req.longPath
), handleResponse(req, factory, p)_)
nodeRequest.on("error", { (s: js.Dynamic) =>
p.tryFailure(RequestException(new IOException(s.toString)))
()
})
if (req.body.isDefined) {
req.body.foreach({ part =>
part.content.subscribe(new Observer[ByteBuffer] {
override def onError(ex: Throwable): Unit = {
p.tryFailure(UploadStreamException(ex))
nodeRequest.abort()
}
override def onComplete(): Unit = {
nodeRequest.end()
}
override def onNext(elem: ByteBuffer): Future[Ack] = {
nodeRequest.write(Converters.byteBufferToNodeBuffer(elem))
Continue
}
})
})
} else {
nodeRequest.end()
}
}
def handleResponse[T <: HttpResponse](req:HttpRequest, factory: HttpResponseFactory[T], p: Promise[T])
(message: IncomingMessage)(implicit scheduler: Scheduler): Unit = {
if (message.statusCode >= 300 && message.statusCode < 400 && message.headers.contains("location")) {
makeRequest(req.withURL(message.headers("location")), factory, p)
} else {
val headers = message.headers.toMap[String, String]
val bufferQueue = new ByteBufferQueue(req.backendConfig.internalBufferLength,
new Feeder {
override def onFlush(): Unit = message.resume()
override def onFull(): Unit = message.pause()
})
message.on("data", { (nodeBuffer: js.Dynamic) =>
convertAndChopBuffer(nodeBuffer, req.backendConfig.maxChunkSize).foreach(bufferQueue.push)
})
message.on("end", { (s: js.Dynamic) =>
bufferQueue.end()
})
message.on("error", { (s: js.Dynamic) =>
bufferQueue.pushError(JavaScriptException(s))
})
p.completeWith(
factory(new HttpResponseHeader(message.statusCode, HeaderMap(headers)),
bufferQueue.observable,
req.backendConfig)
.map({ response =>
if (message.statusCode < 400) {
response
} else {
throw HttpException.badStatus(response)
}
}))
}
()
}
def send[T <: HttpResponse](req: HttpRequest, factory: HttpResponseFactory[T])(implicit scheduler: Scheduler):
Future[T] = {
val p: Promise[T] = Promise[T]()
makeRequest(req, factory, p)
p.future
}
private def convertAndChopBuffer(nodeBuffer: js.Any, maxChunkSize: Int): Seq[ByteBuffer] = {
val buffer = Converters.nodeBufferToByteBuffer(nodeBuffer.asInstanceOf[Buffer])
ByteBufferChopper.chop(buffer, maxChunkSize)
}
}
|
ThorsMjollnir/RosHTTP | build.sbt | name := "RösHTTP root project"
crossScalaVersions := Seq("2.10.6", "2.11.11", "2.12.2")
lazy val root = project.in(file(".")).
aggregate(scalaHttpJS, scalaHttpJVM)
lazy val scalaHttp = crossProject.in(file("."))
//.configureCross(InBrowserTesting.cross)
.settings(
name := "roshttp",
version := "2.0.2",
scalaVersion := "2.11.11",
organization := "fr.hmil",
licenses := Seq("MIT" -> url("https://opensource.org/licenses/MIT")),
homepage := Some(url("http://github.com/hmil/RosHTTP")),
publishMavenStyle := true,
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
pomExtra := (
<scm>
<url><EMAIL>:hmil/RosHTTP.git</url>
<connection>scm:git:<EMAIL>:hmil/RosHTTP.git</connection>
</scm>
<developers>
<developer>
<id>hmil</id>
<name><NAME></name>
<url>https://github.com/hmil/</url>
</developer>
</developers>
),
pomIncludeRepository := { _ => false },
libraryDependencies += "com.lihaoyi" %%% "utest" % "0.4.5" % Test,
libraryDependencies += "io.monix" %%% "monix" % "2.3.0",
testFrameworks += new TestFramework("utest.runner.Framework")
)
.jvmSettings(
// jvm-specific settings
)
.jsSettings(
// js-specific settings
libraryDependencies += "org.scala-js" %%% "scalajs-dom" % "0.9.1",
jsEnv := NodeJSEnv().value
)
lazy val scalaHttpJVM = scalaHttp.jvm
lazy val scalaHttpJS = scalaHttp.js
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/Http.scala | package fr.hmil.roshttp.node.http
import scala.scalajs.js
import scala.scalajs.js.annotation.{JSGlobal, JSName}
/**
* The node http API.
*
* Server-related functionality is not included.
* createClient not included because it is deprecated.
*
* @see https://nodejs.org/api/http.html
*/
@js.native
private[roshttp] trait Http extends js.Object{
/**
* A list of the HTTP methods that are supported by the parser.
*/
val METHODS: Seq[String] = js.native
/**
* A collection of all the standard HTTP response status codes, and the short
* description of each. For example, http.STATUS_CODES[404] === 'Not Found'.
*/
val STATUS_CODES: Map[Number, String] = js.native
/**
* Global instance of Agent which is used as the default for all http client requests.
*/
val globalAgent: Agent = js.native
// http.createServer([requestListener]) -- server-side stuff, not needed in this project
// http.createClient([port][, host]) -- deprecated API, not implemented
/**
* Since most requests are GET requests without bodies, Node.js provides this convenience
* method. The only difference between this method and http.request() is that it sets the
* method to GET and calls req.end() automatically.
*/
def get(url: String): ClientRequest = js.native
def get(url: String, cb: js.Function1[IncomingMessage, Unit]): ClientRequest = js.native
def get(options: RequestOptions): ClientRequest = js.native
def get(options: RequestOptions, cb: js.Function1[IncomingMessage, Unit]): ClientRequest = js.native
/**
* Node.js maintains several connections per server to make HTTP requests. This function
* allows one to transparently issue requests.
* options can be an object or a string. If options is a string, it is automatically
* parsed with url.parse().
*/
def request(url: String): ClientRequest = js.native
def request(url: String, cb: js.Function1[IncomingMessage, Unit]): ClientRequest = js.native
def request(options: RequestOptions): ClientRequest = js.native
def request(options: RequestOptions, cb: js.Function1[IncomingMessage, Unit]): ClientRequest = js.native
}
@js.native
@JSGlobal("http")
private[roshttp] object Http extends Http
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/CrossPlatformUtils.scala | package fr.hmil.roshttp
import scala.scalajs.js
private object CrossPlatformUtils {
def encodeURIComponent(query: String): String =
js.URIUtils.encodeURIComponent(query)
def decodeURIComponent(query: String): String =
js.URIUtils.decodeURIComponent(query)
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/PlainTextBody.scala | package fr.hmil.roshttp.body
import java.nio.ByteBuffer
/** Plain text body sent as `text/plain` mime type.
*
* @param text The plain text to send
* @param charset Charset used for encoding (defaults to utf-8)
*/
class PlainTextBody private(
text: String,
charset: String
) extends BulkBodyPart {
override def contentType: String = "text/plain; charset=" + charset
override def contentData: ByteBuffer = ByteBuffer.wrap(text.getBytes(charset))
}
object PlainTextBody {
def apply(text: String, charset: String = "utf-8"): PlainTextBody = new PlainTextBody(text, charset)
}
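// Usage sketch (illustrative, not part of the original file):
object PlainTextBodySketch {
  val body = PlainTextBody("Hello", charset = "iso-8859-1")
  val ct = body.contentType // "text/plain; charset=iso-8859-1"
}
|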
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/BodyPart.scala | package fr.hmil.roshttp.body
import java.nio.ByteBuffer
import monix.reactive.Observable
trait BodyPart {
def contentType: String
def content: Observable[ByteBuffer]
}
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/ClientRequest.scala | package fr.hmil.roshttp.node.http
import fr.hmil.roshttp.node.events.EventEmitter
import fr.hmil.roshttp.node.buffer.Buffer
import scala.scalajs.js
/**
* Complete nodejs http ClientRequest API facade
*/
@js.native
private[roshttp] class ClientRequest extends EventEmitter {
/**
* Marks the request as aborting. Calling this will cause remaining data in
* the response to be dropped and the socket to be destroyed.
*/
def abort(): Unit = js.native
/**
* Finishes sending the request. If any parts of the body are unsent, it will
* flush them to the stream. If the request is chunked, this will send the
* terminating '0\r\n\r\n'.
*
* If data is specified, it is equivalent to calling response.write(data, encoding)
* followed by request.end(callback).
*
* If callback is specified, it will be called when the request stream is finished.
*/
def end(): Unit = js.native
def end(data: Buffer): Unit = js.native
def end(data: Buffer, callback: js.Function0[Unit]): Unit = js.native
def end(data: String): Unit = js.native
def end(data: String, encoding: String): Unit = js.native
def end(data: String, callback: js.Function0[Unit]): Unit = js.native
def end(data: String, encoding: String, callback: js.Function0[Unit]): Unit = js.native
def end(callback: js.Function0[Unit]): Unit = js.native
/**
* Flush the request headers.
*
* For efficiency reasons, Node.js normally buffers the request headers until
* you call request.end() or write the first chunk of request data. It then tries
* hard to pack the request headers and data into a single TCP packet.
*
* That's usually what you want (it saves a TCP round-trip) but not when the
* first data isn't sent until possibly much later. request.flushHeaders() lets
* you bypass the optimization and kickstart the request.
*/
def flushHeaders(): Unit = js.native
/**
* Once a socket is assigned to this request and is connected socket.setNoDelay()
* will be called.
*/
def setNoDelay(noDelay: Boolean): Unit = js.native
def setNoDelay(): Unit = js.native
/**
* Once a socket is assigned to this request and is connected socket.setKeepAlive() will be called.
*/
def setSocketKeepAlive(enable: Boolean, initialDelay: Int): Unit = js.native
def setSocketKeepAlive(enable: Boolean): Unit = js.native
def setSocketKeepAlive(initialDelay: Int): Unit = js.native
/**
* Once a socket is assigned to this request and is connected socket.setTimeout() will be called.
*
* @param timeout Milliseconds before a request is considered to be timed out.
* @param callback Optional function to be called when a timeout occurs. Same as binding to the timeout event.
*/
def setTimeout(timeout: Int, callback: js.Function0[Unit]): Unit = js.native
def setTimeout(timeout: Int): Unit = js.native
/**
* Sends a chunk of the body. By calling this method many times, the user can stream
* a request body to a server. In that case it is suggested to use the ['Transfer-Encoding', 'chunked']
* header line when creating the request.
*
* @param chunk should be a Buffer or a string.
* @param encoding optional and only applies when chunk is a string. Defaults to 'utf8'.
* @param callback optional and will be called when this chunk of data is flushed.
* @return
*/
def write(chunk: String, encoding: String, callback: js.Function0[Unit]): ClientRequest = js.native
def write(chunk: String): ClientRequest = js.native
def write(chunk: String, encoding: String): ClientRequest = js.native
def write(chunk: String, callback: js.Function0[Unit]): ClientRequest = js.native
def write(chunk: Buffer): ClientRequest = js.native
def write(chunk: Buffer, callback: js.Function0[Unit]): ClientRequest = js.native
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/Method.scala | package fr.hmil.roshttp
/** Wraps HTTP method strings. */
final class Method private(private val name: String) {
override def toString: String = name.toUpperCase
override def equals(o: Any): Boolean = o match {
case that: Method => that.name.equalsIgnoreCase(this.name)
case _ => false
}
override def hashCode: Int = name.toUpperCase.hashCode
}
/** Exposes available methods as object as well as an implicit conversion
* from string to Method objects.
*
* Because all backends do not support all methods, this library imposes a subset
* of all available HTTP Methods. Should you find a use case for this library
* with other HTTP methods, please submit an issue with your motivation.
*/
object Method {
val GET = Method("GET")
val POST = Method("POST")
val HEAD = Method("HEAD")
val OPTIONS = Method("OPTIONS")
val PUT = Method("PUT")
val DELETE = Method("DELETE")
/** The PATCH HTTP method does not work on the JVM */
val PATCH = Method("PATCH")
/** The TRACE HTTP method does not work in the browser */
val TRACE = Method("TRACE")
/** Creates a custom http method.
*
* Support for custom methods depends on the backend so use at your own risk!
*
* @param name method name
*/
def apply(name: String): Method = new Method(name)
}
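// Usage sketch (illustrative, not part of the original file): equality ignores
// case while toString normalizes to upper case.
object MethodSketch {
  val same = Method("get") == Method.GET // true
  val name = Method("get").toString      // "GET"
}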
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/response/SimpleHttpResponse.scala | package fr.hmil.roshttp.response
import java.nio.ByteBuffer
import fr.hmil.roshttp.BackendConfig
import fr.hmil.roshttp.exceptions.ResponseException
import fr.hmil.roshttp.util.{HeaderMap, Utils}
import monix.execution.Scheduler
import monix.reactive.Observable
import scala.collection.mutable
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/**
* An HTTP response obtained via an [[fr.hmil.roshttp.HttpRequest]]
*/
class SimpleHttpResponse(
val statusCode: Int,
val headers: HeaderMap[String],
val body: String)
extends HttpResponse
object SimpleHttpResponse extends HttpResponseFactory[SimpleHttpResponse] {
override def apply(
header: HttpResponseHeader,
bodyStream: Observable[ByteBuffer],
config: BackendConfig)
(implicit scheduler: Scheduler): Future[SimpleHttpResponse] = {
val charset = Utils.charsetFromContentType(header.headers.getOrElse("content-type", null))
val buffers = mutable.Queue[ByteBuffer]()
val promise = Promise[SimpleHttpResponse]()
val streamCollector = bodyStream.
foreach(elem => buffers.enqueue(elem)).
map({_ =>
val body = recomposeBody(buffers, config.maxChunkSize, charset)
new SimpleHttpResponse(header.statusCode, header.headers, body)
})
streamCollector.onComplete({
case res:Success[SimpleHttpResponse] =>
promise.trySuccess(res.value)
case e:Failure[_] =>
promise.tryFailure(new ResponseException(e.exception, header))
})
promise.future
}
private def recomposeBody(seq: mutable.Queue[ByteBuffer], maxChunkSize: Int, charset: String): String = {
// Allocate maximum expected body length
val buffer = ByteBuffer.allocate(seq.length * maxChunkSize)
val totalBytes = seq.foldLeft(0)({ (count, chunk) =>
buffer.put(chunk)
count + chunk.limit
})
buffer.limit(totalBytes)
Utils.getStringFromBuffer(buffer, charset)
}
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/MultiPartBody.scala | package fr.hmil.roshttp.body
import java.nio.ByteBuffer
import monix.execution.Scheduler
import monix.reactive.Observable
import scala.util.Random
/** A body made of multiple parts.
*
* <b>Usage:</b> A multipart body acts as a container for other bodies. For instance,
* the multipart body is commonly used to send a form with binary attachments in conjunction with
* the [[ByteBufferBody]].
* For simple key/value pairs, use [[URLEncodedBody]] instead.
*
* <b>Safety consideration:</b> A random boundary is generated to separate parts. If the boundary was
* to occur within a body part, it would mess up the whole body. In practice, the odds are extremely small though.
*
* @param parts The pieces of body. The key in the map is used as `name` for the `Content-Disposition` header
* of each part.
* @param subtype The exact multipart mime type as in `multipart/subtype`. Defaults to `form-data`.
*/
class MultiPartBody(parts: Map[String, BodyPart], subtype: String = "form-data")(implicit scheduler: Scheduler)
extends BodyPart {
val boundary = "----" + Random.alphanumeric.take(24).mkString.toLowerCase
override def contentType: String = s"multipart/$subtype; boundary=$boundary"
override def content: Observable[ByteBuffer] = {
parts.
// Prepend multipart encapsulation boundary and body part headers to
// each body part.
map({ case (name, part) =>
ByteBuffer.wrap(
("\r\n--" + boundary + "\r\n" +
"Content-Disposition: form-data; name=\"" + name + "\"\r\n" +
s"Content-Type: ${part.contentType}\r\n" +
"\r\n").getBytes("utf-8")
) +: part.content
}).
// Join body parts
reduceLeft((acc, elem) => acc ++ elem).
// Append the closing boundary
:+(ByteBuffer.wrap(s"\r\n--$boundary--\r\n".getBytes("utf-8")))
}
}
object MultiPartBody {
def apply(parts: (String, BodyPart)*)(implicit scheduler: Scheduler): MultiPartBody =
new MultiPartBody(Map(parts: _*))
}
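// Usage sketch (illustrative, not part of the original file); assumes a monix
// Scheduler in implicit scope, as the constructor requires one.
object MultiPartBodySketch {
  import monix.execution.Scheduler.Implicits.global
  val body = MultiPartBody(
    "name" -> PlainTextBody("Alice"),
    "note" -> PlainTextBody("Heizölrückstoßabdämpfung"))
  val ct = body.contentType // "multipart/form-data; boundary=----<24 random chars>"
}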
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/exceptions/TimeoutException.scala | package fr.hmil.roshttp.exceptions
import java.io.IOException
import fr.hmil.roshttp.response.HttpResponseHeader
/** Captures timeout exceptions occurring during an HTTP response. */
case class TimeoutException(header: Option[HttpResponseHeader] = None)
extends IOException("HTTP response timed out.")
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/Helpers.scala | package fr.hmil.roshttp.node
import scala.scalajs.js
import scala.scalajs.js.JavaScriptException
/**
* Collection of helper functions for Node.js-related code
*/
private[roshttp] object Helpers {
/**
* Tests whether the current environment is commonjs-like
*
* @return true if the function "require" is available on the global scope
*/
def isRequireAvailable: Boolean = !js.isUndefined(js.Dynamic.global.selectDynamic("require"))
/**
* Gets javascript module using either require() or the global context
*
* @param module Module descriptor
* @tparam T Module API facade type
* @return The requested module as a scala facade
*/
def require[T](module: Module[T]): Option[T] = {
if (!js.isUndefined(module.inst)) {
Some(module.inst)
} else if (isRequireAvailable) {
try {
Some(Global.require[T](module.name))
} catch {
case _: JavaScriptException => None
}
} else {
None
}
}
}
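// Usage sketch (illustrative, not part of the original file): resolving a module
// facade; yields None when neither a global shim nor require() provides it.
object HelpersSketch {
  import fr.hmil.roshttp.node.http.Http
  val httpModule: Option[Http] = Helpers.require(Modules.HttpModule)
}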
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/BulkBodyPart.scala | package fr.hmil.roshttp.body
import java.nio.ByteBuffer
import monix.reactive.Observable
abstract class BulkBodyPart extends BodyPart {
override def content: Observable[ByteBuffer] = Observable.eval(contentData)
def contentData: ByteBuffer
}
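// Sketch of a hypothetical BulkBodyPart subclass (illustrative, not part of the
// original file): wraps a byte array as a single in-memory chunk.
class ByteArrayBodySketch(bytes: Array[Byte]) extends BulkBodyPart {
  override def contentType: String = "application/octet-stream"
  override def contentData: ByteBuffer = ByteBuffer.wrap(bytes)
}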
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/Module.scala | package fr.hmil.roshttp.node
private[roshttp] abstract class Module[T](val name: String, val inst: T) {
def isAvailable: Boolean = required.isDefined
def required: Option[T] = Helpers.require(this)
lazy val api = required.getOrElse(throw new ModuleNotFoundException(name))
}
private[roshttp] class ModuleNotFoundException(name: String) extends RuntimeException("Module " + name + " not found")
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/ByteBufferQueue.scala | package fr.hmil.roshttp
import java.nio.ByteBuffer
import fr.hmil.roshttp.ByteBufferQueue.Feeder
import monix.execution.{Ack, Cancelable}
import monix.execution.Ack.{Continue, Stop}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.collection.mutable
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}
/**
* Mutable queue of byteBuffers which acts as a buffer between a source and a
* sink. This queue guarantees that all operations (data and errors) exit in
* the same order as they entered.
*/
private[roshttp] class ByteBufferQueue(
private val capacity: Int,
private val feeder: Feeder = ByteBufferQueue.noopFeeder)
(implicit ec: ExecutionContext) {
private var subscriber: Option[Subscriber[ByteBuffer]] = None
private val bufferQueue = mutable.Queue[ByteBuffer]()
private var hasEnd = false
private var isWaitingForAck = false
private var error: Throwable = _
private val cancelable = new Cancelable {
override def cancel(): Unit = stop()
}
def propagate(): Unit = subscriber.foreach({ subscriber =>
if (!isWaitingForAck) {
if (bufferQueue.nonEmpty) {
isWaitingForAck = true
val wasFull = isFull
subscriber.onNext(bufferQueue.dequeue()).onComplete(handleAck)
if (wasFull) {
feeder.onFlush()
}
} else if (hasEnd) {
if (error != null) {
subscriber.onError(error)
}
stop()
}
}
})
def handleAck(ack: Try[Ack]): Unit = {
isWaitingForAck = false
ack match {
case Success(Stop) =>
subscriber = None
case Success(Continue) =>
if (bufferQueue.nonEmpty) {
propagate()
} else if (hasEnd) {
stop()
}
case Failure(ex) =>
// Notify the subscriber before discarding it; clearing it first would make onError unreachable.
subscriber.foreach(_.onError(ex))
subscriber = None
}
}
def push(buffer: ByteBuffer): Unit = {
if (hasEnd) throw new IllegalStateException("Trying to push new data to an ended buffer queue")
if (isFull) throw new IllegalStateException("Buffer queue is full")
bufferQueue.enqueue(buffer)
if (isFull) {
feeder.onFull()
}
if (bufferQueue.nonEmpty) {
propagate()
}
}
def end(): Unit = {
hasEnd = true
if (bufferQueue.isEmpty) {
stop()
}
}
def isFull: Boolean = {
bufferQueue.length == capacity
}
def pushError(error: Throwable): Unit = {
this.error = error
this.hasEnd = true
propagate()
}
val observable = new Observable[ByteBuffer]() {
override def unsafeSubscribeFn(sub: Subscriber[ByteBuffer]): Cancelable = {
if (subscriber.isDefined) {
throw new IllegalStateException("A subscriber is already defined")
}
subscriber = Some(sub)
if (bufferQueue.nonEmpty) {
propagate()
} else if (hasEnd) {
stop()
}
cancelable
}
}
private def stop(): Unit = {
subscriber.foreach(_.onComplete())
}
def length: Int = {
bufferQueue.length
}
}
object ByteBufferQueue {
trait Feeder {
def onFull(): Unit
def onFlush(): Unit
}
private val noopFeeder = new Feeder {
override def onFlush(): Unit = ()
override def onFull(): Unit = ()
}
}
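// Usage sketch (illustrative, not part of the original file): chunks pushed
// before subscription are replayed in order once a subscriber attaches.
object ByteBufferQueueSketch {
  import monix.execution.Scheduler.Implicits.global
  val queue = new ByteBufferQueue(capacity = 2)
  queue.push(ByteBuffer.wrap("ab".getBytes))
  queue.push(ByteBuffer.wrap("cd".getBytes))
  queue.end()
  queue.observable.foreach(chunk => println(chunk.remaining())) // prints 2 twice
}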
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/response/StreamHttpResponse.scala | package fr.hmil.roshttp.response
import java.nio.ByteBuffer
import fr.hmil.roshttp.BackendConfig
import fr.hmil.roshttp.util.HeaderMap
import monix.execution.Scheduler
import monix.reactive.Observable
import scala.concurrent.Future
class StreamHttpResponse(
val statusCode: Int,
val headers: HeaderMap[String],
val body: Observable[ByteBuffer])
extends HttpResponse
object StreamHttpResponse extends HttpResponseFactory[StreamHttpResponse] {
override def apply(
header: HttpResponseHeader,
bodyStream: Observable[ByteBuffer],
config: BackendConfig)
(implicit scheduler: Scheduler): Future[StreamHttpResponse] =
Future.successful(new StreamHttpResponse(header.statusCode, header.headers, bodyStream))
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/Protocol.scala | package fr.hmil.roshttp
/** Defines the protocol used.
*
* When setting a protocol from a string, we want to preserve the initial case such as
* not to alter the url.
*/
final case class Protocol private(private val name: String) {
override def toString: String = name
override def equals(o: Any): Boolean = o match {
case that: Protocol => that.name.equalsIgnoreCase(this.name)
case _ => false
}
override def hashCode: Int = name.toLowerCase.hashCode
}
object Protocol {
val HTTP = fromString("http")
val HTTPS = fromString("https")
def fromString(name: String): Protocol = name.toUpperCase match {
case "HTTP" => Protocol(name)
case "HTTPS" => Protocol(name)
case _ => throw new IllegalArgumentException(s"Invalid protocol: $name")
}
}
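// Usage sketch (illustrative, not part of the original file): parsing preserves
// the original case while equality ignores it.
object ProtocolSketch {
  val same = Protocol.fromString("Https") == Protocol.HTTPS // true
  val name = Protocol.fromString("Https").toString          // "Https"
  // Protocol.fromString("ftp") throws IllegalArgumentException
}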
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/HttpDriver.scala | package fr.hmil.roshttp
import fr.hmil.roshttp.node.Modules.{HttpModule, HttpsModule}
import fr.hmil.roshttp.response.{HttpResponse, HttpResponseFactory}
import monix.execution.Scheduler
import scala.concurrent.Future
private object HttpDriver extends DriverTrait {
private var _driver: Option[DriverTrait] = None
def send[T <: HttpResponse](req: HttpRequest, factory: HttpResponseFactory[T])(implicit scheduler: Scheduler):
Future[T] = {
_driver.getOrElse(chooseBackend()).send(req, factory)
}
private def chooseBackend(): DriverTrait = {
if (HttpModule.isAvailable && HttpsModule.isAvailable) {
_driver = Some(NodeDriver)
} else {
_driver = Some(BrowserDriver)
}
_driver.get
}
}
|
ThorsMjollnir/RosHTTP | jvm/src/main/scala/fr/hmil/roshttp/CrossPlatformUtils.scala | package fr.hmil.roshttp
import java.net.{URLDecoder, URLEncoder}
private object CrossPlatformUtils {
def encodeURIComponent(query: String): String = {
URLEncoder.encode(query, "UTF-8").replace("+", "%20")
}
def decodeURIComponent(query: String): String =
URLDecoder.decode(query, "UTF-8")
}
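// Usage sketch (illustrative, not part of the original file): spaces are encoded
// as %20 rather than the '+' that plain URLEncoder would produce.
object CrossPlatformUtilsSketch {
  val enc = CrossPlatformUtils.encodeURIComponent("a b")   // "a%20b"
  val dec = CrossPlatformUtils.decodeURIComponent("a%20b") // "a b"
}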
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/events/EventEmitter.scala | package fr.hmil.roshttp.node.events
import scala.scalajs.js
@js.native
private[roshttp] trait EventEmitter extends js.Object {
def on(event: String, cb: js.Function1[js.Dynamic, Unit]): Unit = js.native
}
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/RequestOptions.scala | package fr.hmil.roshttp.node.http
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
@ScalaJSDefined
private[roshttp] trait RequestOptions extends js.Object {
val protocol: String
val host: String
val hostname: String
val family: Int
val port: Int
val localAddress: String
val socketPath: String
val method: String
val path: String
val headers: Map[String, String]
val auth: String
val agent: Agent
// val createConnection
}
private[roshttp] object RequestOptions {
/**
* @param protocol Protocol to use. Defaults to 'http:'.
* @param host A domain name or IP address of the server to issue the request to. Defaults to 'localhost'.
* @param hostname Alias for host. To support url.parse() hostname is preferred over host.
* @param family IP address family to use when resolving host and hostname. Valid values are 4 or 6.
* When unspecified, both IP v4 and v6 will be used.
* @param port Port of remote server. Defaults to 80.
* @param localAddress Local interface to bind for network connections.
* @param socketPath Unix Domain Socket (use one of host:port or socketPath).
* @param method A string specifying the HTTP request method. Defaults to 'GET'.
* @param path Request path. Defaults to '/'. Should include the query string, if any, e.g. '/index.html?page=12'.
* An exception is thrown when the request path contains illegal characters. Currently, only
* spaces are rejected but that may change in the future.
* @param headers An object containing request headers.
* @param auth Basic authentication i.e. 'user:password' to compute an Authorization header.
* @param agent Controls Agent behavior. When an Agent is used request will default to Connection: keep-alive.
* @return
*/
def apply(
protocol: js.UndefOr[String] = js.undefined,
host: js.UndefOr[String] = js.undefined,
hostname: js.UndefOr[String] = js.undefined,
family: js.UndefOr[Int] = js.undefined,
port: js.UndefOr[Int] = js.undefined,
localAddress: js.UndefOr[String] = js.undefined,
socketPath: js.UndefOr[String] = js.undefined,
method: js.UndefOr[String] = js.undefined,
path: js.UndefOr[String] = js.undefined,
headers: js.UndefOr[js.Dictionary[String]] = js.undefined,
auth: js.UndefOr[String] = js.undefined,
agent: js.UndefOr[Agent] = js.undefined
): RequestOptions = {
val r = js.Dynamic.literal()
protocol.foreach(r.protocol = _)
host.foreach(r.host = _)
hostname.foreach(r.hostname = _)
family.foreach(r.family = _)
port.foreach(r.port = _)
localAddress.foreach(r.localAddress = _)
socketPath.foreach(r.socketPath = _)
method.foreach(r.method = _)
path.foreach(r.path = _)
headers.foreach(r.headers = _)
auth.foreach(r.auth = _)
agent.foreach(r.agent = _)
r.asInstanceOf[RequestOptions]
}
}
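// Usage sketch (illustrative, not part of the original file; host and path are
// hypothetical): only the fields that are set end up on the resulting literal.
object RequestOptionsSketch {
  val opts = RequestOptions(
    hostname = "example.com",
    port = 8080,
    method = "GET",
    path = "/index.html?page=12")
}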
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/Modules.scala | package fr.hmil.roshttp.node
import fr.hmil.roshttp.node.http.{Http, Https}
/**
* This object gives access to Node.js builtin modules without explicitly calling require.
*
* If a browser shim is used and is accessible in the global context, it is returned
* and no call to require() takes place.
*/
private[roshttp] object Modules {
object HttpModule extends Module("http", Http)
object HttpsModule extends Module("https", Https)
lazy val http: Http = HttpModule.api
lazy val https: Https = HttpsModule.api
}
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/Https.scala | package fr.hmil.roshttp.node.http
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
/**
* For our purposes, we can just pretend https has the same interface as http
*/
@js.native
private[roshttp] trait Https extends Http
@js.native
@JSName("https")
private[roshttp] object Https extends Https
|
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/JsEnvUtils.scala | package fr.hmil.roshttp
/*
Taken from http://japgolly.blogspot.fr/2016/03/scalajs-firefox-chrome-sbt.html
*/
import scala.scalajs.js.Dynamic.global
import scala.util.Try
private object JsEnvUtils {
/** Sample (real) values are:
* - Mozilla/5.0 (Unknown; Linux x86_64) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1
* - Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0
* - Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36
*/
val userAgent: String =
Try(global.navigator.userAgent.asInstanceOf[String]) getOrElse "Unknown"
// Check each browser
val isFirefox = userAgent contains "Firefox"
val isChrome = userAgent contains "Chrome"
val isRealBrowser = isFirefox || isChrome
// Or you can even just check if running in X
val isRunningInX = userAgent contains "X11"
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/util/HeaderMap.scala | package fr.hmil.roshttp.util
import fr.hmil.roshttp.util.HeaderMap.CaseInsensitiveString
import scala.collection.immutable.MapLike
/** A set of HTTP headers identified by case insensitive keys
*
* A Map[CaseInsensitiveString, String] would conform to the strict Map specification
* but it would make the API ugly, forcing the explicit usage of CaseInsensitiveString
* instead of string.
*
* That's why we have the HeaderMap class to represent HTTP headers in a map like
* interface which is nice to use. It is however not *exactly* a map, because
* two distinct keys can refer to the same entry if they are case-insensitively equivalent.
*
* @tparam B Required for MapLike implementation. Should always be set to String.
*/
class HeaderMap[B >: String] private(map: Map[CaseInsensitiveString, B] = Map())
extends Map[String, B]
with MapLike[String, B, HeaderMap[B]] {
override def empty: HeaderMap[B] = new HeaderMap(Map())
override def get(key: String): Option[B] = {
map.get(new CaseInsensitiveString(key))
}
override def iterator: Iterator[(String, B)] = {
map.map({ t => (t._1.value, t._2)}).iterator
}
override def +[B1 >: B](kv: (String, B1)): HeaderMap[B1] = {
val key = new CaseInsensitiveString(kv._1)
new HeaderMap[B1](map - key + (key -> kv._2))
}
override def -(key: String): HeaderMap[B] = {
new HeaderMap[B](map - new CaseInsensitiveString(key))
}
override def toString: String = {
map.map({ t => t._1 + ": " + t._2}).mkString("\n")
}
}
object HeaderMap {
/** Creates a HeaderMap from a map of string to string. */
def apply(map: Map[String, String]): HeaderMap[String] = new HeaderMap(
map.map(t => (new CaseInsensitiveString(t._1), t._2))
)
/** Creates an empty HeaderMap. */
def apply(): HeaderMap[String] = HeaderMap(Map())
/** A string whose equals and hashCode methods are case insensitive. */
class CaseInsensitiveString(val value: String) {
override def equals(other: Any): Boolean = other match {
case s:CaseInsensitiveString => s.value.equalsIgnoreCase(value)
case _ => false
}
override def hashCode(): Int = value.toLowerCase.hashCode
override def toString: String = value
}
}
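// Usage sketch (illustrative, not part of the original file): keys are matched
// case-insensitively and the latest insertion wins.
object HeaderMapSketch {
  val headers = HeaderMap(Map("Content-Type" -> "text/plain")) + ("content-type" -> "text/html")
  val ct = headers.get("CONTENT-TYPE") // Some("text/html")
}
|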
ThorsMjollnir/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/http/Agent.scala | package fr.hmil.roshttp.node.http
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobal
/**
* node http agent API.
*
* This facade is not complete!
*/
@js.native
@JSGlobal
private[roshttp] class Agent extends js.Object {
def this(options: AgentOptions) {
this()
}
// def createConnection(options: net.SocketOptions): net.Socket = js.native -- Not implemented here
// def createConnection(options: net.SocketOptions, js.Function): net.Socket = js.native -- Not implemented here
/**
* Destroy any sockets that are currently in use by the agent.
*
* It is usually not necessary to do this. However, if you are using an agent with
* KeepAlive enabled, then it is best to explicitly shut down the agent when you
* know that it will no longer be used. Otherwise, sockets may hang open for quite
* a long time before the server terminates them.
*/
def destroy(): Unit = js.native
// val freeSockets:
/**
* Get a unique name for a set of request options, to determine whether a connection
* can be reused. In the http agent, this returns host:port:localAddress.
* In the https agent, the name includes the CA, cert, ciphers, and other
* HTTPS/TLS-specific options that determine socket reusability.
*/
def getName(options: RequestOptions): String = js.native
/**
* By default set to 256. For Agents supporting HTTP KeepAlive, this sets the
* maximum number of sockets that will be left open in the free state.
*/
var maxFreeSockets: Integer = js.native
/**
* By default set to Infinity. Determines how many concurrent sockets the agent
* can have open per origin. Origin is either a 'host:port' or
* 'host:port:localAddress' combination.
*/
var maxSockets: Integer = js.native
/**
* An object which contains queues of requests that have not yet been assigned
* to sockets. Do not modify.
*/
// val requests
/**
* An object which contains arrays of sockets currently in use by the Agent.
* Do not modify.
*/
// val sockets: Seq[Socket] = js.native
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/exceptions/ResponseException.scala | package fr.hmil.roshttp.exceptions
import java.io.IOException
import fr.hmil.roshttp.response.HttpResponseHeader
/** Captures network errors occurring during reception of an HTTP response body.
*
* When this exception occurs, HTTP headers have already been received.
* The response header data is recovered in the header field.
*
* @see [[RequestException]]
*/
case class ResponseException private[roshttp](
cause: Throwable,
header: HttpResponseHeader)
extends IOException("A network error occurred during HTTP response transmission.", cause) |
ThorsMjollnir/RosHTTP | jvm/src/main/scala/fr/hmil/roshttp/JsEnvUtils.scala | package fr.hmil.roshttp
private object JsEnvUtils {
val userAgent: String = "jvm"
// Check each browser
val isFirefox = false
val isChrome = false
val isRealBrowser = false
// Or you can even just check if running in X
val isRunningInX = false
}
|
ThorsMjollnir/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/StreamBody.scala | package fr.hmil.roshttp.body
import java.nio.ByteBuffer
import monix.reactive.Observable
class StreamBody private(
override val content: Observable[ByteBuffer],
override val contentType: String
) extends BodyPart
object StreamBody {
def apply(data: Observable[ByteBuffer], contentType: String = "application/octet-stream"): StreamBody =
new StreamBody(data, contentType)
}
|
ThorsMjollnir/RosHTTP | shared/src/test/scala/fr/hmil/roshttp/HttpRequestSpec.scala | package fr.hmil.roshttp
import java.nio.ByteBuffer
import fr.hmil.roshttp.body.Implicits._
import fr.hmil.roshttp.body.JSONBody._
import fr.hmil.roshttp.body._
import fr.hmil.roshttp.exceptions._
import fr.hmil.roshttp.response.SimpleHttpResponse
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import utest._
object HttpRequestSpec extends TestSuite {
private val SERVER_URL = "http://localhost:3000"
/*
* Status codes defined in HTTP/1.1 spec
*/
private val goodStatus = List(
// We do not support 1xx status codes
200, 201, 202, 203, 204, 205, 206,
300, 301, 302, 303, 304, 305, 306, 307
)
private def badStatus = {
val base = List(
400, 401, 402, 403, 404, 405, 406, 408, 409,
410, 411, 412, 413, 414, 415, 416, 417,
500, 501, 502, 503, 504, 505
)
if (JsEnvUtils.isChrome) {
// Chrome does not support userspace 407 error handling
// see: https://bugs.chromium.org/p/chromium/issues/detail?id=372136
base
} else {
407 :: base
}
}
private val statusText = Map(
200 -> "OK",
201 -> "Created",
202 -> "Accepted",
203 -> "Non-Authoritative Information",
204 -> "",
205 -> "",
206 -> "Partial Content",
300 -> "Multiple Choices",
301 -> "Moved Permanently",
302 -> "Found",
303 -> "See Other",
304 -> "",
305 -> "Use Proxy",
306 -> "306",
307 -> "Temporary Redirect",
400 -> "Bad Request",
401 -> "Unauthorized",
402 -> "Payment Required",
403 -> "Forbidden",
404 -> "Not Found",
405 -> "Method Not Allowed",
406 -> "Not Acceptable",
407 -> "Proxy Authentication Required",
408 -> "Request Timeout",
409 -> "Conflict",
410 -> "Gone",
411 -> "Length Required",
412 -> "Precondition Failed",
413 -> "Payload Too Large",
414 -> "URI Too Long",
415 -> "Unsupported Media Type",
416 -> "Range Not Satisfiable",
417 -> "Expectation Failed",
500 -> "Internal Server Error",
501 -> "Not Implemented",
502 -> "Bad Gateway",
503 -> "Service Unavailable",
504 -> "Gateway Timeout",
505 -> "HTTP Version Not Supported"
)
private val legalMethods = {
val base = "GET" :: "POST" :: "HEAD" :: "OPTIONS" :: "PUT" :: "DELETE" :: Nil
if (JsEnvUtils.isRealBrowser) {
// The jvm cannot send PATCH requests
"PATCH" :: base
} else {
// Browsers cannot send TRACE requests
"TRACE" :: base
}
}
private val IMAGE_BYTES: Array[Byte] = List[Int](
0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00,
0x00, 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x06, 0x00, 0x00, 0x00, 0xC4, 0x0F, 0xBE, 0x8B, 0x00, 0x00, 0x00,
0x06, 0x62, 0x4B, 0x47, 0x44, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0xA0, 0xBD, 0xA7, 0x93, 0x00, 0x00, 0x00,
0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0B, 0x13, 0x00, 0x00, 0x0B, 0x13, 0x01, 0x00, 0x9A, 0x9C, 0x18,
0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4D, 0x45, 0x07, 0xE0, 0x05, 0x0A, 0x0B, 0x1A, 0x39, 0x9E, 0xB0, 0x43,
0x04, 0x00, 0x00, 0x00, 0xF2, 0x49, 0x44, 0x41, 0x54, 0x18, 0xD3, 0x45, 0xCD, 0xBD, 0x4A, 0xC3, 0x50, 0x1C,
0x40, 0xF1, 0x93, 0xFB, 0xBF, 0xB9, 0x4D, 0xD2, 0x56, 0xD4, 0x98, 0xA1, 0x14, 0x05, 0x51, 0x50, 0xA8, 0x76,
0x13, 0x1C, 0x14, 0xF1, 0x19, 0x7C, 0x07, 0x71, 0xE9, 0x03, 0x08, 0x4E, 0xBE, 0x85, 0x83, 0x83, 0x9B, 0x8B,
0x8F, 0xA0, 0x93, 0x64, 0xB0, 0xA0, 0x45, 0x07, 0x6D, 0xD0, 0xCD, 0x8F, 0x45, 0x84, 0x54, 0xA9, 0xB1, 0xF9,
0x72, 0x50, 0xE8, 0x99, 0x7F, 0x70, 0x2C, 0xFE, 0xBB, 0xBF, 0xB8, 0x6C, 0x3E, 0x77, 0xF6, 0x9A, 0x55, 0xB7,
0xB6, 0x5E, 0x29, 0xF3, 0x35, 0x67, 0x94, 0x6E, 0xB4, 0xEE, 0x7A, 0xF3, 0x16, 0xC0, 0xA9, 0x1F, 0xEC, 0x9A,
0xA2, 0x38, 0xF2, 0x6C, 0x83, 0xA7, 0x2C, 0x6A, 0xA2, 0x09, 0x1C, 0x27, 0x9E, 0x7D, 0x8A, 0x26, 0x35, 0xC0,
0x57, 0x59, 0x5A, 0x43, 0xC7, 0x61, 0xA0, 0x35, 0x6F, 0x65, 0x41, 0x94, 0x7C, 0x23, 0x9F, 0xB1, 0x02, 0xD0,
0x00, 0xFE, 0xE2, 0xC2, 0xB5, 0x2B, 0xF6, 0xAD, 0x79, 0x7D, 0x59, 0x6A, 0xA5, 0x59, 0xA5, 0xE3, 0x78, 0x50,
0xAD, 0xCB, 0xF2, 0x20, 0xFE, 0x03, 0x3F, 0xFD, 0x68, 0xB5, 0xAE, 0xED, 0x76, 0x43, 0x14, 0x13, 0x22, 0xF4,
0x8B, 0x9C, 0x30, 0x4B, 0x13, 0x00, 0x05, 0xF0, 0x61, 0x2A, 0x61, 0xB7, 0xBD, 0x72, 0x76, 0xEC, 0x4F, 0x0D,
0x0F, 0xB5, 0xE2, 0x3C, 0x4B, 0xD9, 0x16, 0x71, 0xC7, 0x0B, 0xCA, 0xCD, 0xC6, 0x4D, 0x6F, 0x67, 0xCB, 0x18,
0x5C, 0x11, 0x8C, 0x36, 0xB8, 0xDA, 0x1E, 0x03, 0x94, 0x4A, 0x64, 0x26, 0x88, 0xF2, 0x74, 0xF4, 0xC0, 0xB4,
0xFF, 0x2E, 0x62, 0x85, 0x73, 0xDD, 0xAB, 0x93, 0xC7, 0xFD, 0x03, 0x7E, 0x01, 0x01, 0x9A, 0x49, 0xCF, 0xD0,
0xA6, 0xE4, 0x8F, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4E, 0x44, 0xAE, 0x42, 0x60, 0x82).map(_.toByte).toArray
val tests = this{
"Meta" - {
"The test server should be reachable" - {
HttpRequest(SERVER_URL)
.send().map({ s => s.statusCode ==> 200 })
}
}
"Responses" - {
"with status codes < 400" - {
"should complete the request with success" - {
goodStatus.map(status => {
HttpRequest(SERVER_URL)
.withPath(s"/status/$status")
.send()
.map({ s =>
s.statusCode ==> status
})
}).reduce((f1, f2) => f1.flatMap(_ => f2))
}
}
"with status codes >= 400" - {
"should complete the request with failure" - {
badStatus.map(status =>
HttpRequest(SERVER_URL)
.withPath(s"/status/$status")
.send()
.map(r => r.headers("X-Status-Code") ==> r.statusCode)
.failed.map(_ => "success")
).reduce((f1, f2) => f1.flatMap(_ => f2))
}
}
"with redirects" - {
"follow redirects" - {
HttpRequest(SERVER_URL)
.withPath("/redirect/temporary/echo/redirected")
.send()
.map(res => {
res.body ==> "redirected"
})
}
}
"with timeout" - {
"Throw the appropriate exception" - {
HttpRequest(s"$SERVER_URL/no_response")
.send()
.onFailure {
case TimeoutException(_) => () // success
}
}
}
}
"Buffered responses" - {
"with status code >= 400" - {
"should provide a response body in error handler" - {
badStatus.map(status =>
HttpRequest(SERVER_URL)
.withPath(s"/status/$status")
.send()
.failed.map {
case HttpException(res: SimpleHttpResponse) =>
statusText(res.statusCode) ==> res.body
case e => throw new java.lang.AssertionError("Unexpected failure", e)
}
).reduce((f1, f2) => f1.flatMap(_ => f2))
}
"can be empty" - {
HttpRequest(s"$SERVER_URL/empty_body/400")
.send()
.failed
.map {
case HttpException(res: SimpleHttpResponse) =>
res.body ==> ""
}
}
}
"with status code < 400" - {
"can be empty" - {
HttpRequest(s"$SERVER_URL/empty_body/200")
.send()
.map(response => response.body ==> "")
}
}
"can be chunked and recomposed" - {
HttpRequest(s"$SERVER_URL/echo_repeat/foo")
.withQueryParameters(
"repeat" -> "4",
"delay" -> "1000")
.withBackendConfig(BackendConfig(maxChunkSize = 4))
.send()
.map(res => res.body ==> "foofoofoofoo")
}
"can contain multibyte characters" - {
val payload = "12\uD83D\uDCA978"
HttpRequest(s"$SERVER_URL/multibyte_string")
.send()
.map(res => res.body ==> payload)
}
"can contain multibyte characters split by chunk boundary" - {
val payload = "12\uD83D\uDCA978"
HttpRequest(s"$SERVER_URL/multibyte_string")
.withBackendConfig(BackendConfig(
maxChunkSize = 4
))
.send()
.map(res => res.body ==> payload)
}
}
"Streamed response body" - {
"work with a single chunk" - {
val greeting_bytes: ByteBuffer = ByteBuffer.wrap("Hello World!".getBytes)
HttpRequest(s"$SERVER_URL")
.stream()
.map({ r =>
// Take only the first element because the body is so short we know it will fit in one buffer
r.body.firstL.map(_.get ==> greeting_bytes)
})
}
"fail on bad status code" - {
HttpRequest(SERVER_URL)
.withPath(s"/status/400")
.stream()
.map(r => r.headers("X-Status-Code") ==> r.statusCode)
.failed.map(_ => "success")
}
"chunks are capped to chunkSize config" - {
val config = BackendConfig(maxChunkSize = 128)
HttpRequest(s"$SERVER_URL/resources/icon.png")
.withBackendConfig(config)
.stream()
.flatMap(_
.body
.map({buffer =>
assert(buffer.limit <= config.maxChunkSize)
})
.bufferTumbling(3)
.firstL.runAsync
)
}
}
"Query string" - {
"set in constructor" - {
"vanilla" - {
HttpRequest(s"$SERVER_URL/query?Hello%20world.")
.send()
.map(res => {
res.body ==> "Hello world."
})
}
"with illegal characters" - {
HttpRequest(s"$SERVER_URL/query?Heizölrückstoßabdämpfung%20+")
.send()
.map(res => {
res.body ==> "Heizölrückstoßabdämpfung +"
})
}
"with key-value pairs" - {
HttpRequest(s"$SERVER_URL/query/parsed?foo=bar&hello=world")
.send()
.map(res => {
res.body ==> "{\"foo\":\"bar\",\"hello\":\"world\"}"
})
}
}
"set in withQueryString" - {
"vanilla" - {
HttpRequest(s"$SERVER_URL/query")
.withQueryString("Hello world.")
.send()
.map(res => {
res.body ==> "Hello world."
})
}
"with illegal characters" - {
HttpRequest(s"$SERVER_URL/query")
.withQueryString("Heizölrückstoßabdämpfung %20+")
.send()
.map(res => {
res.body ==> "Heizölrückstoßabdämpfung %20+"
})
}
"is escaped" - {
HttpRequest(s"$SERVER_URL/query")
.withQueryString("Heizölrückstoßabdämpfung")
.queryString.get ==> "Heiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung"
}
}
"set in withRawQueryString" - {
HttpRequest(s"$SERVER_URL/query")
.withQueryStringRaw("Heiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung")
.send()
.map(res => {
res.body ==> "Heizölrückstoßabdämpfung"
})
}
"set in withQueryParameter" - {
"single" - {
HttpRequest(s"$SERVER_URL/query/parsed")
.withQueryParameter("device", "neon")
.send()
.map(res => {
res.body ==> "{\"device\":\"neon\"}"
})
}
"added in batch" - {
HttpRequest(s"$SERVER_URL/query/parsed")
.withQueryParameters(
"device" -> "neon",
"element" -> "argon")
.send()
.map(res => {
res.body ==> "{\"device\":\"neon\",\"element\":\"argon\"}"
})
}
"added in batch with illegal characters" - {
HttpRequest(s"$SERVER_URL/query/parsed")
.withQueryParameters(
" zařízení" -> "topný olej vůle potlačující",
"chäřac+=r&" -> "+Heizölrückstoßabdämpfung=r&")
.send()
.map(res => {
res.body ==> "{\" zařízení\":\"topný olej vůle potlačující\"," +
"\"chäřac+=r&\":\"+Heizölrückstoßabdämpfung=r&\"}"
})
}
"added in sequence" - {
HttpRequest(s"$SERVER_URL/query/parsed")
.withQueryParameters(
"element" -> "argon",
"device" -> "chair"
)
.withQueryParameter("tool", "hammer")
.withQueryParameter("device", "neon")
.send()
.map(res => {
res.body ==> "{\"element\":\"argon\",\"device\":[\"chair\",\"neon\"],\"tool\":\"hammer\"}"
})
}
"as list parameter" - {
HttpRequest(s"$SERVER_URL/query/parsed")
.withQuerySeqParameter("map", Seq("foo", "bar"))
.send()
.map(res => {
res.body ==> "{\"map\":[\"foo\",\"bar\"]}"
})
}
}
"removed" - {
val req = HttpRequest(s"$SERVER_URL/query/parsed")
.withQueryString("device=chair")
.withoutQueryString()
assert(req.queryString.isEmpty)
}
}
"Protocol" - {
"can be set to HTTP and HTTPS" - {
HttpRequest()
.withProtocol(Protocol.HTTP)
.withProtocol(Protocol.HTTPS)
}
}
"Request headers" - {
"Can be set with a map" - {
val headers = Map(
"accept" -> "text/html, application/xhtml",
"Cache-Control" -> "max-age=0",
"custom" -> "foobar")
val req = HttpRequest(s"$SERVER_URL/headers")
.withHeaders(headers.toSeq: _*)
// Test with corrected case
req.headers ==> headers
req.send().map(res => {
assert(res.body.contains("\"accept\":\"text/html, application/xhtml\""))
assert(res.body.contains("\"cache-control\":\"max-age=0\""))
assert(res.body.contains("\"custom\":\"foobar\""))
})
}
"Can be set individually" - {
val req = HttpRequest(s"$SERVER_URL/headers")
.withHeader("cache-control", "max-age=0")
.withHeader("Custom", "foobar")
req.headers ==> Map(
"cache-control" -> "max-age=0",
"Custom" -> "foobar")
req.send().map(res => {
assert(res.body.contains("\"cache-control\":\"max-age=0\""))
assert(res.body.contains("\"custom\":\"foobar\""))
})
}
"Overwrite previous value when set" - {
val req = HttpRequest(s"$SERVER_URL/headers")
.withHeaders(
"accept" -> "text/html, application/xhtml",
"Cache-Control" -> "max-age=0",
"custom" -> "foobar"
)
.withHeaders(
"Custom" -> "barbar",
"Accept" -> "application/json"
)
.withHeader("cache-control", "max-age=128")
req.headers ==> Map(
"cache-control" -> "max-age=128",
"Custom" -> "barbar",
"Accept" -> "application/json")
req.send().map(res => {
assert(res.body.contains("\"cache-control\":\"max-age=128\""))
assert(res.body.contains("\"custom\":\"barbar\""))
assert(res.body.contains("\"accept\":\"application/json\""))
})
}
"Override body content-type" - {
HttpRequest(s"$SERVER_URL/headers")
.withBody(PlainTextBody("Hello world"))
.withHeader("Content-Type", "text/html")
.withMethod(Method.POST)
.send()
.map(res => {
assert(res.body.contains("\"content-type\":\"text/html\""))
assert(!res.body.contains("\"content-type\":\"text/plain\""))
})
}
}
"Response headers" - {
"can be read in the general case" - {
HttpRequest(s"$SERVER_URL/")
.send()
.map({
res =>
res.headers("X-Powered-By") ==> "Express"
})
}
"can be read in the error case" - {
HttpRequest(s"$SERVER_URL/status/400")
.send()
.failed.map {
case HttpException(res: SimpleHttpResponse) =>
res.headers("X-Powered-By") ==> "Express"
}
}
}
"Http method" - {
"can be set to any legal value" - {
legalMethods.map(method =>
HttpRequest(s"$SERVER_URL/method")
.withMethod(Method(method))
.send()
.map(_.headers("X-Request-Method") ==> method)
).reduce((f1, f2) => f1.flatMap(_=>f2))
}
"ignores case and capitalizes" - {
legalMethods.map(method =>
HttpRequest(s"$SERVER_URL/method")
.withMethod(Method(method.toLowerCase))
.send()
.map(_.headers("X-Request-Method") ==> method)
).reduce((f1, f2) => f1.flatMap(_ => f2))
}
}
"Request body" - {
"Plain text" - {
"works with ASCII strings" - {
HttpRequest(s"$SERVER_URL/body")
.post(PlainTextBody("Hello world"))
.map({ res =>
res.body ==> "Hello world"
res.headers("Content-Type").toLowerCase ==> "text/plain; charset=utf-8"
})
}
"works with non-ASCII strings" - {
HttpRequest(s"$SERVER_URL/body")
.post(PlainTextBody("Heizölrückstoßabdämpfung"))
.map({ res =>
res.body ==> "Heizölrückstoßabdämpfung"
res.headers("Content-Type").toLowerCase ==> "text/plain; charset=utf-8"
})
}
}
"Multipart" - {
"works as intended" - {
val part = MultiPartBody(
"foo" -> PlainTextBody("bar"),
"engine" -> PlainTextBody("Heizölrückstoßabdämpfung"))
HttpRequest(s"$SERVER_URL/body")
.post(part)
.map({ res =>
res.body ==> "{\"foo\":\"bar\",\"engine\":\"Heizölrückstoßabdämpfung\"}"
res.headers("Content-Type").toLowerCase ==>
s"multipart/form-data; boundary=${part.boundary}; charset=utf-8"
})
}
}
"URL encoded" - {
"works as intended" - {
val part = URLEncodedBody(
"foo" -> "bar",
"engine" -> "Heizölrückstoßabdämpfung")
HttpRequest(s"$SERVER_URL/body")
.post(part)
.map({ res =>
res.body ==> "{\"foo\":\"bar\",\"engine\":\"Heizölrückstoßabdämpfung\"}"
res.headers("Content-Type").toLowerCase ==> s"application/x-www-form-urlencoded; charset=utf-8"
})
}
}
"JSON" - {
"works as intended" - {
val part = JSONObject(
"foo" -> 42,
"engine" -> "Heizölrückstoßabdämpfung",
"\"quoted'" -> "Has \" quotes")
HttpRequest(s"$SERVER_URL/body")
.post(part)
.map({ res =>
res.body ==> "{\"foo\":42,\"engine\":\"Heizölrückstoßabdämpfung\"," +
"\"\\\"quoted'\":\"Has \\\" quotes\"}"
res.headers("Content-Type").toLowerCase ==> s"application/json; charset=utf-8"
})
}
}
"Byte Buffer" - {
"can send a binary buffer" - {
HttpRequest(s"$SERVER_URL/compare/icon.png")
.post(ByteBufferBody(ByteBuffer.wrap(IMAGE_BYTES)))
}
}
"streamed" - {
"with wrapped array ByteBuffer" - {
"is properly sent" - {
HttpRequest(s"$SERVER_URL/compare/icon.png")
.post(
// Splits the image bytes into chunks to create a streamed body
StreamBody(
Observable.fromIterable(Seq(IMAGE_BYTES: _*)
.grouped(12)
.toSeq
).map(b => ByteBuffer.wrap(b.toArray))
)
)
}
}
"with native ByteBuffer" - {
"is properly sent" - {
val nativeBufferSeq = Seq(IMAGE_BYTES: _*)
.grouped(12)
.map({chunk =>
val b = ByteBuffer.allocateDirect(chunk.size)
var i = 0
while (i < chunk.size) {
b.put(chunk(i))
i += 1
}
b.rewind()
b
}).toSeq
HttpRequest(s"$SERVER_URL/compare/icon.png")
.post(StreamBody(Observable.fromIterable(nativeBufferSeq)))
}
}
"with read-only ByteBuffer" - {
"is properly sent" - {
val readOnlyBuffers = Observable.fromIterable(
Seq(IMAGE_BYTES: _*)
.grouped(12)
.toSeq)
.map({ b =>
val res = ByteBuffer.wrap(b.toArray).asReadOnlyBuffer()
assert(!res.hasArray)
res
})
HttpRequest(s"$SERVER_URL/compare/icon.png")
.post(StreamBody(readOnlyBuffers))
.recover {
case e: UploadStreamException =>
e.printStackTrace()
throw e
}
}
}
"embedded in multipart" - {
"handles errors correctly" - {
def stateAction(i: Int) = {
if (i == 0) throw new Exception("Stream error")
(ByteBuffer.allocate(1), i - 1)
}
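          // fromStateAction below starts from state 3 and counts down; reaching 0 throws,
          // which should surface as an UploadStreamException during the upload.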
HttpRequest(s"$SERVER_URL/does_not_exist")
.post(MultiPartBody("stream" -> StreamBody(Observable.fromStateAction(stateAction)(3))))
.recover({
case e: UploadStreamException => e
})
}
"is properly sent" - {
val part = MultiPartBody(
"stream" -> StreamBody(Observable.fromIterator(new Iterator[ByteBuffer]() {
private var emitted = false
override def hasNext: Boolean = !emitted
override def next(): ByteBuffer = {
emitted = true
ByteBuffer.wrap("Bonjour.".getBytes)
}
})))
HttpRequest(s"$SERVER_URL/body")
.post(part)
.map({ res =>
res.body ==> "{\"stream\":\"Bonjour.\"}"
})
}
}
"handles errors correctly" - {
HttpRequest(s"$SERVER_URL/does_not_exist")
.post(StreamBody(
Observable.fromStateAction({ i: Int =>
if (i == 0) throw new Exception("Stream error")
(ByteBuffer.allocate(1), i - 1)
})(3)
))
.recover({
case e: UploadStreamException => e
})
}
}
}
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/ArrayHelperSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import munit.FunSuite
import java.util.UUID
class ArrayHelperSuite extends FunSuite {
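  // The merger(limit) helper under test merges pre-sorted inputs while keeping
  // at most `limit` of the smallest elements; the overload taking a comparator
  // and aggregator also combines entries that share the same key.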
test("merge arrays, limit 1: empty, one") {
val v1 = Array.empty[String]
val v2 = Array("a")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a").toSeq)
}
test("merge arrays, limit 1: empty, abcde") {
val v1 = Array.empty[String]
val v2 = Array("a", "b", "c", "d", "e")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a").toSeq)
}
test("merge arrays, limit 1: abcde, empty") {
val v1 = Array("a", "b", "c", "d", "e")
val v2 = Array.empty[String]
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a").toSeq)
}
test("merge arrays, limit 1: b, a") {
val v1 = Array("b")
val v2 = Array("a")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a").toSeq)
}
test("merge arrays, limit 1: a, b") {
val v1 = Array("a")
val v2 = Array("b")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a").toSeq)
}
test("merge arrays, limit 2: b, a") {
val v1 = Array("b")
val v2 = Array("a")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a", "b").toSeq)
}
test("merge arrays, limit 2: ab, a") {
val v1 = Array("a", "b")
val v2 = Array("a")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a", "b").toSeq)
}
test("merge arrays, limit 2: aggregate duplicates") {
type T = (String, Int)
val v1 = Array("a" -> 1, "b" -> 1)
val v2 = Array("a" -> 1)
val actual = ArrayHelper
.merger[T](2, (a: T, b: T) => a._1.compareTo(b._1), (a: T, b: T) => a._1 -> (a._2 + b._2))
.merge(v1)
.merge(v2)
.toArray
assertEquals(actual.toSeq, Array("a" -> 2, "b" -> 1).toSeq)
}
test("merge arrays, limit 2: bc, ad") {
val v1 = Array("b", "c")
val v2 = Array("a", "d")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toArray
assertEquals(actual.toSeq, Array("a", "b").toSeq)
}
test("merge list, limit 1: empty, one") {
val v1 = List.empty[String]
val v2 = List("a")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toList
assertEquals(actual, List("a"))
}
test("merge list, limit 1: empty, abcde") {
val v1 = List.empty[String]
val v2 = List("a", "b", "c", "d", "e")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toList
assertEquals(actual, List("a"))
}
test("merge list, limit 1: abcde, empty") {
val v1 = List("a", "b", "c", "d", "e")
val v2 = List.empty[String]
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toList
assertEquals(actual, List("a"))
}
test("merge list, limit 1: b, a") {
val v1 = List("b")
val v2 = List("a")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toList
assertEquals(actual, List("a"))
}
test("merge list, limit 1: a, b") {
val v1 = List("a")
val v2 = List("b")
val actual = ArrayHelper.merger[String](1).merge(v1).merge(v2).toList
assertEquals(actual, List("a"))
}
test("merge list, limit 2: b, a") {
val v1 = List("b")
val v2 = List("a")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toList
assertEquals(actual, List("a", "b"))
}
test("merge list, limit 2: ab, a") {
val v1 = List("a", "b")
val v2 = List("a")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toList
assertEquals(actual, List("a", "b"))
}
test("merge list, limit 2: bc, ad") {
val v1 = List("b", "c")
val v2 = List("a", "d")
val actual = ArrayHelper.merger[String](2).merge(v1).merge(v2).toList
assertEquals(actual, List("a", "b"))
}
test("sortAndDedup, empty") {
val vs = Array.empty[String]
val length = ArrayHelper.sortAndDedup(vs)
assertEquals(length, 0)
}
test("sortAndDedup, one item") {
val vs = Array("a")
val length = ArrayHelper.sortAndDedup(vs)
assertEquals(length, 1)
}
test("sortAndDedup, one item after dedup") {
val vs = Array("a", "a", "a")
val length = ArrayHelper.sortAndDedup(vs)
assertEquals(length, 1)
}
test("sortAndDedup, several items") {
val vs = Array("c", "a", "b", "a")
val length = ArrayHelper.sortAndDedup(vs)
assertEquals(length, 3)
assertEquals(vs.toSeq.take(length), Seq("a", "b", "c"))
}
test("sortAndDedup, several items aggregate") {
type T = (String, Int)
val vs = Array("c", "a", "b", "b", "a", "d", "a").map(k => k -> 1)
val length = ArrayHelper.sortAndDedup(
(a: T, b: T) => a._1.compareTo(b._1),
(a: T, b: T) => a._1 -> (a._2 + b._2),
vs,
vs.length
)
assertEquals(length, 4)
assertEquals(vs.toSeq.take(length), Seq("a" -> 3, "b" -> 2, "c" -> 1, "d" -> 1))
}
test("sortAndDedup, partially filled array") {
type T = (String, Int)
val vs = Array("c", "a", "b", "b", "a", "d", "a", null, null)
.map(k => if (k == null) null else k -> 1)
val length = ArrayHelper.sortAndDedup(
(a: T, b: T) => a._1.compareTo(b._1),
(a: T, b: T) => a._1 -> (a._2 + b._2),
vs,
7
)
assertEquals(length, 4)
assertEquals(vs.toSeq.take(length), Seq("a" -> 3, "b" -> 2, "c" -> 1, "d" -> 1))
}
test("sortPairs, empty") {
val data = Array.empty[String]
ArrayHelper.sortPairs(data)
}
test("sortPairs, single pair") {
val data = Array("b", "1")
ArrayHelper.sortPairs(data)
val expected = Array("b", "1")
assertEquals(expected.toList, data.toList)
}
test("sortPairs, two pairs") {
val data = Array("b", "1", "a", "2")
ArrayHelper.sortPairs(data)
val expected = Array("a", "2", "b", "1")
assertEquals(expected.toList, data.toList)
}
test("sortPairs, random") {
val input = (0 until 50).map(i => UUID.randomUUID().toString -> i.toString)
val data = input.flatMap(t => List(t._1, t._2)).toArray
ArrayHelper.sortPairs(data)
val expected = input.toList.sortWith(_._1 < _._1).flatMap(t => List(t._1, t._2))
assertEquals(expected, data.toList)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/norm/RateValueFunctionSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
import munit.FunSuite
class RateValueFunctionSuite extends FunSuite {
private def newFunction = {
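    // Wire the two functions together so values pushed into the list function
    // flow through the rate function, letting tests observe what it emits.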
val listVF = new ListValueFunction
val rateVF = new RateValueFunction(listVF)
listVF.f = rateVF
listVF
}
test("basic") {
val vf = newFunction
assertEquals(vf.update(5L, 1.0), Nil)
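    // Assuming millisecond timestamps, the rate is scaled to per second:
    // (2.0 - 1.0) / (15 - 5) ms = 100.0/s, then (4.0 - 2.0) / 10 ms = 200.0/s.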
assertEquals(vf.update(15L, 2.0), List(15L -> 100.0))
assertEquals(vf.update(25L, 4.0), List(25L -> 200.0))
}
test("decreasing value") {
val vf = newFunction
assertEquals(vf.update(5, 1.0), Nil)
assertEquals(vf.update(15, 2.0), List(15L -> 100.0))
assertEquals(vf.update(25, 1.0), List(25L -> 0.0))
}
}
|
dmuino/atlas | atlas-postgres/src/main/scala/com/netflix/atlas/postgres/CopyBuffer.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.postgres
import com.netflix.atlas.core.model.ItemId
import com.netflix.atlas.core.util.SortedTagMap
import org.postgresql.copy.CopyManager
/**
* Buffer for storing data in memory in a format that can be passed to the Postgres COPY
* operation. The caller is responsible for knowing the table format and putting values
* in the expected order. Typical usage:
*
* ```
* val buffer = ...
* rows.foreach { row =>
* buffer.putString(row.column(1)).putInt(row.column(2))
* if (!buffer.nextRow()) {
* buffer.copyIn(copyManager, tableName)
* buffer.putString(row.column(1)).putInt(row.column(2)).nextRow()
* }
* }
* buffer.copyIn(copyManager, tableName)
* ```
*/
trait CopyBuffer {
/** Put an id column value into the buffer. */
def putId(id: ItemId): CopyBuffer
/** Put a string column value into the buffer. */
def putString(str: String): CopyBuffer
/** Put a JSON column value into the buffer. */
def putTagsJson(tags: SortedTagMap): CopyBuffer
/** Put a JSONB column value into the buffer. */
def putTagsJsonb(tags: SortedTagMap): CopyBuffer
/** Put an HSTORE column value into the buffer. */
def putTagsHstore(tags: SortedTagMap): CopyBuffer
/** Put a JSON string column value as text into the buffer. */
def putTagsText(tags: SortedTagMap): CopyBuffer
/** Put a signed 2-byte integer column value into the buffer. */
def putShort(value: Short): CopyBuffer
/** Put a signed 4-byte integer column value into the buffer. */
def putInt(value: Int): CopyBuffer
/** Put a signed 8-byte integer column value into the buffer. */
def putLong(value: Long): CopyBuffer
/** Put an 8-byte floating point column value into the buffer. */
def putDouble(value: Double): CopyBuffer
/** Put an array of 8-byte floating point column value into the buffer. */
def putDoubleArray(values: Array[Double]): CopyBuffer
/**
* Indicate the end of the row and prepare for the next. Returns true if the row
* was able to fit in the buffer. Otherwise the buffer should be copied to Postgres
* and cleared before re-adding the row.
*
* If a single row is too big to fit in the buffer, then an IllegalStateException
* will be thrown to avoid an endless loop.
*/
def nextRow(): Boolean
/** Returns true if there is space remaining in the buffer. */
def hasRemaining: Boolean
/** Returns the number of bytes remaining in the buffer. */
def remaining: Int
/** Returns the number of completed rows that are in the buffer. */
def rows: Int
/** Clear the contents of the buffer so it can be reused. */
def clear(): Unit
/** Copy the data in this buffer into the specified table. */
def copyIn(copyManager: CopyManager, table: String): Unit
}
|
dmuino/atlas | atlas-akka-testkit/src/main/scala/com/netflix/atlas/akka/testkit/MUnitRouteSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.akka.testkit
import akka.http.scaladsl.server.ExceptionHandler
import akka.http.scaladsl.testkit.RouteTest
import akka.http.scaladsl.testkit.TestFrameworkInterface
import munit.FunSuite
abstract class MUnitRouteSuite extends FunSuite with RouteTest with TestFrameworkInterface {
override def failTest(msg: String): Nothing = {
fail(msg)
}
override def testExceptionHandler: ExceptionHandler = ExceptionHandler {
case e: Exception => throw e
case e: AssertionError => throw e
}
}
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/validation/HasKeyRule.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.validation
import com.netflix.atlas.core.util.IdMap
import com.netflix.atlas.core.util.SmallHashMap
import com.netflix.atlas.core.util.SortedTagMap
import com.netflix.spectator.api.Id
import com.typesafe.config.Config
/**
* Verifies that the tags contain a specified key. Sample config:
*
* ```
* key = name
* ```
*/
case class HasKeyRule(key: String) extends Rule {
override def validate(tags: SmallHashMap[String, String]): ValidationResult = {
if (tags.contains(key))
ValidationResult.Pass
else
failure(s"missing key '$key'", tags)
}
override def validate(tags: SortedTagMap): ValidationResult = {
if (tags.contains(key))
ValidationResult.Pass
else
failure(s"missing key '$key'", tags)
}
private def containsKey(id: Id): Boolean = {
val size = id.size()
var i = 1 // skip name
while (i < size) {
val k = id.getKey(i)
// The id tags are sorted by key, so if the search key is less
// than the key from the id, then it will not be present and we
// can short circuit the check.
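      // For example, searching for "cluster" when the id keys are
      // ("app", "region"): at k = "region", "cluster" <= "region" holds, so we
      // return "cluster" == "region", i.e. false, without scanning further.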
if (key <= k) return key == k
i += 1
}
false
}
override def validate(id: Id): ValidationResult = {
if (key == "name" || containsKey(id))
ValidationResult.Pass
else
failure(s"missing key '$key'", IdMap(id))
}
}
object HasKeyRule {
def apply(config: Config): HasKeyRule = {
val key = config.getString("key")
new HasKeyRule(key)
}
}
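// Usage sketch (illustrative, not part of the original file): the rule can be
// built directly or from config, mirroring the sample in the class docs.
//
//   val rule = HasKeyRule(ConfigFactory.parseString("""key = "name""""))
//   rule.validate(SortedTagMap("name" -> "foo"))  // ValidationResult.Pass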
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/util/FastGzipOutputStream.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import java.io.OutputStream
import java.util.zip.Deflater
import java.util.zip.GZIPOutputStream
/** Wrap GZIPOutputStream to set the best speed compression level. */
final class FastGzipOutputStream(out: OutputStream) extends GZIPOutputStream(out) {
`def`.setLevel(Deflater.BEST_SPEED)
}
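// Usage sketch (illustrative, not part of the original file): substitute this
// class anywhere a GZIPOutputStream would otherwise be constructed, e.g.
//
//   Using.resource(new FastGzipOutputStream(Files.newOutputStream(path))) { out =>
//     out.write(bytes)
//   }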
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/CardinalityEstimatorSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import munit.FunSuite
import scala.util.Using
class CardinalityEstimatorSuite extends FunSuite {
private def check(estimator: CardinalityEstimator, values: Seq[String]): Unit = {
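    // Feed values one at a time and accumulate the percent error of the running
    // estimate against the true distinct count seen so far.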
var errorSum = 0.0
values.zipWithIndex.foreach {
case (v, i) =>
estimator.update(v)
val actual = i + 1
val estimate = estimator.cardinality
val percentError = 100.0 * math.abs(estimate - actual) / actual
errorSum += percentError
}
val avgPctError = errorSum / values.size
assert(avgPctError < 5, "error should be less than 5%")
}
test("estimate sha1 string") {
val values = Using.resource(Streams.resource("cardinalityEstimator.txt")) { in =>
Streams.lines(in).toList
}
check(CardinalityEstimator.newEstimator(), values.slice(0, 1))
check(CardinalityEstimator.newEstimator(), values.slice(0, 10))
check(CardinalityEstimator.newEstimator(), values.slice(0, 100))
check(CardinalityEstimator.newEstimator(), values.slice(0, 200))
}
test("estimate int string") {
// verify reasonably accurate estimate with strings that are fairly similar
val values = (0 until 1000).map(_.toString)
check(CardinalityEstimator.newEstimator(), values.slice(0, 1))
check(CardinalityEstimator.newEstimator(), values.slice(0, 10))
check(CardinalityEstimator.newEstimator(), values.slice(0, 100))
check(CardinalityEstimator.newEstimator(), values.slice(0, 200))
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/ByteBufferInputStreamSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import munit.FunSuite
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import scala.util.Using
class ByteBufferInputStreamSuite extends FunSuite {
private def wrap(str: String): ByteBuffer = {
ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
}
test("read()") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
assertEquals(in.read(), 'a'.toInt)
assertEquals(in.read(), 'b'.toInt)
assertEquals(in.read(), 'c'.toInt)
assertEquals(in.read(), -1)
}
test("read(buf)") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
val array = new Array[Byte](2)
assertEquals(in.read(array), 2)
assertEquals(array.toSeq, Array[Byte]('a', 'b').toSeq)
assertEquals(in.read(array), 1)
assertEquals(array.toSeq, Array[Byte]('c', 'b').toSeq) // b left over from previous
assertEquals(in.read(array), -1)
}
test("read(buf, offset, length)") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
val array = new Array[Byte](5)
assertEquals(in.read(array, 2, 3), 3)
assertEquals(array.toSeq, Array[Byte]('\u0000', '\u0000', 'a', 'b', 'c').toSeq)
assertEquals(in.read(array), -1)
}
test("available()") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
assertEquals(in.available(), 3)
assertEquals(in.skip(2), 2L)
assertEquals(in.available(), 1)
assertEquals(in.read(), 'c'.toInt)
assertEquals(in.available(), 0)
}
test("skip()") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
assertEquals(in.skip(2), 2L)
assertEquals(in.read(), 'c'.toInt)
assertEquals(in.read(), -1)
assertEquals(in.skip(2), 0L)
}
test("mark() and reset()") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
assert(in.markSupported())
assertEquals(in.read(), 'a'.toInt)
in.mark(5)
assertEquals(in.read(), 'b'.toInt)
assertEquals(in.read(), 'c'.toInt)
in.reset()
assertEquals(in.read(), 'b'.toInt)
in.reset()
assertEquals(in.read(), 'b'.toInt)
}
test("close()") {
val buffer = wrap("abc")
val in = new ByteBufferInputStream(buffer)
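    // Each pass closes the stream via Using.resource; close() appears to reset
    // the underlying buffer, since every iteration reads 'a' from the start again.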
(0 until 10).foreach { _ =>
Using.resource(in) { r =>
assertEquals(r.read(), 'a'.toInt)
r.skip(100)
assertEquals(r.read(), -1)
}
}
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/IdMapSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import com.netflix.spectator.api.Id
import munit.FunSuite
class IdMapSuite extends FunSuite {
test("basic map") {
assertEquals(Map("name" -> "foo"), IdMap(Id.create("foo")))
}
test("removed") {
val id = Id.create("foo").withTags("a", "1", "b", "2")
assertEquals(IdMap(id).removed("name"), Map("a" -> "1", "b" -> "2"))
assertEquals(IdMap(id).removed("a"), Map("name" -> "foo", "b" -> "2"))
}
test("updated") {
val id = Id.create("foo").withTags("a", "1")
assertEquals(IdMap(id).updated("b", "2"), Map("name" -> "foo", "a" -> "1", "b" -> "2"))
}
test("get") {
val id = Id.create("foo").withTags("a", "1", "b", "2")
assertEquals(IdMap(id).get("name"), Some("foo"))
assertEquals(IdMap(id).get("a"), Some("1"))
assertEquals(IdMap(id).get("b"), Some("2"))
assertEquals(IdMap(id).get("c"), None)
}
test("iterator") {
val id = Id.create("foo").withTags("a", "1", "b", "2")
val expected = List(
"name" -> "foo",
"a" -> "1",
"b" -> "2"
)
assertEquals(IdMap(id).iterator.toList, expected)
}
test("foreachEntry") {
val id = Id.create("foo").withTags("a", "1", "b", "2")
val builder = List.newBuilder[(String, String)]
IdMap(id).foreachEntry { (k, v) =>
builder += k -> v
}
val actual = builder.result()
val expected = List(
"name" -> "foo",
"a" -> "1",
"b" -> "2"
)
assertEquals(actual, expected)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/model/NamedRewriteSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import java.time.Duration
import com.netflix.atlas.core.stacklang.Interpreter
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class NamedRewriteSuite extends FunSuite {
private val config = ConfigFactory.parseString("""
|atlas.core.vocabulary {
    |  words = []
    |  custom-averages = [
    |    {
    |      name = "node-avg"
    |      base-query = "name,numNodes,:eq"
    |      keys = ["app"]
    |    }
    |  ]
|}
""".stripMargin)
private val interpreter = Interpreter(new CustomVocabulary(config).allWords)
private def rawEval(program: String): List[StyleExpr] = {
interpreter.execute(program).stack.flatMap {
case ModelExtractors.PresentationType(t) => t.perOffset
case v => throw new MatchError(v)
}
}
private def eval(program: String): List[StyleExpr] = {
interpreter.execute(program).stack.flatMap {
case ModelExtractors.PresentationType(t) =>
val expanded = t.rewrite {
case nr: MathExpr.NamedRewrite => nr.evalExpr
}
expanded.asInstanceOf[StyleExpr].perOffset
case v =>
throw new MatchError(v)
}
}
test("avg") {
val actual = eval("name,a,:eq,:avg")
val expected = eval("name,a,:eq,:sum,name,a,:eq,:count,:div")
assertEquals(actual, expected)
}
test("avg with group by") {
val actual = eval("name,a,:eq,:avg,(,name,),:by")
val expected = eval("name,a,:eq,:sum,name,a,:eq,:count,:div,(,name,),:by")
assertEquals(actual, expected)
}
test("dist-max") {
val actual = eval("name,a,:eq,:dist-max")
val expected = eval("statistic,max,:eq,name,a,:eq,:and,:max")
assertEquals(actual, expected)
}
test("dist-max with group by") {
val actual = eval("name,a,:eq,:dist-max,(,name,),:by")
val expected = eval("statistic,max,:eq,name,a,:eq,:and,:max,(,name,),:by")
assertEquals(actual, expected)
}
test("dist-max with offset") {
val actual = eval("name,a,:eq,:dist-max,1h,:offset")
val expected = eval("statistic,max,:eq,name,a,:eq,:and,:max,1h,:offset")
assertEquals(actual, expected)
}
test("dist-avg") {
val actual = eval("name,a,:eq,:dist-avg")
val expected = eval(
"statistic,(,totalTime,totalAmount,),:in,:sum,statistic,count,:eq,:sum,:div,name,a,:eq,:cq"
)
assertEquals(actual, expected)
}
test("dist-avg with group by") {
val actual = eval("name,a,:eq,:dist-avg,(,name,),:by")
val expected = eval(
"statistic,(,totalTime,totalAmount,),:in,:sum,statistic,count,:eq,:sum,:div,name,a,:eq,:cq,(,name,),:by"
)
assertEquals(actual, expected)
}
test("avg, group by with offset") {
val actual = eval("name,a,:eq,:avg,(,b,),:by,1h,:offset")
val expected = eval("name,a,:eq,:dup,:sum,:swap,:count,:div,(,b,),:by,1h,:offset")
assertEquals(actual, expected)
}
test("avg, group by, max with offset") {
val actual = eval("name,a,:eq,:avg,(,b,),:by,:max,1h,:offset")
val expected = eval("name,a,:eq,:dup,:sum,:swap,:count,:div,1h,:offset,(,b,),:by,:max")
assertEquals(actual, expected)
}
test("node-avg, group by, max with offset") {
val actual = eval("name,a,:eq,:node-avg,(,app,),:by,:max,1h,:offset")
val expected = eval("name,a,:eq,name,numNodes,:eq,:div,1h,:offset,(,app,),:by,:max")
assertEquals(actual, expected)
}
test("node-avg, offset maintained after query rewrite") {
val exprs = rawEval("name,a,:eq,:node-avg,1h,:offset").map { expr =>
expr.rewrite {
case q: Query => Query.And(q, Query.Equal("region", "east"))
}
}
val offsets = exprs
.collect {
case t: StyleExpr =>
t.expr.dataExprs.map(_.offset)
}
.flatten
.distinct
assertEquals(offsets, List(Duration.ofHours(1)))
}
test("node-avg, group by, offset maintained after query rewrite") {
val exprs = rawEval("name,a,:eq,:node-avg,(,app,),:by,1h,:offset").map { expr =>
expr.rewrite {
case q: Query => Query.And(q, Query.Equal("region", "east"))
}
}
val offsets = exprs
.collect {
case t: StyleExpr =>
t.expr.dataExprs.map(_.offset)
}
.flatten
.distinct
assertEquals(offsets, List(Duration.ofHours(1)))
}
test("percentiles, offset maintained after query rewrite") {
val exprs = rawEval("name,a,:eq,(,99,),:percentiles,1h,:offset").map { expr =>
expr.rewrite {
case q: Query => Query.And(q, Query.Equal("region", "east"))
}
}
val offsets = exprs
.collect {
case t: StyleExpr =>
t.expr.dataExprs.map(_.offset)
}
.flatten
.distinct
assertEquals(offsets, List(Duration.ofHours(1)))
}
// https://github.com/Netflix/atlas/issues/809
test("percentiles, offset maintained in toString after query rewrite") {
val exprs = rawEval("name,a,:eq,(,99,),:percentiles,1h,:offset").map { expr =>
expr.rewrite {
case q: Query => Query.And(q, Query.Equal("region", "east"))
}
}
val actual = exprs.mkString(",")
val expected = "name,a,:eq,region,east,:eq,:and,(,99.0,),:percentiles,PT1H,:offset"
assertEquals(actual, expected)
}
test("freeze works with named rewrite, cq") {
val actual = eval("name,a,:eq,:freeze,name,b,:eq,:avg,:list,(,app,foo,:eq,:cq,),:each")
val expected = eval("name,a,:eq,name,b,:eq,:avg,app,foo,:eq,:cq")
assertEquals(actual, expected)
}
test("freeze works with named rewrite, add") {
val actual = eval("name,a,:eq,:freeze,name,b,:eq,:avg,:list,(,42,:add,),:each")
val expected = eval("name,a,:eq,name,b,:eq,:avg,42,:add")
assertEquals(actual, expected)
}
test("pct rewrite") {
val actual = eval("name,a,:eq,(,b,),:by,:pct")
val expected = eval("name,a,:eq,(,b,),:by,:dup,:sum,:div,100,:mul")
assertEquals(actual, expected)
}
test("pct rewrite with cq") {
val actual = eval("name,a,:eq,(,b,),:by,:pct,c,:has,:cq")
val expected = eval("name,a,:eq,(,b,),:by,:dup,:sum,:div,100,:mul,c,:has,:cq")
assertEquals(actual, expected)
}
test("pct after binary op") {
val actual = eval("name,a,:eq,(,b,),:by,10,:mul,:pct")
val expected = eval("name,a,:eq,(,b,),:by,10,:mul,:dup,:sum,:div,100,:mul")
assertEquals(actual, expected)
}
// https://github.com/Netflix/atlas/issues/791
test("pct after binary op with cq") {
val actual = eval("name,a,:eq,(,b,),:by,10,:mul,:pct,c,:has,:cq")
val expected = eval("name,a,:eq,(,b,),:by,10,:mul,:dup,:sum,:div,100,:mul,c,:has,:cq")
assertEquals(actual, expected)
}
test("issue-763: avg with cf-max") {
val actual = eval("name,a,:eq,:avg,:cf-max")
val expected = eval("name,a,:eq,:sum,:cf-max,name,a,:eq,:count,:cf-max,:div")
assertEquals(actual, expected)
}
test("issue-1021: offset with des macros, af") {
val actual = eval("name,a,:eq,:des-fast,1w,:offset,foo,bar,:eq,:cq")
val expected = eval("name,a,:eq,foo,bar,:eq,:and,:des-fast,1w,:offset")
assertEquals(actual, expected)
}
test("issue-1021: offset with des macros, math") {
val actual = eval("name,a,:eq,:sum,:des-fast,1w,:offset,foo,bar,:eq,:cq")
val expected = eval("name,a,:eq,foo,bar,:eq,:and,:sum,:des-fast,1w,:offset")
assertEquals(actual, expected)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/validation/RuleSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.validation
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class RuleSuite extends FunSuite {
private val config =
ConfigFactory.parseString(
"""
|rules = [
| {
| class = "com.netflix.atlas.core.validation.HasKeyRule"
| key = "name"
| },
| {
| class = "com.netflix.atlas.core.validation.KeyLengthRule"
| min-length = 2
| max-length = 60
| },
| {
| class = "com.netflix.atlas.core.validation.ValueLengthRule"
| min-length = 1
| max-length = 120
| },
| {
| class = "com.netflix.atlas.core.validation.ValidCharactersRule"
| },
| {
| class = "com.netflix.atlas.core.validation.KeyPatternRule"
| pattern = "^[-_.a-zA-Z0-9]+$"
| },
| {
| class = "com.netflix.atlas.core.validation.ValuePatternRule"
| pattern = "^[-_.a-zA-Z0-9]+$"
| },
| {
| class = "com.netflix.atlas.core.validation.MaxUserTagsRule"
| limit = 20
| },
| {
| class = "com.netflix.atlas.core.validation.ConfigConstructorTestRule"
| },
| {
| class = "com.netflix.atlas.core.validation.JavaTestRule"
| }
|]
""".stripMargin
)
test("load") {
val rules = Rule.load(config.getConfigList("rules"))
assertEquals(rules.size, 9)
}
test("load, useComposite") {
val rules = Rule.load(config.getConfigList("rules"), true)
assertEquals(rules.size, 3)
assert(rules.head.isInstanceOf[CompositeTagRule])
assertEquals(rules.head.asInstanceOf[CompositeTagRule].tagRules.size, 7)
}
test("validate ok") {
val rules = Rule.load(config.getConfigList("rules"))
val res = Rule.validate(Map("name" -> "foo", "status" -> "2xx"), rules)
assert(res.isSuccess)
}
test("validate failure") {
val rules = Rule.load(config.getConfigList("rules"))
val res = Rule.validate(Map("name" -> "foo", "status" -> "2 xx"), rules)
assert(res.isFailure)
}
}
|
dmuino/atlas | atlas-postgres/src/test/scala/com/netflix/atlas/postgres/TextCopyBufferSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.postgres
import com.netflix.atlas.core.model.ItemIdCalculator
import com.netflix.atlas.core.util.SortedTagMap
import munit.FunSuite
import java.io.Reader
class TextCopyBufferSuite extends FunSuite {
test("too small") {
intercept[IllegalArgumentException] {
new TextCopyBuffer(0)
}
}
test("avoid endless loop") {
val buffer = new TextCopyBuffer(1)
assert(buffer.putString("").nextRow())
buffer.clear()
intercept[IllegalStateException] {
buffer.putString("foo").nextRow()
}
}
test("putId") {
val buffer = new TextCopyBuffer(100)
val id = ItemIdCalculator.compute(SortedTagMap("a" -> "1"))
buffer.putId(id)
assertEquals(buffer.toString, s"$id\t")
}
test("putString") {
val buffer = new TextCopyBuffer(100)
buffer.putString("foo")
assertEquals(buffer.toString, "foo\t")
}
test("putString null") {
val buffer = new TextCopyBuffer(100)
buffer.putString(null)
assertEquals(buffer.toString, "\\N\t")
}
test("putString escape") {
val buffer = new TextCopyBuffer(100)
buffer.putString("\b\f\n\r\t\u000b\"\\")
assertEquals(buffer.toString, "\\b\\f\\n\\r\\t\\v\\\"\\\\\t")
}
test("putString escape disabled") {
val buffer = new TextCopyBuffer(100, false)
buffer.putString("\b\f\n\r\t\u000b\"\\")
assertEquals(buffer.toString, "\b\f\n\r\t\u000b\"\\\t")
}
test("putString not enough space") {
val buffer = new TextCopyBuffer(2)
buffer.putString("foo")
assert(!buffer.hasRemaining)
}
test("putString not enough space after escaping") {
val buffer = new TextCopyBuffer(6)
buffer.putString("foo\n\t")
assert(!buffer.hasRemaining)
}
test("putTagsJson") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap("a" -> "1", "b" -> "2")
buffer.putTagsJson(tags)
assertEquals(buffer.toString, "{\"a\":\"1\",\"b\":\"2\"}\t")
}
test("putTagsJson empty") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap.empty
buffer.putTagsJson(tags)
assertEquals(buffer.toString, "{}\t")
}
test("putTagsJsonb") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap("a" -> "1", "b" -> "2")
buffer.putTagsJsonb(tags)
assertEquals(buffer.toString, "{\"a\":\"1\",\"b\":\"2\"}\t")
}
test("putTagsHstore") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap("a" -> "1", "b" -> "2")
buffer.putTagsHstore(tags)
assertEquals(buffer.toString, "\"a\"=>\"1\",\"b\"=>\"2\"\t")
}
test("putTagsHstore empty") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap.empty
buffer.putTagsHstore(tags)
assertEquals(buffer.toString, "\t")
}
test("putTagsText") {
val buffer = new TextCopyBuffer(100)
val tags = SortedTagMap("a" -> "1", "b" -> "2")
buffer.putTagsText(tags)
assertEquals(buffer.toString, "{\"a\":\"1\",\"b\":\"2\"}\t")
}
test("putShort") {
val buffer = new TextCopyBuffer(100)
buffer.putShort(42)
assertEquals(buffer.toString, "42\t")
}
test("putInt") {
val buffer = new TextCopyBuffer(100)
buffer.putInt(42)
assertEquals(buffer.toString, "42\t")
}
test("putLong") {
val buffer = new TextCopyBuffer(100)
buffer.putLong(42L)
assertEquals(buffer.toString, "42\t")
}
test("putDouble") {
val buffer = new TextCopyBuffer(100)
buffer.putDouble(42.0)
assertEquals(buffer.toString, "42.0\t")
}
test("putDouble NaN") {
val buffer = new TextCopyBuffer(100)
buffer.putDouble(Double.NaN)
assertEquals(buffer.toString, "NaN\t")
}
test("putDouble Infinity") {
val buffer = new TextCopyBuffer(100)
buffer.putDouble(Double.PositiveInfinity)
assertEquals(buffer.toString, "Infinity\t")
}
test("putDouble -Infinity") {
val buffer = new TextCopyBuffer(100)
buffer.putDouble(Double.NegativeInfinity)
assertEquals(buffer.toString, "-Infinity\t")
}
test("putDoubleArray") {
val buffer = new TextCopyBuffer(100)
buffer.putDoubleArray(Array.empty).putDoubleArray(Array(1.0, 1.5, 2.0, 2.5))
assertEquals(buffer.toString, "{}\t{1.0,1.5,2.0,2.5}\t")
}
test("nextRow") {
val buffer = new TextCopyBuffer(100)
buffer.putString("foo").putString("bar").nextRow()
assertEquals(buffer.toString, "foo\tbar\n")
}
test("nextRow on empty row") {
val buffer = new TextCopyBuffer(100)
intercept[IllegalStateException] {
buffer.nextRow()
}
buffer.putInt(0).nextRow()
intercept[IllegalStateException] {
buffer.nextRow()
}
}
private def toString(reader: Reader): String = {
val builder = new StringBuilder
val buf = new Array[Char](128)
var length = reader.read(buf)
while (length > 0) {
builder.appendAll(buf, 0, length)
length = reader.read(buf)
}
builder.toString()
}
test("reader") {
val buffer = new TextCopyBuffer(100)
buffer.putInt(0).putString("foo").nextRow()
assertEquals(toString(buffer.reader()), "0\tfoo\n")
}
test("reader with partial row") {
val buffer = new TextCopyBuffer(9)
assert(buffer.putInt(0).putString("foo").nextRow())
assert(!buffer.putInt(1).putString("bar").nextRow())
assertEquals(toString(buffer.reader()), "0\tfoo\n")
}
test("remaining") {
val buffer = new TextCopyBuffer(4)
buffer.putInt(2)
assert(buffer.hasRemaining)
assertEquals(buffer.remaining, 2)
buffer.putString("foo")
assert(!buffer.hasRemaining)
assertEquals(buffer.remaining, 0)
}
test("rows") {
val buffer = new TextCopyBuffer(100)
var i = 0
while (buffer.putInt(i).nextRow()) {
i = i + 1
assertEquals(buffer.rows, i)
}
assertEquals(buffer.rows, i)
}
test("clear") {
val buffer = new TextCopyBuffer(100)
buffer.putInt(0).putString("foo").nextRow()
assertEquals(toString(buffer.reader()), "0\tfoo\n")
buffer.clear()
buffer.putInt(1).putString("bar").nextRow()
assertEquals(toString(buffer.reader()), "1\tbar\n")
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/validation/ReservedKeyRuleSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.validation
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class ReservedKeyRuleSuite extends FunSuite {
private val config = ConfigFactory.parseString("""
|prefix = "nf."
|allowed-keys = ["region", "job", "task"]
""".stripMargin)
private val rule = ReservedKeyRule(config)
private def validate(k: String, v: String): ValidationResult = {
rule.validate(Map(k -> v))
}
test("valid") {
assertEquals(validate("nf.region", "def"), ValidationResult.Pass)
}
test("valid, no reserved prefix") {
assertEquals(validate("foo", "def"), ValidationResult.Pass)
}
test("invalid") {
val res = validate("nf.foo", "def")
assert(res.isFailure)
}
test("job") {
assertEquals(validate("nf.job", "def"), ValidationResult.Pass)
}
test("task") {
assertEquals(validate("nf.task", "def"), ValidationResult.Pass)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/norm/RollingValueFunctionSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
import munit.FunSuite
class RollingValueFunctionSuite extends FunSuite {
private def newFunction(step: Long) = {
val listVF = new ListValueFunction
val normalizeVF = new RollingValueFunction(step, (_, v) => v, listVF)
listVF.f = normalizeVF
listVF
}
test("values received in order") {
val f = newFunction(1L)
(0 until 20).foreach { i =>
val vs = f.update(i, i)
assert(vs == List(i -> i.toDouble))
}
f.close()
assertEquals(f.result(), Nil)
}
test("values received out of order") {
val f = newFunction(1L)
assertEquals(f.update(1, 1.0), List(1L -> 1.0))
assertEquals(f.update(2, 2.0), List(2L -> 2.0))
assertEquals(f.update(1, 0.5), List(1L -> 0.5))
assertEquals(f.update(3, 3.0), List(3L -> 3.0))
assertEquals(f.update(1, 0.0), Nil) // too old
f.close()
assertEquals(f.result(), Nil)
}
test("values with gaps") {
val f = newFunction(1L)
assertEquals(f.update(1, 1.0), List(1L -> 1.0))
assertEquals(f.update(5, 5.0), List(5L -> 5.0))
assertEquals(f.update(6, 6.0), List(6L -> 6.0))
assertEquals(f.update(9, 9.0), List(9L -> 9.0))
f.close()
assertEquals(f.result(), Nil)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/algorithm/OnlineRollingMeanSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.algorithm
class OnlineRollingMeanSuite extends BaseOnlineAlgorithmSuite {
override def newInstance: OnlineAlgorithm = OnlineRollingMean(50, 30)
test("n = 1, min = 1") {
val algo = OnlineRollingMean(1, 1)
assertEquals(algo.next(0.0), 0.0)
assertEquals(algo.next(1.0), 1.0)
}
test("n = 2, min = 1") {
val algo = OnlineRollingMean(2, 1)
assertEquals(algo.next(0.0), 0.0)
assertEquals(algo.next(1.0), 0.5)
assertEquals(algo.next(2.0), 1.5)
}
test("n = 2, min = 2") {
val algo = OnlineRollingMean(2, 2)
assert(algo.next(0.0).isNaN)
assertEquals(algo.next(1.0), 0.5)
assertEquals(algo.next(2.0), 1.5)
}
test("n = 2, min = 1, decreasing") {
val algo = OnlineRollingMean(2, 1)
assertEquals(algo.next(2.0), 2.0)
assertEquals(algo.next(1.0), 1.5)
assertEquals(algo.next(0.0), 0.5)
}
test("n = 2, min = 1, NaNs") {
val algo = OnlineRollingMean(2, 1)
assertEquals(algo.next(0.0), 0.0)
assertEquals(algo.next(1.0), 0.5)
assertEquals(algo.next(2.0), 1.5)
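    // Window is now (1.0, 2.0); pushing NaN evicts 1.0, and NaN values appear
    // to be skipped, so the mean of the remaining value 2.0 is 2.0.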
assertEquals(algo.next(Double.NaN), 2.0)
assert(!algo.isEmpty)
assert(algo.next(Double.NaN).isNaN)
assert(algo.isEmpty)
}
test("n = 2, min = 1, reset") {
val algo = OnlineRollingMean(2, 1)
assertEquals(algo.next(1.0), 1.0)
algo.reset()
assertEquals(algo.next(5.0), 5.0)
}
test("min < n") {
intercept[IllegalArgumentException] {
OnlineRollingMean(1, 2)
}
}
test("min = 0") {
intercept[IllegalArgumentException] {
OnlineRollingMean(2, 0)
}
}
test("min < 0") {
intercept[IllegalArgumentException] {
OnlineRollingMean(2, -4)
}
}
}
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/HostSourceSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import java.io.IOException
import java.nio.charset.StandardCharsets
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers._
import akka.stream.KillSwitches
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import akka.util.ByteString
import munit.FunSuite
import scala.concurrent.Await
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import scala.util.Using
class HostSourceSuite extends FunSuite {
import scala.concurrent.duration._
implicit val system = ActorSystem(getClass.getSimpleName)
def source(response: => Try[HttpResponse]): Source[ByteString, NotUsed] = {
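    // Stub client that answers every request with the supplied response; the
    // short delay keeps the retry tests fast.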
val client = Flow[HttpRequest].map(_ => response)
HostSource("http://localhost/api/test", client = client, delay = 1.milliseconds)
}
def compress(str: String): Array[Byte] = {
import com.netflix.atlas.core.util.Streams._
byteArray { out =>
Using.resource(gzip(out))(_.write(str.getBytes(StandardCharsets.UTF_8)))
}
}
test("ok") {
val response = HttpResponse(StatusCodes.OK, entity = ByteString("ok"))
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("no size limit on data stream") {
val entity = HttpEntity(ByteString("ok")).withSizeLimit(1)
val response = HttpResponse(StatusCodes.OK, entity = entity)
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("handles decompression") {
val headers = List(`Content-Encoding`(HttpEncodings.gzip))
val data = ByteString(compress("ok"))
val response = HttpResponse(StatusCodes.OK, headers = headers, entity = data)
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("retries on error response from host") {
val response = HttpResponse(StatusCodes.BadRequest, entity = ByteString("error"))
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
Success(response)
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("retries on exception from host") {
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
Failure(new IOException("cannot connect"))
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("retries on exception from host entity source") {
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
val source = Source.future(Future.failed[ByteString](new IOException("reset by peer")))
val entity = HttpEntity(MediaTypes.`text/event-stream`, source)
Success(HttpResponse(StatusCodes.OK, entity = entity))
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("ref stops host source") {
val response = Success(HttpResponse(StatusCodes.OK, entity = ByteString("ok")))
val ref = EvaluationFlows.stoppableSource(source(response))
ref.stop()
val future = ref.source
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assert(result.isEmpty)
}
test("ref host source works until stopped") {
val response = Success(HttpResponse(StatusCodes.OK, entity = ByteString("ok")))
val ref = EvaluationFlows.stoppableSource(source(response))
val future = ref.source
.map(_.decodeString(StandardCharsets.UTF_8))
.take(5)
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
}
|
dmuino/atlas | atlas-akka/src/test/scala/com/netflix/atlas/akka/ConnectionContextFactorySuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.akka
import akka.http.scaladsl.HttpsConnectionContext
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import munit.FunSuite
import javax.net.ssl.SSLContext
import javax.net.ssl.SSLEngine
class ConnectionContextFactorySuite extends FunSuite {
import ConnectionContextFactorySuite._
test("factory not set") {
val config = ConfigFactory.load()
val contextFactory = ConnectionContextFactory(config)
assert(contextFactory.isInstanceOf[ConfigConnectionContextFactory])
}
test("empty constructor") {
val config = ConfigFactory.parseString(s"""
|context-factory = "${classOf[EmptyConstructorFactory].getName}"
|""".stripMargin)
val contextFactory = ConnectionContextFactory(config)
assert(contextFactory.isInstanceOf[EmptyConstructorFactory])
}
test("config constructor") {
val config = ConfigFactory.parseString(s"""
|context-factory = "${classOf[ConfigConstructorFactory].getName}"
|ssl-config {
| correct-subconfig = true
|}
|""".stripMargin)
val contextFactory = ConnectionContextFactory(config)
assert(contextFactory.isInstanceOf[ConfigConstructorFactory])
}
}
object ConnectionContextFactorySuite {
class EmptyConstructorFactory extends ConnectionContextFactory {
override def sslContext: SSLContext = null
override def sslEngine: SSLEngine = null
override def httpsConnectionContext: HttpsConnectionContext = null
}
class ConfigConstructorFactory(config: Config) extends ConnectionContextFactory {
require(config.getBoolean("correct-subconfig"))
override def sslContext: SSLContext = null
override def sslEngine: SSLEngine = null
override def httpsConnectionContext: HttpsConnectionContext = null
}
}
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/EurekaGroupsLookupSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.StatusCodes
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.netflix.atlas.akka.AccessLogger
import com.netflix.atlas.json.Json
import munit.FunSuite
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Success
class EurekaGroupsLookupSuite extends FunSuite {
import EurekaSource._
import Evaluator._
private implicit val system = ActorSystem(getClass.getSimpleName)
private implicit val mat = Materializer(system)
private val eurekaGroup = EurekaSource.VipResponse(
uri = "http://eureka/v2/vips/atlas-lwcapi:7001",
applications = EurekaSource.Apps(
List(
EurekaSource.App("one", mkInstances("one", 5)),
EurekaSource.App("two", mkInstances("two", 3))
)
)
)
private def mkInstances(name: String, n: Int): List[EurekaSource.Instance] = {
(0 until n).toList.map { i =>
EurekaSource.Instance(
instanceId = f"$name-$i%05d",
status = "UP",
dataCenterInfo = DataCenterInfo("Amazon", Map("host" -> s"$name.$i")),
port = PortInfo(7101)
)
}
}
private def sources(vs: DataSource*): DataSources = {
DataSources.of(vs: _*)
}
private def ds(id: String, uri: String): DataSource = {
new DataSource(id, java.time.Duration.ofMinutes(1), uri)
}
private def lookupFlow: Flow[DataSources, Source[SourcesAndGroups, NotUsed], NotUsed] = {
val client = Flow[(HttpRequest, AccessLogger)]
.map {
case (_, v) =>
val json = Json.encode(eurekaGroup)
Success(HttpResponse(StatusCodes.OK, entity = json)) -> v
}
val context = TestContext.createContext(mat, client)
Flow[DataSources].via(new EurekaGroupsLookup(context, 5.microseconds))
}
private def run(input: List[DataSources], n: Int = 1): List[SourcesAndGroups] = {
val future = Source(input)
.concat(Source.repeat(input.last)) // Need to avoid source stopping until sink is full
.via(lookupFlow)
.flatMapConcat(s => s)
.take(n)
.fold(List.empty[SourcesAndGroups]) { (acc, v) =>
v :: acc
}
.runWith(Sink.head)
Await.result(future, Duration.Inf)
}
test("empty sources produces 1 empty sources") {
val input = List(
DataSources.empty()
)
val output = run(input)
assertEquals(output.size, 1)
assertEquals(output.head._1.getSources.size(), 0)
assertEquals(output.head._2.groups.size, 0)
}
test("one data source") {
val input = List(
sources(ds("a", "http://atlas/api/v1/graph?q=name,jvm.gc.pause,:eq,:dist-avg"))
)
val output = run(input)
assertEquals(output.head._2.groups.size, 1)
assertEquals(output.head._2.groups.head, eurekaGroup)
}
test("unknown data source") {
val input = List(
sources(ds("a", "http://unknown/api/v1/graph?q=name,jvm.gc.pause,:eq,:dist-avg"))
)
val output = run(input)
assertEquals(output.head._2.groups.size, 0)
// TODO: check for diagnostic message
}
test("groups for data source are refreshed") {
val input = List(
sources(ds("a", "http://atlas/api/v1/graph?q=name,jvm.gc.pause,:eq,:dist-avg"))
)
val output = run(input, 5)
assertEquals(output.size, 5)
output.foreach {
case (_, g) => assertEquals(g.groups.size, 1)
}
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/stacklang/FreezeSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.stacklang
import com.netflix.atlas.core.util.Features
import munit.FunSuite
class FreezeSuite extends FunSuite {
def interpreter: Interpreter = Interpreter(StandardVocabulary.allWords)
test("basic operation") {
val context = interpreter.execute("a,b,c,:freeze")
assertEquals(context.stack, List("c", "b", "a"))
assert(context.frozenStack.isEmpty)
}
test("frozen stack is isolated") {
val context = interpreter.execute("a,b,c,:freeze,d,e,f,:clear")
assertEquals(context.stack, List("c", "b", "a"))
assert(context.frozenStack.isEmpty)
}
test("variables are cleared") {
val e = intercept[NoSuchElementException] {
interpreter.execute("foo,1,:set,:freeze,foo,:get")
}
assertEquals(e.getMessage, "key not found: foo")
}
test("original variables are preserved") {
val vars = Map("foo" -> "original", "bar" -> "2")
val context = interpreter.execute("foo,1,:set,:freeze,foo,:get,bar,:get", vars, Features.STABLE)
assertEquals(context.stack, List("2", "original"))
}
test("multiple freeze operations") {
val context = interpreter.execute("a,b,c,:freeze,d,e,f,:freeze,g,h,i,:freeze,j,k,l,:clear")
assertEquals(context.stack, List("i", "h", "g", "f", "e", "d", "c", "b", "a"))
assert(context.frozenStack.isEmpty)
}
test("freeze works with macros") {
// Before macros would force unfreeze after execution
val context = interpreter.execute("a,b,:freeze,d,e,:2over,:clear")
assertEquals(context.stack, List("b", "a"))
assert(context.frozenStack.isEmpty)
}
test("freeze works with :call") {
val context = interpreter.execute("a,b,:freeze,d,(,:dup,),:call,:clear")
assertEquals(context.stack, List("b", "a"))
assert(context.frozenStack.isEmpty)
}
test("freeze works with :each") {
val context = interpreter.execute("a,b,:freeze,(,d,),(,:dup,),:each,:clear")
assertEquals(context.stack, List("b", "a"))
assert(context.frozenStack.isEmpty)
}
test("freeze works with :map") {
val context = interpreter.execute("a,b,:freeze,(,d,),(,:dup,),:map,:clear")
assertEquals(context.stack, List("b", "a"))
assert(context.frozenStack.isEmpty)
}
test("freeze works with :get/:set") {
val context = interpreter.execute("a,b,:freeze,d,e,:set,d,:get,:clear")
assertEquals(context.stack, List("b", "a"))
assert(context.frozenStack.isEmpty)
}
}
|
dmuino/atlas | atlas-postgres/src/main/scala/com/netflix/atlas/postgres/TextCopyBuffer.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.postgres
import com.netflix.atlas.core.model.ItemId
import com.netflix.atlas.core.util.CharBufferReader
import com.netflix.atlas.core.util.SortedTagMap
import org.postgresql.copy.CopyManager
import java.io.Reader
import java.nio.CharBuffer
/**
* Copy buffer that stores the data in the [text format].
*
* [text format]: https://www.postgresql.org/docs/13/sql-copy.html#id-1.9.3.55.9.2
*
* @param size
* Size of the underlying character buffer. Must be at least 1.
* @param shouldEscapeValues
* True if string values should be escaped before being written to the buffer. For
* sources where the values are known not to include any special characters this
* can be disabled to improve performance.
*/
class TextCopyBuffer(size: Int, shouldEscapeValues: Boolean = true) extends CopyBuffer {
require(size >= 1, "buffer size must be at least 1")
private val data = CharBuffer.allocate(size)
private var numRows = 0
override def putId(id: ItemId): CopyBuffer = {
putString(id.toString)
}
private def put(c: Char): TextCopyBuffer = {
if (data.remaining() >= 1) {
data.append(c)
} else {
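      // buffer is full: drop the char; nextRow() will report a partial row so the caller can flush and retry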
data.limit(data.capacity())
}
this
}
private def put(str: String): TextCopyBuffer = {
if (data.remaining() >= str.length) {
data.append(str)
} else {
data.limit(data.capacity())
}
this
}
private def escapeAndPut(str: String): TextCopyBuffer = {
if (shouldEscapeValues) {
val n = str.length
var i = 0
while (i < n) {
str.charAt(i) match {
case '\b' => put("\\b")
case '\f' => put("\\f")
case '\n' => put("\\n")
case '\r' => put("\\r")
case '\t' => put("\\t")
case 0x0b => put("\\v")
case '"' => put("\\\"")
case '\\' => put("\\\\")
case c => put(c)
}
i += 1
}
this
} else {
put(str)
}
}
private def putQuotedString(str: String): TextCopyBuffer = {
put('"').escapeAndPut(str).put('"')
}
private def putKeyValue(k: String, sep: String, v: String): TextCopyBuffer = {
putQuotedString(k).put(sep).putQuotedString(v)
}
override def putString(str: String): CopyBuffer = {
if (str == null)
put("\\N\t")
else
escapeAndPut(str).put('\t')
}
@scala.annotation.tailrec
private def putJson(tags: SortedTagMap, i: Int): TextCopyBuffer = {
if (i < tags.size) {
put(',').putKeyValue(tags.key(i), ":", tags.value(i)).putJson(tags, i + 1)
} else {
this
}
}
override def putTagsJson(tags: SortedTagMap): CopyBuffer = {
if (tags.nonEmpty) {
put('{').putKeyValue(tags.key(0), ":", tags.value(0)).putJson(tags, 1).put("}\t")
} else {
put("{}\t")
}
}
override def putTagsJsonb(tags: SortedTagMap): CopyBuffer = {
putTagsJson(tags)
}
@scala.annotation.tailrec
private def putHstore(tags: SortedTagMap, i: Int): TextCopyBuffer = {
if (i < tags.size) {
put(',').putKeyValue(tags.key(i), "=>", tags.value(i)).putHstore(tags, i + 1)
} else {
this
}
}
override def putTagsHstore(tags: SortedTagMap): CopyBuffer = {
if (tags.nonEmpty) {
putKeyValue(tags.key(0), "=>", tags.value(0)).putHstore(tags, 1).put('\t')
} else {
put('\t')
}
}
override def putTagsText(tags: SortedTagMap): CopyBuffer = {
putTagsJson(tags)
}
override def putShort(value: Short): CopyBuffer = {
putString(value.toString)
}
override def putInt(value: Int): CopyBuffer = {
putString(value.toString)
}
override def putLong(value: Long): CopyBuffer = {
putString(value.toString)
}
override def putDouble(value: Double): CopyBuffer = {
putString(value.toString)
}
@scala.annotation.tailrec
private def putDoubleArray(values: Array[Double], i: Int): TextCopyBuffer = {
if (i < values.length) {
put(',').put(values(i).toString).putDoubleArray(values, i + 1)
} else {
this
}
}
override def putDoubleArray(values: Array[Double]): CopyBuffer = {
if (values.length > 0) {
put('{').put(values(0).toString).putDoubleArray(values, 1).put("}\t")
} else {
put("{}\t")
}
}
override def nextRow(): Boolean = {
val i = data.position() - 1
if (i < 0 || data.get(i) == '\n' || (data.get(i) != '\t' && numRows == 0)) {
throw new IllegalStateException(
"nextRow() called on empty row, usually means a row is too big to fit"
)
} else if (data.get(i) == '\t') {
numRows += 1
data.position(i)
data.put('\n')
data.mark()
true
} else {
// partial row written
false
}
}
override def hasRemaining: Boolean = {
data.hasRemaining
}
override def remaining: Int = {
data.remaining()
}
override def rows: Int = numRows
override def clear(): Unit = {
data.clear()
numRows = 0
}
def reader(): Reader = {
// Buffer is marked for each new row, reset is used to ignore a partial row that
// could not fit in the buffer.
data.reset().flip()
new CharBufferReader(data)
}
override def copyIn(copyManager: CopyManager, table: String): Unit = {
val copySql = s"copy $table from stdin (format text)"
copyManager.copyIn(copySql, reader())
}
override def toString: String = {
new String(data.array(), 0, data.position())
}
}
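
// Usage sketch (illustrative, not part of the original source): batch rows into the
// buffer and flush to Postgres whenever a row does not fit. The table name
// "atlas.metrics" and the supplied CopyManager are assumptions for the example.
object TextCopyBufferExample {

  def copyTags(copyManager: CopyManager, rows: Seq[SortedTagMap]): Unit = {
    val buffer = new TextCopyBuffer(65536)
    rows.foreach { tags =>
      buffer.putTagsJsonb(tags)
      if (!buffer.nextRow()) {
        // row did not fit: flush the complete rows, then retry in an empty buffer
        buffer.copyIn(copyManager, "atlas.metrics")
        buffer.clear()
        buffer.putTagsJsonb(tags)
        buffer.nextRow()
      }
    }
    if (buffer.rows > 0) {
      buffer.copyIn(copyManager, "atlas.metrics")
    }
  }
}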
|
dmuino/atlas | atlas-lwcapi/src/main/scala/com/netflix/atlas/lwcapi/StreamMetadata.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.lwcapi
import java.util.concurrent.atomic.AtomicLong
/**
* Metadata for a stream.
*
* @param streamId
* Unique id for a stream specified by the caller. Used to route data and detect
* reconnections.
* @param remoteAddress
* IP address of the remote consumer. Only used to help with debugging.
* @param receivedMessages
* Number of messages that were successfully received.
* @param droppedMessages
* Number of messages that were dropped because the queue was full.
*/
case class StreamMetadata(
streamId: String,
remoteAddress: String = "unknown",
receivedMessages: AtomicLong = new AtomicLong(),
droppedMessages: AtomicLong = new AtomicLong()
)
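
// Example (illustrative): the counters are updated in place as messages flow through.
//   val meta = StreamMetadata("stream-123", "10.0.0.1")
//   meta.receivedMessages.incrementAndGet()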
|
dmuino/atlas | atlas-jmh/src/main/scala/com/netflix/atlas/core/validation/TagRules.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.validation
import com.netflix.atlas.core.util.SmallHashMap
import com.netflix.atlas.core.util.SortedTagMap
import com.netflix.spectator.impl.AsciiSet
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.infra.Blackhole
/**
* TagRule needs to loop over all the entries in the map. This benchmark compares
* looping over the tags for each rule to looping over the rules for each tag.
*
* ```
* > jmh:run -prof gc -wi 10 -i 10 -f1 -t1 .*TagRules.*
* ```
*
* Throughput
*
* ```
* Benchmark Mode Cnt Score Error Units
* composite thrpt 10 1345178.780 ± 220318.775 ops/s
* compositeSorted thrpt 10 1330052.142 ± 100088.503 ops/s
* separate thrpt 10 1827998.619 ± 126933.939 ops/s
* separateSorted thrpt 10 2085370.740 ± 262945.382 ops/s
* ```
*/
@State(Scope.Thread)
class TagRules {
private val tags = SmallHashMap(
"nf.app" -> "atlas_backend",
"nf.cluster" -> "atlas_backend-dev",
"nf.asg" -> "atlas_backend-dev-v001",
"nf.stack" -> "dev",
"nf.region" -> "us-east-1",
"nf.zone" -> "us-east-1e",
"nf.node" -> "i-123456789",
"nf.ami" -> "ami-987654321",
"nf.vmtype" -> "r3.2xlarge",
"name" -> "jvm.gc.pause",
"cause" -> "Allocation_Failure",
"action" -> "end_of_major_GC",
"statistic" -> "totalTime"
)
private val sortedTags = SortedTagMap(tags)
private val rules = List(
KeyLengthRule(2, 80),
NameValueLengthRule(ValueLengthRule(2, 255), ValueLengthRule(2, 120)),
ValidCharactersRule(
AsciiSet.fromPattern("-._A-Za-z0-9"),
Map.empty.withDefaultValue(AsciiSet.fromPattern("-._A-Za-z0-9"))
),
ReservedKeyRule(
"nf.",
Set(
"nf.app",
"nf.cluster",
"nf.asg",
"nf.stack",
"nf.region",
"nf.zone",
"nf.node",
"nf.ami",
"nf.vmtype"
)
),
ReservedKeyRule("atlas.", Set("legacy"))
)
  private val compositeRule = CompositeTagRule(rules)
@Benchmark
def separate(bh: Blackhole): Unit = {
bh.consume(Rule.validate(tags, rules))
}
@Benchmark
def separateSorted(bh: Blackhole): Unit = {
bh.consume(Rule.validate(sortedTags, rules))
}
@Benchmark
def composite(bh: Blackhole): Unit = {
    bh.consume(compositeRule.validate(tags))
}
@Benchmark
def compositeSorted(bh: Blackhole): Unit = {
    bh.consume(compositeRule.validate(sortedTags))
}
}
|
dmuino/atlas | atlas-lwcapi/src/main/scala/com/netflix/atlas/lwcapi/EvaluateApi.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.lwcapi
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.RemoteAddress
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.netflix.atlas.akka.CustomDirectives._
import com.netflix.atlas.akka.WebApi
import com.netflix.atlas.eval.model.LwcDatapoint
import com.netflix.atlas.eval.model.LwcDiagnosticMessage
import com.netflix.atlas.json.JsonSupport
import com.netflix.spectator.api.Registry
import com.typesafe.scalalogging.StrictLogging
class EvaluateApi(registry: Registry, sm: StreamSubscriptionManager)
extends WebApi
with StrictLogging {
import EvaluateApi._
private val payloadSize = registry.distributionSummary("atlas.lwcapi.evalPayloadSize")
private val ignoredCounter = registry.counter("atlas.lwcapi.ignoredItems")
def routes: Route = {
endpointPath("lwc" / "api" / "v1" / "evaluate") {
post {
extractClientIP { addr =>
parseEntity(json[EvaluateRequest]) { req =>
payloadSize.record(req.metrics.size)
val timestamp = req.timestamp
req.metrics.groupBy(_.id).foreach {
case (id, ms) =>
val datapoints = ms.map { m =>
LwcDatapoint(timestamp, m.id, m.tags, m.value)
}
evaluate(addr, id, datapoints)
}
req.messages.groupBy(_.id).foreach {
case (id, ms) => evaluate(addr, id, ms)
}
complete(HttpResponse(StatusCodes.OK))
}
}
}
}
}
private def evaluate(addr: RemoteAddress, id: String, msgs: Seq[JsonSupport]): Unit = {
val queues = sm.handlersForSubscription(id)
if (queues.nonEmpty) {
queues.foreach { queue =>
logger.trace(s"sending ${msgs.size} messages to $queue (from: $addr)")
queue.offer(msgs)
}
} else {
logger.debug(s"no subscriptions, ignoring ${msgs.size} messages (from: $addr)")
ignoredCounter.increment(msgs.size)
}
}
}
object EvaluateApi {
type TagMap = Map[String, String]
case class Item(id: String, tags: TagMap, value: Double)
case class EvaluateRequest(
timestamp: Long,
metrics: List[Item] = Nil,
messages: List[LwcDiagnosticMessage] = Nil
) extends JsonSupport
}
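
// Illustrative payload for the evaluate endpoint (field names follow EvaluateRequest
// above; the id, tags, and values are made up for the example):
//
//   POST /lwc/api/v1/evaluate
//   {
//     "timestamp": 1636116180000,
//     "metrics": [
//       {"id": "sub-1", "tags": {"name": "jvm.gc.pause"}, "value": 42.0}
//     ],
//     "messages": []
//   }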
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/TimeGroupedSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.Query
import com.netflix.atlas.eval.model.AggrDatapoint
import com.netflix.atlas.eval.model.AggrValuesInfo
import com.netflix.atlas.eval.model.TimeGroup
import com.netflix.spectator.api.DefaultRegistry
import munit.FunSuite
import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.Duration
class TimeGroupedSuite extends FunSuite {
private implicit val system = ActorSystem(getClass.getSimpleName)
private implicit val materializer = Materializer(system)
private val registry = new DefaultRegistry()
private val context = TestContext.createContext(materializer, registry = registry)
private val step = 10
private def result(future: Future[List[TimeGroup]]): List[TimeGroup] = {
Await
.result(future, Duration.Inf)
.reverse
.map(g =>
g.copy(dataExprValues = g.dataExprValues
.map(t => t._1 -> t._2.copy(values = t._2.values.sortWith(_.value < _.value)))
)
)
}
private def run(data: List[AggrDatapoint]): List[TimeGroup] = {
val future = Source(data)
.via(new TimeGrouped(context, 10))
.runFold(List.empty[TimeGroup])((acc, g) => g :: acc)
result(future)
}
private def timeGroup(t: Long, vs: List[AggrDatapoint]): TimeGroup = {
val expr = DataExpr.All(Query.True)
if (vs.isEmpty)
TimeGroup(t, step, Map.empty)
else
TimeGroup(t, step, Map(expr -> AggrValuesInfo(vs, vs.size)))
}
private def datapoint(t: Long, v: Int): AggrDatapoint = {
val expr = DataExpr.All(Query.True)
AggrDatapoint(t, step, expr, "test", Map.empty, v)
}
test("in order list") {
val data =
List(
datapoint(10, 1),
datapoint(10, 2),
datapoint(10, 3),
datapoint(20, 1),
datapoint(30, 1),
datapoint(30, 2)
)
val groups = run(data)
assertEquals(
groups,
List(
timeGroup(10, List(datapoint(10, 1), datapoint(10, 2), datapoint(10, 3))),
timeGroup(20, List(datapoint(20, 1))),
timeGroup(30, List(datapoint(30, 1), datapoint(30, 2)))
)
)
}
test("out of order list") {
val data =
List(
datapoint(20, 1),
datapoint(10, 2),
datapoint(10, 3),
datapoint(10, 1),
datapoint(30, 1),
datapoint(30, 2)
)
val groups = run(data)
assertEquals(
groups,
List(
timeGroup(10, List(datapoint(10, 1), datapoint(10, 2), datapoint(10, 3))),
timeGroup(20, List(datapoint(20, 1))),
timeGroup(30, List(datapoint(30, 1), datapoint(30, 2)))
)
)
}
private def count(id: String): Long = {
registry.counter("atlas.eval.datapoints", "id", id).count()
}
private def counts: (Long, Long) = {
count("buffered") -> (count("dropped-old") + count("dropped-future"))
}
test("late events dropped") {
val data = List(
datapoint(20, 1),
datapoint(10, 2),
datapoint(10, 3),
datapoint(10, 1),
datapoint(30, 1),
datapoint(30, 2),
datapoint(10, 4) // Dropped, came in late and out of window
)
val before = counts
val groups = run(data)
val after = counts
assertEquals(
groups,
List(
timeGroup(10, List(datapoint(10, 1), datapoint(10, 2), datapoint(10, 3))),
timeGroup(20, List(datapoint(20, 1))),
timeGroup(30, List(datapoint(30, 1), datapoint(30, 2)))
)
)
assertEquals(before._1 + 6, after._1) // 6 buffered messages
assertEquals(before._2 + 1, after._2) // 1 dropped message
}
test("future events dropped") {
val future = System.currentTimeMillis() + 60 * 60 * 1000
val data = List(
datapoint(20, 1),
datapoint(10, 2),
datapoint(10, 3),
datapoint(future + 10, 1), // Dropped, timestamp in the future
datapoint(30, 1),
datapoint(30, 2),
datapoint(10, 4) // Dropped, came in late and out of window
)
val before = counts
val groups = run(data)
val after = counts
assertEquals(
groups,
List(
timeGroup(10, List(datapoint(10, 2), datapoint(10, 3))),
timeGroup(20, List(datapoint(20, 1))),
timeGroup(30, List(datapoint(30, 1), datapoint(30, 2)))
)
)
assertEquals(before._1 + 5, after._1) // 5 buffered messages
    assertEquals(before._2 + 2, after._2) // 2 dropped messages
}
test("heartbeat will flush if no data for an interval") {
val data =
List(
AggrDatapoint.heartbeat(10, step),
datapoint(10, 1),
datapoint(10, 2),
datapoint(10, 3),
AggrDatapoint.heartbeat(20, step),
datapoint(30, 1),
datapoint(30, 2),
AggrDatapoint.heartbeat(30, step)
)
val groups = run(data)
assertEquals(
groups,
List(
timeGroup(10, List(datapoint(10, 1), datapoint(10, 2), datapoint(10, 3))),
timeGroup(20, Nil),
timeGroup(30, List(datapoint(30, 1), datapoint(30, 2)))
)
)
}
test("simple aggregate: sum") {
val n = 10000
val expr = DataExpr.Sum(Query.True)
val data = (0 until n).toList.map { i =>
AggrDatapoint(10, 10, expr, "test", Map.empty, i)
}
val expected = AggrDatapoint(10, 10, expr, "test", Map.empty, n * (n - 1) / 2)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, Map(expr -> AggrValuesInfo(List(expected), n)))))
}
test("simple aggregate: min") {
val n = 10000
val expr = DataExpr.Min(Query.True)
val data = (0 until n).toList.map { i =>
AggrDatapoint(10, 10, expr, "test", Map.empty, i)
}
val expected = AggrDatapoint(10, 10, expr, "test", Map.empty, 0)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, Map(expr -> AggrValuesInfo(List(expected), n)))))
}
test("simple aggregate: max") {
val n = 10000
val expr = DataExpr.Max(Query.True)
val data = (0 until n).toList.map { i =>
AggrDatapoint(10, 10, expr, "test", Map.empty, i)
}
val expected = AggrDatapoint(10, 10, expr, "test", Map.empty, n - 1)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, Map(expr -> AggrValuesInfo(List(expected), n)))))
}
test("simple aggregate: count") {
val n = 10000
val expr = DataExpr.Count(Query.True)
val data = (0 until n).toList.map { i =>
AggrDatapoint(10, 10, expr, "test", Map.empty, i)
}
val expected = AggrDatapoint(10, 10, expr, "test", Map.empty, n * (n - 1) / 2)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, Map(expr -> AggrValuesInfo(List(expected), n)))))
}
test("group by aggregate: sum") {
val n = 5000
val expr: DataExpr = DataExpr.GroupBy(DataExpr.Sum(Query.True), List("category"))
val data = (0 until 2 * n).toList.map { i =>
val category = if (i % 2 == 0) "even" else "odd"
AggrDatapoint(10, 10, expr, "test", Map("category" -> category), i)
}
val expected = Map(
expr -> AggrValuesInfo(
List(
AggrDatapoint(10, 10, expr, "test", Map("category" -> "even"), n * (n - 1)),
AggrDatapoint(10, 10, expr, "test", Map("category" -> "odd"), n * n)
),
2 * n
)
)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, expected)))
}
test("group by aggregate: min") {
val n = 10000
val expr: DataExpr = DataExpr.GroupBy(DataExpr.Min(Query.True), List("category"))
val data = (0 until n).toList.map { i =>
val category = if (i % 2 == 0) "even" else "odd"
AggrDatapoint(10, 10, expr, "test", Map("category" -> category), i)
}
val expected = Map(
expr -> AggrValuesInfo(
List(
AggrDatapoint(10, 10, expr, "test", Map("category" -> "even"), 0),
AggrDatapoint(10, 10, expr, "test", Map("category" -> "odd"), 1)
),
n
)
)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, expected)))
}
test("group by aggregate: max") {
val n = 10000
val expr: DataExpr = DataExpr.GroupBy(DataExpr.Max(Query.True), List("category"))
val data = (0 until n).toList.map { i =>
val category = if (i % 2 == 0) "even" else "odd"
AggrDatapoint(10, 10, expr, "test", Map("category" -> category), i)
}
val expected = Map(
expr -> AggrValuesInfo(
List(
AggrDatapoint(10, 10, expr, "test", Map("category" -> "even"), n - 2),
AggrDatapoint(10, 10, expr, "test", Map("category" -> "odd"), n - 1)
),
n
)
)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, expected)))
}
test("group by aggregate: count") {
val n = 5000
val expr: DataExpr = DataExpr.GroupBy(DataExpr.Count(Query.True), List("category"))
val data = (0 until 2 * n).toList.map { i =>
val category = if (i % 2 == 0) "even" else "odd"
AggrDatapoint(10, 10, expr, "test", Map("category" -> category), i)
}
val expected = Map(
expr -> AggrValuesInfo(
List(
AggrDatapoint(10, 10, expr, "test", Map("category" -> "even"), n * (n - 1)),
AggrDatapoint(10, 10, expr, "test", Map("category" -> "odd"), n * n)
),
2 * n
)
)
val groups = run(data)
assertEquals(groups, List(TimeGroup(10, step, expected)))
}
}
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/util/CharBufferReader.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import java.io.Reader
import java.nio.CharBuffer
/**
* Wraps a CharBuffer so it can be used with interfaces that require a Reader. The buffer
* should not be modified outside of the reader until reading is complete.
*/
class CharBufferReader(buffer: CharBuffer) extends Reader {
override def read(cbuf: Array[Char], offset: Int, length: Int): Int = {
if (buffer.hasRemaining) {
val readLength = math.min(buffer.remaining(), length)
buffer.get(cbuf, offset, readLength)
readLength
} else {
-1
}
}
override def read(): Int = {
if (buffer.hasRemaining) buffer.get() else -1
}
override def ready(): Boolean = true
override def skip(n: Long): Long = {
val skipAmount = math.min(buffer.remaining(), n).toInt
buffer.position(buffer.position() + skipAmount)
skipAmount
}
override def markSupported(): Boolean = true
override def mark(readAheadLimit: Int): Unit = {
buffer.mark()
}
override def reset(): Unit = {
buffer.reset()
}
override def close(): Unit = {
buffer.flip()
}
}
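
// Usage sketch (illustrative): expose buffered characters to an API that expects a Reader.
//   val reader = new CharBufferReader(CharBuffer.wrap("abc"))
//   val chars = new Array[Char](3)
//   reader.read(chars, 0, 3) // returns 3; chars now holds 'a', 'b', 'c'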
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/CharBufferReaderSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import munit.FunSuite
import java.nio.CharBuffer
import scala.util.Using
class CharBufferReaderSuite extends FunSuite {
test("read()") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
assertEquals(reader.read(), 'a'.toInt)
assertEquals(reader.read(), 'b'.toInt)
assertEquals(reader.read(), 'c'.toInt)
assertEquals(reader.read(), -1)
}
test("read(cbuf)") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
val array = new Array[Char](2)
assertEquals(reader.read(array), 2)
assertEquals(array.toSeq, Array('a', 'b').toSeq)
assertEquals(reader.read(array), 1)
assertEquals(array.toSeq, Array('c', 'b').toSeq) // b left over from previous
assertEquals(reader.read(array), -1)
}
test("read(cbuf, offset, length)") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
val array = new Array[Char](5)
assertEquals(reader.read(array, 2, 3), 3)
assertEquals(array.toSeq, Array('\u0000', '\u0000', 'a', 'b', 'c').toSeq)
assertEquals(reader.read(array), -1)
}
test("ready()") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
assert(reader.ready())
}
test("skip()") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
assertEquals(reader.skip(2), 2L)
assertEquals(reader.read(), 'c'.toInt)
assertEquals(reader.read(), -1)
assertEquals(reader.skip(2), 0L)
}
test("mark() and reset()") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
assert(reader.markSupported())
assertEquals(reader.read(), 'a'.toInt)
reader.mark(5)
assertEquals(reader.read(), 'b'.toInt)
assertEquals(reader.read(), 'c'.toInt)
reader.reset()
assertEquals(reader.read(), 'b'.toInt)
reader.reset()
assertEquals(reader.read(), 'b'.toInt)
}
test("close()") {
val buffer = CharBuffer.wrap("abc")
val reader = new CharBufferReader(buffer)
(0 until 10).foreach { _ =>
Using.resource(reader) { r =>
assertEquals(r.read(), 'a'.toInt)
r.skip(100)
assertEquals(r.read(), -1)
}
}
}
}
|
dmuino/atlas | atlas-lwcapi/src/main/scala/com/netflix/atlas/lwcapi/StreamsApi.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.lwcapi
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.netflix.atlas.akka.CustomDirectives._
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.akka.WebApi
import com.netflix.atlas.json.Json
import javax.inject.Inject
/**
 * Provides a summary of the current streams. This is to aid in debugging and can be
* disabled without impacting the service.
*/
class StreamsApi @Inject() (sm: StreamSubscriptionManager) extends WebApi {
def routes: Route = {
endpointPathPrefix("api" / "v1" / "streams") {
path(Remaining) { streamId =>
sm.streamSummary(streamId) match {
case Some(summary) => complete(Json.encode(summary))
case None => complete(notFound(streamId))
}
} ~
pathEnd {
complete(Json.encode(sm.streamSummaries.map(_.metadata)))
}
}
}
private def notFound(streamId: String): HttpResponse = {
val msg = DiagnosticMessage.info(s"no stream with id: $streamId")
HttpResponse(StatusCodes.NotFound, entity = Json.encode(msg))
}
}
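
// Endpoints provided by the routes above (illustrative summary):
//   GET /api/v1/streams             -> metadata for all active streams
//   GET /api/v1/streams/{streamId}  -> summary for one stream, 404 if not found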
|
dmuino/atlas | atlas-webapi/src/test/scala/com/netflix/atlas/webapi/ExprApiSuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.webapi
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.testkit.RouteTestTimeout
import com.netflix.atlas.akka.RequestHandler
import com.netflix.atlas.akka.testkit.MUnitRouteSuite
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.MathExpr
import com.netflix.atlas.core.model.Query
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.model.StyleVocabulary
import com.netflix.atlas.core.stacklang.Interpreter
import com.netflix.atlas.json.Json
class ExprApiSuite extends MUnitRouteSuite {
import scala.concurrent.duration._
implicit val routeTestTimeout = RouteTestTimeout(5.second)
val endpoint = new ExprApi
def testGet(uri: String)(f: => Unit): Unit = {
test(uri) {
Get(uri) ~> routes ~> check(f)
}
}
private def routes: Route = RequestHandler.standardOptions(endpoint.routes)
testGet("/api/v1/expr") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr?q=name,sps,:eq") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Output]](responseAs[String])
assertEquals(data.size, 4)
}
testGet("/api/v1/expr/debug") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Output]](responseAs[String])
assertEquals(data.size, 4)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:sum,$name,:legend&vocab=style") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Output]](responseAs[String])
assertEquals(data.size, 7)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:sum,$name,:legend,foo,:sset,foo,:get") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Output]](responseAs[String])
assertEquals(data.size, 11)
assert(data.last.context.variables("foo") == "name,sps,:eq,:sum,$name,:legend")
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:sum,$name,:legend&vocab=query") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,cluster,:has&vocab=query") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:clear&vocab=query") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:sum,$name,:legend,foo") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/debug?q=name,sps,:eq,:sum,$name,:legend,foo,:clear") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/normalize") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/normalize?q=name,sps,:eq") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List("name,sps,:eq,:sum"))
}
testGet("/api/v1/expr/normalize?q=name,sps,:eq,:dup,2,:mul,:swap") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List("name,sps,:eq,:sum,2.0,:mul", "name,sps,:eq,:sum"))
}
testGet(
"/api/v1/expr/normalize?q=(,name,:swap,:eq,nf.cluster,foo,:eq,:and,:sum,),foo,:sset,cpu,foo,:fcall,disk,foo,:fcall"
) {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
val expected =
List("name,cpu,:eq,:sum", "name,disk,:eq,:sum", ":list,(,nf.cluster,foo,:eq,:cq,),:each")
assertEquals(data, expected)
}
testGet("/api/v1/expr/complete") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/complete?q=name,sps,:eq") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Candidate]](responseAs[String]).map(_.name)
assert(data.nonEmpty)
assert(!data.contains("add"))
}
testGet("/api/v1/expr/complete?q=name,sps,:eq,(,nf.cluster,)") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Candidate]](responseAs[String]).map(_.name)
assertEquals(data, List("by", "by", "offset", "palette"))
}
  // TODO: Right now these fail. As a future improvement, suggestions should be possible
  // within a list context by ignoring everything outside of the list.
testGet("/api/v1/expr/complete?q=name,sps,:eq,(,nf.cluster,name") {
assertEquals(response.status, StatusCodes.BadRequest)
}
testGet("/api/v1/expr/complete?q=1,2") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[ExprApiSuite.Candidate]](responseAs[String]).map(_.name)
assert(data.nonEmpty)
assert(data.contains("add"))
}
testGet("/api/v1/expr/queries?q=1,2") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assert(data.isEmpty)
}
testGet("/api/v1/expr/queries?q=name,sps,:eq") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List("name,sps,:eq"))
}
testGet("/api/v1/expr/queries?q=name,sps,:eq,(,nf.cluster,),:by") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List("name,sps,:eq"))
}
testGet("/api/v1/expr/queries?q=name,sps,:eq,(,nf.cluster,),:by,:dup,:dup,4,:add") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List("name,sps,:eq"))
}
testGet("/api/v1/expr/queries?q=name,sps,:eq,(,nf.cluster,),:by,:true,:sum,name,:has,:add") {
assertEquals(response.status, StatusCodes.OK)
val data = Json.decode[List[String]](responseAs[String])
assertEquals(data, List(":true", "name,:has", "name,sps,:eq"))
}
import Query._
private def normalize(expr: String): List[String] = {
val interpreter = Interpreter(StyleVocabulary.allWords)
ExprApi.normalize(expr, interpreter)
}
test("normalize query order") {
val q1 = "app,foo,:eq,name,cpu,:eq,:and"
val q2 = "name,cpu,:eq,app,foo,:eq,:and"
assertEquals(normalize(q1), normalize(q2))
}
test("normalize single query") {
val e1 = DataExpr.Sum(And(Equal("app", "foo"), Equal("name", "cpuUser")))
val add = StyleExpr(MathExpr.Add(e1, e1), Map.empty)
assertEquals(
normalize(add.toString),
List(
":true,:sum,:true,:sum,:add",
":list,(,app,foo,:eq,name,cpuUser,:eq,:and,:cq,),:each"
)
)
}
test("normalize common query") {
val e1 = DataExpr.Sum(And(Equal("app", "foo"), Equal("name", "cpuUser")))
val e2 = DataExpr.Sum(And(Equal("app", "foo"), Equal("name", "cpuSystem")))
val add = StyleExpr(MathExpr.Add(e1, e2), Map.empty)
assertEquals(
normalize(add.toString),
List(
"name,cpuUser,:eq,:sum,name,cpuSystem,:eq,:sum,:add",
":list,(,app,foo,:eq,:cq,),:each"
)
)
}
test("normalize :avg") {
val avg = "app,foo,:eq,name,cpuUser,:eq,:and,:avg"
assertEquals(normalize(avg), List(avg))
}
test("normalize :dist-avg") {
val avg = "app,foo,:eq,name,cpuUser,:eq,:and,:dist-avg"
assertEquals(normalize(avg), List(avg))
}
test("normalize :dist-avg,(,nf.cluster,),:by") {
val avg = "app,foo,:eq,name,cpuUser,:eq,:and,:dist-avg,(,nf.cluster,),:by"
assertEquals(normalize(avg), List(avg))
}
test("normalize :dist-stddev") {
val stddev = "app,foo,:eq,name,cpuUser,:eq,:and,:dist-stddev"
assertEquals(normalize(stddev), List(stddev))
}
test("normalize :dist-max") {
val max = "app,foo,:eq,name,cpuUser,:eq,:and,:dist-max"
assertEquals(normalize(max), List(max))
}
test("normalize :dist-avg + expr2") {
val avg =
"app,foo,:eq,name,cpuUser,:eq,:and,:dist-avg,app,foo,:eq,name,cpuSystem,:eq,:and,:max"
assertEquals(
normalize(avg),
List(
"name,cpuUser,:eq,:dist-avg",
"name,cpuSystem,:eq,:max",
":list,(,app,foo,:eq,:cq,),:each"
)
)
}
test("normalize :avg,(,nf.cluster,),:by,:pct") {
val avg = "app,foo,:eq,name,cpuUser,:eq,:and,:avg,(,nf.cluster,),:by,:pct"
assertEquals(normalize(avg), List(avg))
}
test("normalize :des-fast") {
val expr = "app,foo,:eq,name,cpuUser,:eq,:and,:sum,:des-fast"
assertEquals(normalize(expr), List(expr))
}
test("normalize legend vars, include parenthesis") {
val expr = "name,cpuUser,:eq,:sum,$name,:legend"
val expected = "name,cpuUser,:eq,:sum,$(name),:legend"
assertEquals(normalize(expr), List(expected))
}
test("normalize legend vars, noop if parens already present") {
val expr = "name,cpuUser,:eq,:sum,$(name),:legend"
assertEquals(normalize(expr), List(expr))
}
test("normalize legend vars, mix") {
val expr = "name,cpuUser,:eq,:sum,foo$name$abc bar$(def)baz,:legend"
val expected = "name,cpuUser,:eq,:sum,foo$(name)$(abc) bar$(def)baz,:legend"
assertEquals(normalize(expr), List(expected))
}
}
object ExprApiSuite {
case class Output(program: List[String], context: Context)
case class Context(stack: List[String], variables: Map[String, String])
case class Candidate(name: String, signature: String, description: String)
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/stacklang/VocabularySuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.stacklang
import munit.FunSuite
class VocabularySuite extends FunSuite {
import com.netflix.atlas.core.stacklang.VocabularySuite._
test("toMarkdown") {
val expected =
"""
|## call
|
|**Signature:** `? List -- ?`
|
|Pop a list off the stack and execute it as a program.
|
|**Examples**
|
|```
|Expr: (,a,),:call
|
| In:
| 1. List(a)
|Out:
| 1. a
|```
|
|## dup
|
|**Signature:** `a -- a a`
|
|Duplicate the item on the top of the stack.
|
|**Examples**
|
|```
|Expr: a,:dup
|
| In:
| 1. a
|Out:
| 2. a
| 1. a
|```
|
|```
|Expr: a,b,:dup
|
| In:
| 2. a
| 1. b
|Out:
| 3. a
| 2. b
| 1. b
|```
|
|```
|Expr: ,:dup
|
| In:
|
|Out:
|IllegalStateException: no matches for word ':dup' with stack [], candidates: [a -- a a]
|```
""".stripMargin.trim
assertEquals(TestVocabulary.toMarkdown, expected)
}
test("toMarkdown standard") {
// Make sure no exceptions are thrown
StandardVocabulary.toMarkdown
}
}
object VocabularySuite {
object TestVocabulary extends Vocabulary {
def name: String = "test"
def dependsOn: List[Vocabulary] = Nil
def words: List[Word] = List(
StandardVocabulary.Call,
StandardVocabulary.Dup
)
}
}
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/norm/RollingValueFunction.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
/**
* Value function that will aggregate all values received for a given step interval. Keeps
* a rolling buffer to allow out of order updates for a brief window and ensure that the
* normalized values will get output in order.
*
* @param step
* Normalized distance between samples produced by this class.
* @param aggr
* Aggregation function to use to combine values for the same interval.
* @param next
 *   Normalized values will be passed to this function.
*/
class RollingValueFunction(
step: Long,
aggr: (Double, Double) => Double,
next: ValueFunction
) extends ValueFunction {
require(step >= 1, "step must be >= 1")
private[this] val size = 2
private val values = Array.fill[Double](size)(Double.NaN)
private var lastUpdateTime = -1L
/**
   * Round the timestamp up to the next step boundary; timestamps already on a boundary are
   * left as is. This attributes a measurement to the end of the interval that contains it.
*/
private def normalize(timestamp: Long): Long = {
val stepBoundary = timestamp / step * step
if (timestamp == stepBoundary)
stepBoundary
else
stepBoundary + step
}
override def apply(timestamp: Long, value: Double): Unit = {
val t = normalize(timestamp) / step
val delta = if (lastUpdateTime < 0) 1 else t - lastUpdateTime
if (delta == 0 || (delta < 0 && -delta < size)) {
// Update the current entry or old entry that is still within range
val i = (t % size).toInt
values(i) = aggr(values(i), value)
writeValue(t, values(i))
} else if (delta >= 1) {
// Create or overwrite an older entry
val i = (t % size).toInt
lastUpdateTime = t
values(i) = value
writeValue(t, values(i))
}
}
override def close(): Unit = {
lastUpdateTime = -1L
}
private def writeValue(timestamp: Long, value: Double): Unit = {
if (!value.isNaN) {
next(timestamp * step, value)
}
}
override def toString: String = {
s"${getClass.getSimpleName}(step=$step)"
}
}
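
// Usage sketch (illustrative, not part of the original source): sum values into 10ms
// intervals and print each normalized update. The println sink is an assumption for
// the example.
object RollingValueFunctionExample {

  def main(args: Array[String]): Unit = {
    val sink = new ValueFunction {
      override def apply(timestamp: Long, value: Double): Unit = println(s"$timestamp -> $value")
      override def close(): Unit = ()
    }
    val f = new RollingValueFunction(10L, _ + _, sink)
    f(12L, 1.0) // rounds up to interval 20: prints "20 -> 1.0"
    f(25L, 2.0) // new interval: prints "30 -> 2.0"
    f(18L, 3.0) // out of order, still within the window: prints "20 -> 4.0"
    f(1L, 5.0)  // too old, outside the two-slot window: silently dropped
    f.close()
  }
}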
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/RefIntHashMapSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import org.openjdk.jol.info.ClassLayout
import org.openjdk.jol.info.GraphLayout
import munit.FunSuite
import scala.util.Random
class RefIntHashMapSuite extends FunSuite {
import java.lang.{Long => JLong}
test("put") {
val m = new RefIntHashMap[JLong]
assertEquals(0, m.size)
m.put(11L, 42)
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(11) -> 42), m.toMap)
}
test("putIfAbsent") {
val m = new RefIntHashMap[JLong]
assertEquals(0, m.size)
assert(m.putIfAbsent(11L, 42))
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(11) -> 42), m.toMap)
assert(!m.putIfAbsent(11L, 43))
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(11) -> 42), m.toMap)
}
test("get") {
val m = new RefIntHashMap[JLong]
assertEquals(m.get(42L, -1), -1)
m.put(11L, 27)
assertEquals(m.get(42L, -1), -1)
assertEquals(m.get(11L, -1), 27)
}
test("get - collisions") {
// Underlying capacity will be 11, next prime after 10, so 0 and multiples of 11
// will collide
val m = new RefIntHashMap[JLong]
m.put(0L, 0)
m.put(11L, 1)
m.put(22L, 2)
assertEquals(m.size, 3)
assertEquals(m.get(0L, -1), 0)
assertEquals(m.get(11L, -1), 1)
assertEquals(m.get(22L, -1), 2)
}
test("dedup") {
val m = new RefIntHashMap[JLong]
m.put(42L, 1)
assertEquals(Map(JLong.valueOf(42) -> 1), m.toMap)
assertEquals(1, m.size)
m.put(42L, 2)
assertEquals(Map(JLong.valueOf(42) -> 2), m.toMap)
assertEquals(1, m.size)
}
test("increment") {
val m = new RefIntHashMap[JLong]
assertEquals(0, m.size)
m.increment(42L)
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(42) -> 1), m.toMap)
m.increment(42L)
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(42) -> 2), m.toMap)
m.increment(42L, 7)
assertEquals(1, m.size)
assertEquals(Map(JLong.valueOf(42) -> 9), m.toMap)
}
test("increment - collisions") {
// Underlying capacity will be 11, next prime after 10, so 0 and multiples of 11
// will collide
val m = new RefIntHashMap[JLong]
m.increment(0L)
m.increment(11L)
m.increment(22L)
assertEquals(m.size, 3)
m.foreach { (_, v) =>
assertEquals(v, 1)
}
}
test("mapToArray") {
val m = new RefIntHashMap[JLong]
m.increment(0L)
m.increment(11L)
m.increment(22L)
val data = m.mapToArray(new Array[Long](m.size)) { (k, v) =>
k + v
}
assertEquals(data.toList, List(1L, 12L, 23L))
}
test("mapToArray -- invalid length") {
val m = new RefIntHashMap[JLong]
m.increment(0L)
m.increment(11L)
m.increment(22L)
intercept[IllegalArgumentException] {
m.mapToArray(new Array[Long](0)) { (k, v) =>
k + v
}
}
}
test("resize") {
val m = new RefIntHashMap[JLong]
(0 until 10000).foreach(i => m.put(i.toLong, i))
assertEquals((0 until 10000).map(i => JLong.valueOf(i) -> i).toMap, m.toMap)
}
test("resize - increment") {
val m = new RefIntHashMap[JLong]
(0 until 10000).foreach(i => m.increment(i.toLong, i))
assertEquals((0 until 10000).map(i => JLong.valueOf(i) -> i).toMap, m.toMap)
}
test("random") {
val jmap = new scala.collection.mutable.HashMap[JLong, Int]
val imap = new RefIntHashMap[JLong]
(0 until 10000).foreach { i =>
val v = Random.nextInt()
imap.put(v.toLong, i)
jmap.put(v, i)
}
assertEquals(jmap.toMap, imap.toMap)
assertEquals(jmap.size, imap.size)
}
test("memory per map") {
// Sanity check to verify if some change introduces more overhead per set
val bytes = ClassLayout.parseClass(classOf[RefIntHashMap[JLong]]).instanceSize()
assertEquals(bytes, 32L)
}
test("memory - 5 items") {
val imap = new RefIntHashMap[JLong]
val jmap = new java.util.HashMap[Long, Int](10)
(0 until 5).foreach { i =>
imap.put(i.toLong, i)
jmap.put(i, i)
}
val igraph = GraphLayout.parseInstance(imap)
//val jgraph = GraphLayout.parseInstance(jmap)
//println(igraph.toFootprint)
//println(jgraph.toFootprint)
// Only objects should be the key/value arrays and the map itself + 5 key objects
assertEquals(igraph.totalCount(), 8L)
// Sanity check size is < 300 bytes
assert(igraph.totalSize() <= 300)
}
test("memory - 10k items") {
val imap = new RefIntHashMap[JLong]
val jmap = new java.util.HashMap[Long, Int](10)
(0 until 10000).foreach { i =>
imap.put(i.toLong, i)
jmap.put(i, i)
}
val igraph = GraphLayout.parseInstance(imap)
//val jgraph = GraphLayout.parseInstance(jmap)
//println(igraph.toFootprint)
//println(jgraph.toFootprint)
// Only objects should be the key/value arrays and the map itself + 10000 key objects
assertEquals(igraph.totalCount(), 3L + 10000)
// Sanity check size is < 500kb
assert(igraph.totalSize() <= 500000)
}
test("negative absolute value") {
val s = new RefIntHashMap[RefIntHashMapSuite.MinHash]()
assertEquals(s.get(new RefIntHashMapSuite.MinHash, 0), 0)
}
}
object RefIntHashMapSuite {
class MinHash {
override def hashCode: Int = Integer.MIN_VALUE
}
}
|
dmuino/atlas | atlas-chart/src/test/scala/com/netflix/atlas/chart/graphics/ScalesSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.chart.graphics
import munit.FunSuite
class ScalesSuite extends FunSuite {
test("linear") {
val scale = Scales.linear(0.0, 100.0, 0, 100)
assertEquals(scale(0.0), 0)
assertEquals(scale(10.0), 10)
assertEquals(scale(20.0), 20)
assertEquals(scale(30.0), 30)
assertEquals(scale(40.0), 40)
assertEquals(scale(50.0), 50)
assertEquals(scale(60.0), 60)
assertEquals(scale(70.0), 70)
assertEquals(scale(80.0), 80)
assertEquals(scale(90.0), 90)
assertEquals(scale(100.0), 100)
}
test("ylinear_l1_u2_h300") {
val scale = Scales.yscale(Scales.linear)(1.0, 2.0, 0, 300)
assertEquals(scale(1.0), 300)
assertEquals(scale(2.0), 0)
}
test("logarithmic") {
val scale = Scales.logarithmic(0.0, 100.0, 0, 100)
assertEquals(scale(0.0), 0)
assertEquals(scale(10.0), 51)
assertEquals(scale(20.0), 65)
assertEquals(scale(30.0), 74)
assertEquals(scale(40.0), 80)
assertEquals(scale(50.0), 85)
assertEquals(scale(60.0), 89)
assertEquals(scale(70.0), 92)
assertEquals(scale(80.0), 95)
assertEquals(scale(90.0), 97)
assertEquals(scale(100.0), 100)
}
test("logarithmic negative") {
val scale = Scales.logarithmic(-100.0, 0.0, 0, 100)
assertEquals(scale(0.0), 100)
assertEquals(scale(-10.0), 48)
assertEquals(scale(-20.0), 34)
assertEquals(scale(-30.0), 25)
assertEquals(scale(-40.0), 19)
assertEquals(scale(-50.0), 14)
assertEquals(scale(-60.0), 10)
assertEquals(scale(-70.0), 7)
assertEquals(scale(-80.0), 4)
assertEquals(scale(-90.0), 2)
assertEquals(scale(-100.0), 0)
}
test("logarithmic positive and negative") {
val scale = Scales.logarithmic(-100.0, 100.0, 0, 100)
assertEquals(scale(100.0), 100)
assertEquals(scale(50.0), 92)
assertEquals(scale(10.0), 75)
assertEquals(scale(0.0), 50)
assertEquals(scale(-10.0), 24)
assertEquals(scale(-50.0), 7)
assertEquals(scale(-100.0), 0)
}
test("logarithmic less than lower bound") {
val scale = Scales.logarithmic(15.0, 100.0, 0, 100)
assertEquals(scale(0.0), -150)
assertEquals(scale(10.0), -20)
assertEquals(scale(20.0), 14)
assertEquals(scale(30.0), 35)
assertEquals(scale(40.0), 51)
assertEquals(scale(50.0), 62)
assertEquals(scale(60.0), 72)
assertEquals(scale(70.0), 80)
assertEquals(scale(80.0), 88)
assertEquals(scale(90.0), 94)
assertEquals(scale(100.0), 100)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/norm/SumValueFunctionSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
import munit.FunSuite
class SumValueFunctionSuite extends FunSuite {
private def newFunction(step: Long) = {
val listVF = new ListValueFunction
val normalizeVF = new SumValueFunction(step, listVF)
listVF.f = normalizeVF
listVF
}
test("basic") {
val n = newFunction(10)
assertEquals(n.update(5, 1.0), List(10L -> 1.0))
assertEquals(n.update(15, 2.0), List(20L -> 2.0))
assertEquals(n.update(25, 2.0), List(30L -> 2.0))
assertEquals(n.update(35, 1.0), List(40L -> 1.0))
assertEquals(n.update(85, 1.0), List(90L -> 1.0))
assertEquals(n.update(95, 2.0), List(100L -> 2.0))
assertEquals(n.update(105, 2.0), List(110L -> 2.0))
n.close()
assertEquals(n.result(), Nil)
}
test("already normalized updates") {
val n = newFunction(10)
assertEquals(n.update(0, 1.0), List(0L -> 1.0))
assertEquals(n.update(10, 2.0), List(10L -> 2.0))
assertEquals(n.update(20, 3.0), List(20L -> 3.0))
assertEquals(n.update(30, 1.0), List(30L -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("already normalized updates, skip 1") {
val n = newFunction(10)
assertEquals(n.update(0, 1.0), List(0L -> 1.0))
assertEquals(n.update(10, 1.0), List(10L -> 1.0))
assertEquals(n.update(30, 1.0), List(30L -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("already normalized updates, miss heartbeat") {
val n = newFunction(10)
assertEquals(n.update(0, 1.0), List(0L -> 1.0))
assertEquals(n.update(10, 2.0), List(10L -> 2.0))
assertEquals(n.update(30, 1.0), List(30L -> 1.0))
assertEquals(n.update(60, 4.0), List(60L -> 4.0))
assertEquals(n.update(70, 2.0), List(70L -> 2.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset, skip 1") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
assertEquals(n.update(t(5, 13), 1.0), List(t(6, 0) -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset, skip 2") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
assertEquals(n.update(t(6, 13), 1.0), List(t(7, 0) -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset, skip almost 2") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
assertEquals(n.update(t(6, 5), 1.0), List(t(7, 0) -> 1.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset, out of order") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(1, 12), 1.0), List(t(2, 0) -> 2.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(2, 10), 1.0), List(t(3, 0) -> 2.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
assertEquals(n.update(t(3, 11), 1.0), List(t(4, 0) -> 2.0))
n.close()
assertEquals(n.result(), Nil)
}
test("random offset, dual reporting") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 1.0))
assertEquals(n.update(t(1, 13), 1.0), List(t(2, 0) -> 2.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 1.0))
assertEquals(n.update(t(2, 13), 1.0), List(t(3, 0) -> 2.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 1.0))
assertEquals(n.update(t(3, 13), 1.0), List(t(4, 0) -> 2.0))
n.close()
assertEquals(n.result(), Nil)
}
test("init, 17") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000)
val v = 1.0 / 60.0
assertEquals(n.update(t(8, 17), v), List(t(9, 0) -> v))
assertEquals(n.update(t(9, 17), 0.0), List(t(10, 0) -> 0.0))
assertEquals(n.update(t(10, 17), 0.0), List(t(11, 0) -> 0.0))
n.close()
assertEquals(n.result(), Nil)
}
test("frequent updates") {
val n = newFunction(10)
assertEquals(n.update(0, 1.0), List(0L -> 1.0))
assertEquals(n.update(2, 2.0), List(10L -> 2.0))
assertEquals(n.update(4, 4.0), List(10L -> 6.0))
assertEquals(n.update(8, 8.0), List(10L -> 14.0))
assertEquals(n.update(12, 2.0), List(20L -> 2.0))
assertEquals(n.update(40, 3.0), List(40L -> 3.0))
n.close()
assertEquals(n.result(), Nil)
}
test("multi-node updates") {
val n = newFunction(10)
// Node 1: if shutting down it can flush an interval early
assertEquals(n.update(0, 1.0), List(0L -> 1.0))
assertEquals(n.update(10, 2.0), List(10L -> 2.0))
// Other nodes: report around the same time. Need to ensure that the flush
// from the node shutting down doesn't block the updates from the other nodes
assertEquals(n.update(0, 3.0), List(0L -> 4.0))
assertEquals(n.update(0, 4.0), List(0L -> 8.0))
assertEquals(n.update(0, 5.0), List(0L -> 13.0))
assertEquals(n.update(10, 6.0), List(10L -> 8.0))
assertEquals(n.update(10, 7.0), List(10L -> 15.0))
assertEquals(n.update(10, 8.0), List(10L -> 23.0))
n.close()
assertEquals(n.result(), Nil)
}
}
|
dmuino/atlas | atlas-webapi/src/test/scala/com/netflix/atlas/webapi/PublishApiJsonSuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.webapi
import com.netflix.atlas.core.model.Datapoint
import com.netflix.atlas.json.Json
import munit.FunSuite
class PublishApiJsonSuite extends FunSuite {
test("encode and decode datapoint") {
val original = Datapoint(Map("name" -> "foo", "id" -> "bar"), 42L, 1024.0)
val decoded = PublishApi.decodeDatapoint(PublishApi.encodeDatapoint(original))
assertEquals(original, decoded)
}
test("encode and decode batch") {
val commonTags = Map("id" -> "bar")
val original = List(Datapoint(Map("name" -> "foo"), 42L, 1024.0))
val decoded = PublishApi.decodeBatch(PublishApi.encodeBatch(commonTags, original))
assertEquals(original.map(d => d.copy(tags = d.tags ++ commonTags)), decoded)
}
test("decode batch empty") {
val decoded = PublishApi.decodeBatch("{}")
assertEquals(decoded.size, 0)
}
test("decode with legacy array value") {
val expected = Datapoint(Map("name" -> "foo"), 42L, 1024.0)
val decoded =
PublishApi.decodeDatapoint("""{"tags":{"name":"foo"},"timestamp":42,"values":[1024.0]}""")
assertEquals(expected, decoded)
}
test("decode legacy batch empty") {
val decoded = PublishApi.decodeBatch("""
{
"tags": {},
"metrics": []
}
""")
assertEquals(decoded.size, 0)
}
test("decode legacy batch no tags") {
val decoded = PublishApi.decodeBatch("""
{
"metrics": []
}
""")
assertEquals(decoded.size, 0)
}
test("decode legacy batch with tags before") {
val decoded = PublishApi.decodeBatch("""
{
"tags": {
"foo": "bar"
},
"metrics": [
{
"tags": {"name": "test"},
"start": 123456789,
"values": [1.0]
}
]
}
""")
assertEquals(decoded.size, 1)
assertEquals(decoded.head.tags, Map("name" -> "test", "foo" -> "bar"))
}
test("decode legacy batch with tags after") {
val decoded = PublishApi.decodeBatch("""
{
"metrics": [
{
"tags": {"name": "test"},
"start": 123456789,
"values": [1.0]
}
],
"tags": {
"foo": "bar"
}
}
""")
assertEquals(decoded.size, 1)
assertEquals(decoded.head.tags, Map("name" -> "test", "foo" -> "bar"))
}
test("decode legacy batch no tags metric") {
val decoded = PublishApi.decodeBatch("""
{
"metrics": [
{
"tags": {"name": "test"},
"start": 123456789,
"values": [1.0]
}
]
}
""")
assertEquals(decoded.size, 1)
}
test("decode legacy batch with empty name") {
val decoded = PublishApi.decodeBatch("""
{
"metrics": [
{
"tags": {"name": ""},
"start": 123456789,
"values": [1.0]
}
]
}
""")
assertEquals(decoded.size, 1)
decoded.foreach { d =>
assertEquals(d.tags, Map("name" -> ""))
}
}
test("decode legacy batch with null name") {
val decoded = PublishApi.decodeBatch("""
{
"metrics": [
{
"tags": {"name": null},
"start": 123456789,
"values": [1.0]
}
]
}
""")
assertEquals(decoded.size, 1)
decoded.foreach { d =>
assertEquals(d.tags, Map.empty[String, String])
}
}
test("decode list empty") {
val decoded = PublishApi.decodeList("""
[]
""")
assertEquals(decoded.size, 0)
}
test("decode list") {
val decoded = PublishApi.decodeList("""
[
{
"tags": {"name": "test"},
"timestamp": 123456789,
"values": 1.0
}
]
""")
assertEquals(decoded.size, 1)
}
test("decode list with unknown key") {
val decoded = PublishApi.decodeList("""
[
{
"tags": {"name": "test"},
"timestamp": 123456789,
"unknown": {},
"values": 1.0
},
{
"tags": {"name": "test"},
"timestamp": 123456789,
"unknown": {"a":{"b":"c"},"b":[1,2,3]},
"values": 1.0
},
{
"tags": {"name": "test"},
"timestamp": 123456789,
"unknown": [1,2,3],
"values": 1.0
},
{
"tags": {"name": "test"},
"timestamp": 123456789,
"unknown": "foo",
"values": 1.0
}
]
""")
assertEquals(decoded.size, 4)
}
test("decode batch bad object") {
intercept[IllegalArgumentException] {
PublishApi.decodeBatch("""{"foo":"bar"}""")
}
}
test("decode list from encoded datapoint") {
val vs = List(Datapoint(Map("a" -> "b"), 0L, 42.0))
val decoded = PublishApi.decodeList(Json.encode(vs))
assertEquals(decoded.size, 1)
}
test("decode list from PublishApi.encoded datapoint") {
val vs = "[" + PublishApi.encodeDatapoint(Datapoint(Map("a" -> "b"), 0L, 42.0)) + "]"
val decoded = PublishApi.decodeList(vs)
assertEquals(decoded.size, 1)
}
}
|
dmuino/atlas | atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/LwcToAggrDatapoint.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.stream.Attributes
import akka.stream.FlowShape
import akka.stream.Inlet
import akka.stream.Outlet
import akka.stream.stage.GraphStage
import akka.stream.stage.GraphStageLogic
import akka.stream.stage.InHandler
import akka.stream.stage.OutHandler
import com.netflix.atlas.eval.model.AggrDatapoint
import com.netflix.atlas.eval.model.LwcDataExpr
import com.netflix.atlas.eval.model.LwcDatapoint
import com.netflix.atlas.eval.model.LwcDiagnosticMessage
import com.netflix.atlas.eval.model.LwcHeartbeat
import com.netflix.atlas.eval.model.LwcSubscription
/**
* Process the SSE output from an LWC service and convert it into a stream of
* [[AggrDatapoint]]s that can be used for evaluation.
*/
private[stream] class LwcToAggrDatapoint(context: StreamContext)
extends GraphStage[FlowShape[AnyRef, AggrDatapoint]] {
private val in = Inlet[AnyRef]("LwcToAggrDatapoint.in")
private val out = Outlet[AggrDatapoint]("LwcToAggrDatapoint.out")
override val shape: FlowShape[AnyRef, AggrDatapoint] = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = {
new GraphStageLogic(shape) with InHandler with OutHandler {
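// Maps a subscription id to its data expression metadata. Entries are added
// as LwcSubscription messages arrive and are then used to resolve the
// expression and step for later LwcDatapoint and LwcDiagnosticMessage events.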
private[this] val state = scala.collection.mutable.AnyRefMap.empty[String, LwcDataExpr]
// HACK: needed until we can plumb the actual source through the system
private var nextSource: Int = 0
override def onPush(): Unit = {
grab(in) match {
case sb: LwcSubscription => updateState(sb)
case dp: LwcDatapoint => pushDatapoint(dp)
case dg: LwcDiagnosticMessage => pushDiagnosticMessage(dg)
case hb: LwcHeartbeat => pushHeartbeat(hb)
case _ => pull(in)
}
}
private def updateState(sub: LwcSubscription): Unit = {
sub.metrics.foreach { m =>
if (!state.contains(m.id)) {
state.put(m.id, m)
}
}
pull(in)
}
private def pushDatapoint(dp: LwcDatapoint): Unit = {
state.get(dp.id) match {
case Some(sub) =>
// TODO, put in source, for now make it random to avoid dedup
nextSource += 1
val expr = sub.expr
val step = sub.step
push(
out,
AggrDatapoint(dp.timestamp, step, expr, nextSource.toString, dp.tags, dp.value)
)
case None =>
pull(in)
}
}
private def pushDiagnosticMessage(diagMsg: LwcDiagnosticMessage): Unit = {
state.get(diagMsg.id).foreach { sub =>
context.log(sub.expr, diagMsg.message)
}
pull(in)
}
private def pushHeartbeat(hb: LwcHeartbeat): Unit = {
push(out, AggrDatapoint.heartbeat(hb.timestamp, hb.step))
}
override def onPull(): Unit = {
pull(in)
}
override def onUpstreamFinish(): Unit = {
completeStage()
}
setHandlers(in, out, this)
}
}
}
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/model/LwcMessagesSuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.model
import akka.util.ByteString
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.core.util.Streams
import com.netflix.atlas.json.Json
import munit.FunSuite
import java.util.Random
import java.util.UUID
import scala.util.Using
class LwcMessagesSuite extends FunSuite {
private val step = 60000
test("data expr, decode with legacy frequency field") {
val json = """[{"id":"1234","expression":"name,cpu,:eq,:sum","frequency":10}]"""
val parser = Json.newJsonParser(json)
try {
val actual = LwcMessages.parseDataExprs(parser).head
val expected = LwcDataExpr("1234", "name,cpu,:eq,:sum", 10)
assertEquals(actual, expected)
} finally {
parser.close()
}
}
test("subscription info") {
val expr = "name,cpu,:eq,:avg"
val sum = "name,cpu,:eq,:sum"
val count = "name,cpu,:eq,:count"
val dataExprs = List(LwcDataExpr("a", sum, step), LwcDataExpr("b", count, step))
val expected = LwcSubscription(expr, dataExprs)
val actual = LwcMessages.parse(Json.encode(expected))
assertEquals(actual, expected)
}
test("datapoint") {
val expected = LwcDatapoint(step, "a", Map("foo" -> "bar"), 42.0)
val actual = LwcMessages.parse(Json.encode(expected))
assertEquals(actual, expected)
}
test("datapoint, custom encode") {
val expected = LwcDatapoint(step, "a", Map("foo" -> "bar"), 42.0)
val actual = LwcMessages.parse(expected.toJson)
assertEquals(actual, expected)
}
test("diagnostic message") {
val expected = DiagnosticMessage.error("something bad happened")
val actual = LwcMessages.parse(Json.encode(expected))
assertEquals(actual, expected)
}
test("diagnostic message for a particular expression") {
val expected = LwcDiagnosticMessage("abc", DiagnosticMessage.error("something bad happened"))
val actual = LwcMessages.parse(Json.encode(expected))
assertEquals(actual, expected)
}
test("heartbeat") {
val expected = LwcHeartbeat(1234567890L, 10L)
val actual = LwcMessages.parse(Json.encode(expected))
assertEquals(actual, expected)
}
test("heartbeat not on step boundary") {
intercept[IllegalArgumentException] {
LwcHeartbeat(1234567891L, 10L)
}
}
test("batch: expression") {
val expected = (0 until 10).map { i =>
LwcExpression("name,cpu,:eq,:max", i)
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: subscription") {
val expected = (0 until 10).map { i =>
LwcSubscription(
"name,cpu,:eq,:avg",
List(
LwcDataExpr(s"$i", "name,cpu,:eq,:sum", i),
LwcDataExpr(s"$i", "name,cpu,:eq,:count", i)
)
)
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: datapoint") {
val expected = (0 until 10).map { i =>
LwcDatapoint(
System.currentTimeMillis(),
s"$i",
if (i % 2 == 0) Map.empty else Map("name" -> "cpu", "node" -> s"i-$i"),
i
)
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: lwc diagnostic") {
val expected = (0 until 10).map { i =>
LwcDiagnosticMessage(s"$i", DiagnosticMessage.error("foo"))
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: diagnostic") {
val expected = (0 until 10).map { i =>
DiagnosticMessage.error(s"error $i")
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: heartbeat") {
val expected = (1 to 10).map { i =>
val step = i * 1000
LwcHeartbeat(System.currentTimeMillis() / step * step, step)
}
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected.toList)
}
test("batch: compatibility") {
// Other tests generate new payloads, but this could mean we do a change that breaks
// compatibility with existing versions. To check for that this test loads a file
// that has been pre-encoded.
val expected = List(
LwcExpression("name,cpu,:eq,:max", 60_000),
LwcSubscription(
"name,cpu,:eq,:avg",
List(
LwcDataExpr("0", "name,cpu,:eq,:sum", 10_000),
LwcDataExpr("1", "name,cpu,:eq,:count", 10_000)
)
),
LwcDatapoint(
1234567890,
"id",
Map.empty,
1.0
),
LwcDatapoint(
1234567890,
"id",
Map("name" -> "cpu", "node" -> s"i-12345"),
2.0
),
LwcDiagnosticMessage("2", DiagnosticMessage.error("foo")),
DiagnosticMessage.info("bar"),
LwcHeartbeat(1234567890, 10)
)
val actual = Using.resource(Streams.resource("lwc-batch.smile")) { in =>
LwcMessages.parseBatch(ByteString(Streams.byteArray(in)))
}
assertEquals(actual, expected)
}
test("batch: random") {
val random = new Random()
(0 until 100).foreach { _ =>
val n = random.nextInt(1000)
val expected = (0 until n).map(_ => randomObject(random)).toList
val actual = LwcMessages.parseBatch(LwcMessages.encodeBatch(expected))
assertEquals(actual, expected)
}
}
private def randomObject(random: Random): AnyRef = {
random.nextInt(6) match {
case 0 =>
LwcExpression(randomString, randomStep(random))
case 1 =>
val n = random.nextInt(10) + 1
LwcSubscription(
randomString,
(0 until n).map(_ => LwcDataExpr(randomString, randomString, randomStep(random))).toList
)
case 2 =>
// Use positive infinity to test special double values. Do not use NaN here
// because NaN != NaN so it will break the assertions for tests.
LwcDatapoint(
random.nextLong(),
randomString,
randomTags(random),
if (random.nextDouble() < 0.1) Double.PositiveInfinity else random.nextDouble()
)
case 3 =>
val msg = DiagnosticMessage(randomString, randomString, None)
LwcDiagnosticMessage(randomString, msg)
case 4 =>
DiagnosticMessage(randomString, randomString, None)
case _ =>
val step = randomStep(random)
val timestamp = System.currentTimeMillis() / step * step
LwcHeartbeat(timestamp, step)
}
}
private def randomTags(random: Random): Map[String, String] = {
val n = random.nextInt(10)
(0 until n).map(_ => randomString -> randomString).toMap
}
private def randomStep(random: Random): Long = {
random.nextInt(10) * 1000 + 1000
}
private def randomString: String = UUID.randomUUID().toString
}
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/util/HostRewriterSuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.util
import com.netflix.atlas.core.model.CustomVocabulary
import com.netflix.atlas.core.model.ModelExtractors
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.stacklang.Interpreter
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class HostRewriterSuite extends FunSuite {
private val config = ConfigFactory.load()
private val interpreter = Interpreter(new CustomVocabulary(config).allWords)
private def interpret(str: String): List[StyleExpr] = {
interpreter.execute(str).stack.reverse.flatMap {
case ModelExtractors.PresentationType(t) => t.perOffset
case v => throw new MatchError(v)
}
}
test("default shouldn't change the expr") {
val rewriter = new HostRewriter(config.getConfig("atlas.eval.host-rewrite"))
val exprs = interpret("name,sps,:eq,:sum")
val host = "foo.example.com"
assertEquals(rewriter.rewrite(host, exprs), exprs)
}
test("restrict by region extracted from host") {
val regionConfig = ConfigFactory.parseString("""
|pattern = "^foo\\.([^.]+)\\.example.com$"
|key = "region"
|""".stripMargin)
val rewriter = new HostRewriter(regionConfig)
val exprs = interpret("name,sps,:eq,:sum")
val expected = interpret("name,sps,:eq,region,us-east-1,:eq,:and,:sum")
val host = "foo.us-east-1.example.com"
assertEquals(rewriter.rewrite(host, exprs), expected)
}
test("use first group if multiple in pattern") {
val regionConfig =
ConfigFactory.parseString("""
|pattern = "^foo\\.([^.]+)\\.(example|example2).com$"
|key = "region"
|""".stripMargin)
val rewriter = new HostRewriter(regionConfig)
val exprs = interpret("name,sps,:eq,:sum")
val expected = interpret("name,sps,:eq,region,us-east-1,:eq,:and,:sum")
val host = "foo.us-east-1.example.com"
assertEquals(rewriter.rewrite(host, exprs), expected)
}
test("no group in pattern") {
val regionConfig = ConfigFactory.parseString("""
|pattern = "^foo\\.example\\.com$"
|key = "region"
|""".stripMargin)
val rewriter = new HostRewriter(regionConfig)
val exprs = interpret("name,sps,:eq,:sum")
val host = "foo.example.com"
intercept[IndexOutOfBoundsException] {
rewriter.rewrite(host, exprs)
}
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/model/VocabularySuite.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import munit.FunSuite
class VocabularySuite extends FunSuite {
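// Sanity check over every word in the style vocabulary and its dependencies:
// each word should be equal to itself, exercising the equals implementation
// of the word objects.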
for (vocab <- StyleVocabulary :: StyleVocabulary.dependencies; w <- vocab.words) {
test(s"${vocab.name}: ${w.name} == self") {
assertEquals(w, w)
}
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/model/BlockSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import nl.jqno.equalsverifier.EqualsVerifier
import nl.jqno.equalsverifier.Warning
import munit.FunSuite
import scala.util.Random
class BlockSuite extends FunSuite {
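// Builds a compressed block from runs expressed as (lastIndex, value) pairs:
// each pair fills the buffer with its value up to and including lastIndex,
// starting where the previous run left off.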
def rleBlock(start: Long, data: List[(Int, Double)], size: Int = 60): Block = {
val block = ArrayBlock(start, size)
var i = 0
data.foreach { t =>
(i to t._1).foreach { j =>
block.buffer(j) = t._2
}
i = t._1 + 1
}
Block.compress(block)
}
def checkValues(b: Block, values: List[Double]): Unit = {
values.zipWithIndex.foreach { v =>
val msg = "b(%d) => %f != %f".format(v._2, b.get(v._2), v._1)
val res = java.lang.Double.compare(v._1, b.get(v._2))
assert(res == 0, msg)
}
}
test("ConstantBlock.get") {
val b = ConstantBlock(0L, 60, 42.0)
checkValues(b, (0 until 60).map(i => 42.0).toList)
}
test("ArrayBlock.get") {
val b = ArrayBlock(0L, 60)
(0 until 60).foreach(i => b.buffer(i) = i)
checkValues(b, (0 until 60).map(i => i.toDouble).toList)
intercept[ArrayIndexOutOfBoundsException] {
b.get(60)
}
}
test("RleBlock.get") {
val data = List(5 -> 42.0, 37 -> Double.NaN, 59 -> 21.0)
val b = rleBlock(0L, data)
val ab = b.toArrayBlock
checkValues(b, ab.buffer.toList)
}
test("SparseBlock.get") {
val data =
(0 until 5).map(i => 0) ++
(5 until 37).map(i => SparseBlock.NaN) ++
(37 until 60).map(i => 1)
val indexes = data.map(_.asInstanceOf[Byte]).toArray
val values = Array(42.0, 21.0)
val b = SparseBlock(0L, indexes, values)
val ab = b.toArrayBlock
checkValues(b, ab.buffer.toList)
}
test("SparseBlock.get, size > 120") {
val data =
(0 until 5).map(i => 0) ++
(5 until 37).map(i => SparseBlock.NaN) ++
(37 until 360).map(i => 1)
val indexes = data.map(_.asInstanceOf[Byte]).toArray
val values = Array(42.0, 21.0)
val b = SparseBlock(0L, indexes, values)
val ab = b.toArrayBlock
checkValues(b, ab.buffer.toList)
}
test("Block.get(pos, aggr)") {
import java.lang.{Double => JDouble}
val b = ArrayBlock(0L, 2)
b.buffer(0) = 0.0
b.buffer(1) = Double.NaN
assertEquals(b.get(0, Block.Sum), 0.0)
assertEquals(b.get(0, Block.Count), 1.0)
assertEquals(b.get(0, Block.Min), 0.0)
assertEquals(b.get(0, Block.Max), 0.0)
assert(JDouble.isNaN(b.get(1, Block.Sum)))
assert(JDouble.isNaN(b.get(1, Block.Count)))
assert(JDouble.isNaN(b.get(1, Block.Min)))
assert(JDouble.isNaN(b.get(1, Block.Max)))
}
test("compress, constant value") {
val b = ArrayBlock(0L, 60)
(0 until 60).foreach(i => b.buffer(i) = 42.0)
assert(Block.compress(b) == ConstantBlock(0L, 60, 42.0))
}
test("compress, rle -> sparse") {
val data = List(5 -> 42.0, 37 -> Double.NaN, 59 -> 21.0)
val b = rleBlock(0L, data)
val nb = Block.compress(b.toArrayBlock).asInstanceOf[SparseBlock]
//assertEquals(nb.byteCount, 84)
assertEquals(nb.values.length, 2)
(0 until 60).foreach { i =>
assertEquals(java.lang.Double.compare(b.get(i), nb.get(i)), 0)
}
}
test("compress, rle -> sparse, special values") {
val data = List(5 -> 2.0, 37 -> Double.NaN, 59 -> 0.0)
val b = rleBlock(0L, data)
val nb = Block.compress(b.toArrayBlock).asInstanceOf[SparseBlock]
assertEquals(nb.values.length, 1)
(0 until 60).foreach { i =>
assertEquals(java.lang.Double.compare(b.get(i), nb.get(i)), 0)
}
}
test("compress, rle -> sparse, large block") {
val data = List(5 -> 2.0, 37 -> Double.NaN, 359 -> 0.0)
val b = rleBlock(0L, data, 360)
val nb = Block.compress(b.toArrayBlock).asInstanceOf[SparseBlock]
assertEquals(nb.values.length, 1)
(0 until 360).foreach { i =>
assertEquals(java.lang.Double.compare(b.get(i), nb.get(i)), 0)
}
}
test("compress fails on large block") {
val data = (0 until 360).map(i => i -> (if (i <= 129) i.toDouble else 42.42)).toList
val b = rleBlock(0L, data, 360)
val nb = Block.compress(b.toArrayBlock)
(0 until 360).foreach { i =>
assertEquals(java.lang.Double.compare(b.get(i), nb.get(i)), 0)
}
}
test("compress, array") {
val b = ArrayBlock(0L, 60)
(0 until 60).foreach(i => b.buffer(i) = i)
assert(Block.compress(b) eq b)
}
test("lossyCompress, array") {
val b = ArrayBlock(0L, 60)
(0 until 60).foreach(i => b.buffer(i) = i)
assertEquals(Block.lossyCompress(b), FloatArrayBlock(b))
}
test("compress, small block") {
val b = ArrayBlock(0L, 5)
(0 until 5).foreach(i => b.buffer(i) = 42.0)
assert(Block.compress(b) eq b)
}
test("merge") {
val data1 = List(5 -> 42.0, 37 -> Double.NaN, 59 -> 21.0)
val data2 = List(9 -> 41.0, 45 -> Double.NaN, 59 -> 22.0)
val expected = List(5 -> 42.0, 9 -> 41.0, 37 -> Double.NaN, 45 -> 21.0, 59 -> 22.0)
val b1 = rleBlock(0L, data1)
val b2 = rleBlock(0L, data2)
val b3 = Block.merge(b1, b2)
val expBlock = rleBlock(0L, expected).toArrayBlock
(0 until 60).foreach { i =>
assertEquals(java.lang.Double.compare(b3.get(i), expBlock.get(i)), 0)
}
assert(b3.isInstanceOf[SparseBlock])
}
test("merge rollup") {
val b1 = RollupBlock(
min = ConstantBlock(0L, 60, 1.0),
max = ConstantBlock(0L, 60, 50.0),
sum = ConstantBlock(0L, 60, 51.0),
count = ConstantBlock(0L, 60, 2.0)
)
val b2 = RollupBlock(
min = ConstantBlock(0L, 60, 2.0),
max = ConstantBlock(0L, 60, 3.0),
sum = ConstantBlock(0L, 60, 6.0),
count = ConstantBlock(0L, 60, 3.0)
)
val b3 = Block.merge(b1, b2)
val expected = RollupBlock(
min = ConstantBlock(0L, 60, 2.0),
max = ConstantBlock(0L, 60, 50.0),
sum = ConstantBlock(0L, 60, 51.0),
count = ConstantBlock(0L, 60, 3.0)
)
assertEquals(b3, expected)
}
test("merge prefer rollup to scalar") {
val b1 = RollupBlock(
min = ConstantBlock(0L, 60, 1.0),
max = ConstantBlock(0L, 60, 50.0),
sum = ConstantBlock(0L, 60, 51.0),
count = ConstantBlock(0L, 60, 2.0)
)
val b2 = ConstantBlock(0L, 60, 2.0)
assertEquals(Block.merge(b1, b2), b1)
assertEquals(Block.merge(b2, b1), b1)
}
test("rollup") {
import java.lang.{Double => JDouble}
val n = 5
val r = RollupBlock.empty(0L, n)
(0 until n).foreach { i =>
assert(JDouble.isNaN(r.get(i, Block.Sum)))
assert(JDouble.isNaN(r.get(i, Block.Count)))
assert(JDouble.isNaN(r.get(i, Block.Min)))
assert(JDouble.isNaN(r.get(i, Block.Max)))
}
r.rollup(ConstantBlock(0L, n, 1.0))
(0 until n).foreach { i =>
assertEquals(r.get(i, Block.Sum), 1.0)
assertEquals(r.get(i, Block.Count), 1.0)
assertEquals(r.get(i, Block.Min), 1.0)
assertEquals(r.get(i, Block.Max), 1.0)
}
r.rollup(ConstantBlock(0L, n, 3.0))
(0 until n).foreach { i =>
assertEquals(r.get(i, Block.Sum), 4.0)
assertEquals(r.get(i, Block.Count), 2.0)
assertEquals(r.get(i, Block.Min), 1.0)
assertEquals(r.get(i, Block.Max), 3.0)
}
r.rollup(ConstantBlock(0L, n, 0.5))
(0 until n).foreach { i =>
assertEquals(r.get(i, Block.Sum), 4.5)
assertEquals(r.get(i, Block.Count), 3.0)
assertEquals(r.get(i, Block.Min), 0.5)
assertEquals(r.get(i, Block.Max), 3.0)
}
}
test("compressed array: get/set 2") {
import CompressedArrayBlock._
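// get2/set2 pack 32 two-bit slots into a single Long, so slot i occupies bits
// [2*i, 2*i+1]. Setting one slot of -1L (all bits set) should leave every
// other slot reading as 3 (0b11).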
(0 until 32).foreach { i =>
assertEquals(set2(0L, i, 0), 0L)
assertEquals(set2(0L, i, 1), 1L << (2 * i))
assertEquals(set2(0L, i, 2), 2L << (2 * i))
assertEquals(set2(0L, i, 3), 3L << (2 * i))
(0 until 32).foreach { j =>
assertEquals(get2(set2(-1L, i, 0), j), (if (i == j) 0 else 3))
assertEquals(get2(set2(-1L, i, 1), j), (if (i == j) 1 else 3))
assertEquals(get2(set2(-1L, i, 2), j), (if (i == j) 2 else 3))
assertEquals(get2(set2(-1L, i, 3), j), 3)
}
}
}
test("compressed array: get/set 4") {
import CompressedArrayBlock._
(0 until 16).foreach { i =>
(0 until 16).foreach { v =>
assertEquals(set4(0L, i, v), v.toLong << (4 * i))
}
(0 until 16).foreach { j =>
(0 until 16).foreach { v =>
assertEquals(get4(set4(-1L, i, v), j), (if (i == j) v else 0xF))
}
}
}
}
test("compressed array: ceiling division") {
import CompressedArrayBlock._
assertEquals(ceilingDivide(8, 2), 4)
assertEquals(ceilingDivide(9, 2), 5)
}
test("compressed array: empty") {
val block = CompressedArrayBlock(0L, 60)
(0 until 60).foreach { i =>
assert(block.get(i).isNaN)
}
}
test("compressed array: zero") {
val block = CompressedArrayBlock(0L, 60)
(0 until 60).foreach { i =>
block.update(i, 0.0)
assertEquals(block.get(i), 0.0)
}
assert(block.byteCount < 25)
}
test("compressed array: single increment") {
val block = CompressedArrayBlock(0L, 60)
block.update(14, 1.0 / 60.0)
assertEquals(block.get(14), 1.0 / 60.0)
(15 until 30).foreach { i =>
block.update(i, 0.0)
assertEquals(block.get(i), 0.0)
}
assert(block.byteCount < 25)
}
test("compressed array: double increment") {
val block = CompressedArrayBlock(0L, 60)
block.update(14, 2.0 / 60.0)
assertEquals(block.get(14), 2.0 / 60.0)
(15 until 30).foreach { i =>
block.update(i, 0.0)
assertEquals(block.get(i), 0.0)
}
assert(block.byteCount < 50)
}
test("compressed array: single uncommon value") {
val block = CompressedArrayBlock(0L, 60)
(0 until 60).foreach { i =>
block.update(i, 2.0)
assertEquals(block.get(i), 2.0)
}
assert(block.byteCount < 50)
}
test("compressed array: grow") {
val block = CompressedArrayBlock(0L, 60)
(0 until 60).foreach { i =>
block.update(i, i.toDouble)
assertEquals(block.get(i), i.toDouble)
}
assert(block.byteCount < 500)
}
test("compressed array: random access") {
val block = CompressedArrayBlock(0L, 60)
Random.shuffle((0 until 60).toList).foreach { i =>
block.update(i, i.toDouble)
}
(0 until 60).foreach { i =>
assertEquals(block.get(i), i.toDouble)
}
assert(block.byteCount < 500)
}
test("compressed array: various block sizes") {
(1 until 24 * 60).foreach { i =>
val block = CompressedArrayBlock(0L, i)
(0 until i).foreach { j =>
block.update(j, j.toDouble)
assertEquals(block.get(j), j.toDouble)
}
}
}
test("compressed array: small") {
val block = CompressedArrayBlock(0L, 2)
block.update(0, 1.0)
assertEquals(block.get(0), 1.0)
assert(block.get(1).isNaN)
}
test("compressed array: equals") {
EqualsVerifier
.forClass(classOf[CompressedArrayBlock])
.suppress(Warning.NONFINAL_FIELDS)
.verify()
}
}
|
dmuino/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/graph/SimpleLegendsSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.graph
import com.netflix.atlas.core.model.CustomVocabulary
import com.netflix.atlas.core.model.ModelExtractors
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.stacklang.Interpreter
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class SimpleLegendsSuite extends FunSuite {
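// Sentinel returned when no legend was generated for an expression, letting
// the tests distinguish "no legend" from an empty legend string.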
private val notSet = "__NOT_SET__"
private val interpreter = Interpreter(new CustomVocabulary(ConfigFactory.load()).allWords)
private def eval(str: String): List[StyleExpr] = {
interpreter
.execute(str)
.stack
.map {
case ModelExtractors.PresentationType(t) => t
case v => throw new MatchError(v)
}
.reverse
.flatMap(_.perOffset)
}
private def legends(str: String): List[String] = {
SimpleLegends.generate(eval(str)).map(_.settings.getOrElse("legend", notSet))
}
test("honor explicit legend") {
assertEquals(legends("name,cpu,:eq,:sum,foo,:legend"), List("foo"))
}
test("just math") {
assertEquals(legends("4,5,:add,10,:mul"), List(notSet))
}
test("reqular query and just math") {
assertEquals(legends("name,cpu,:eq,:sum,seconds,:time"), List("cpu", notSet))
}
test("prefer just name") {
assertEquals(legends("name,cpu,:eq,:sum"), List("cpu"))
assertEquals(legends("name,cpu,:eq,id,user,:eq,:and,:sum"), List("cpu"))
}
test("use group by keys") {
assertEquals(legends("name,cpu,:eq,:sum,(,app,id,),:by"), List("$app $id"))
}
test("name with math") {
assertEquals(legends("name,cpu,:eq,:sum,4,:add,6,:mul,:abs"), List("cpu"))
}
test("name regex") {
assertEquals(legends("name,cpu,:re,:sum"), List("cpu"))
}
test("name not present") {
assertEquals(legends("id,user,:eq,:sum"), List("user"))
}
test("name with offsets") {
val expr = "name,cpu,:eq,:sum,(,0h,1w,),:offset"
assertEquals(legends(expr), List("cpu", "cpu (offset=$atlas.offset)"))
}
test("name with avg") {
assertEquals(legends("name,cpu,:eq,:avg"), List("cpu"))
}
test("name with dist avg") {
assertEquals(legends("name,cpu,:eq,:dist-avg"), List("cpu"))
}
test("name with dist-stddev") {
assertEquals(legends("name,cpu,:eq,:dist-stddev"), List("cpu"))
}
test("name not clause") {
assertEquals(legends("name,cpu,:eq,:not,:sum"), List("!cpu"))
}
test("name with node avg") {
assertEquals(legends("name,cpu,:eq,:node-avg"), List("cpu"))
}
test("group by with offsets") {
val expr = "name,cpu,:eq,:sum,(,id,),:by,(,0h,1w,),:offset"
assertEquals(legends(expr), List("$id", "$id (offset=$atlas.offset)"))
}
test("complex: same name and math") {
assertEquals(legends("name,cpu,:eq,:sum,:dup,:add"), List("cpu"))
}
test("complex: not clause") {
val expr = "name,cpu,:eq,:dup,id,user,:eq,:and,:sum,:swap,id,user,:eq,:not,:and,:sum"
assertEquals(legends(expr), List("user", "!user"))
}
test("complex: different names and math") {
assertEquals(legends("name,cpu,:eq,:sum,name,disk,:eq,:sum,:and"), List(notSet))
}
test("multi: different names") {
assertEquals(legends("name,cpu,:eq,:sum,name,disk,:eq,:sum"), List("cpu", "disk"))
}
test("multi: same name further restricted") {
val vs = legends(
"name,cpu,:eq,:sum," +
"name,cpu,:eq,id,user,:eq,:and,:sum," +
"name,cpu,:eq,id,system,:eq,:and,:sum," +
"name,cpu,:eq,id,idle,:eq,:and,:sum,"
)
assertEquals(vs, List("cpu", "user", "system", "idle"))
}
test("multi: same name with math") {
val vs = legends("name,cpu,:eq,:sum,:dup,4,:add")
assertEquals(vs, List("cpu", "cpu"))
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/algorithm/OnlineIgnoreNSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.algorithm
class OnlineIgnoreNSuite extends BaseOnlineAlgorithmSuite {
override def newInstance: OnlineAlgorithm = OnlineIgnoreN(10)
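// Based on the assertions below, OnlineIgnoreN(n) emits NaN for the first n
// values after creation (or a reset) and passes values through unchanged
// afterwards.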
test("n = 1") {
val algo = OnlineIgnoreN(1)
assert(algo.next(0.0).isNaN)
assertEquals(algo.next(1.0), 1.0)
assertEquals(algo.next(2.0), 2.0)
assert(algo.next(Double.NaN).isNaN)
}
test("n = 1, reset") {
val algo = OnlineIgnoreN(1)
assert(algo.next(0.0).isNaN)
assertEquals(algo.next(1.0), 1.0)
algo.reset()
assert(algo.next(2.0).isNaN)
assertEquals(algo.next(3.0), 3.0)
}
test("n = 2") {
val algo = OnlineIgnoreN(2)
assert(algo.next(0.0).isNaN)
assert(algo.next(1.0).isNaN)
assertEquals(algo.next(2.0), 2.0)
assert(algo.next(Double.NaN).isNaN)
}
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/db/AggregateCollectorSuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.db
import com.netflix.atlas.core.model.ArrayTimeSeq
import com.netflix.atlas.core.model.CollectorStats
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.DsType
import com.netflix.atlas.core.model.Query
import munit.FunSuite
class AggregateCollectorSuite extends FunSuite {
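// Both helpers create a single-datapoint TimeSeriesBuffer at a 60s step; the
// tagged variant is used by the group-by tests, which key buffers off their
// tags.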
private def newBuffer(v: Double, start: Long = 0L) = {
new TimeSeriesBuffer(Map.empty, new ArrayTimeSeq(DsType.Gauge, start, 60000, Array.fill(1)(v)))
}
private def newTaggedBuffer(tags: Map[String, String], v: Double, start: Long = 0L) = {
new TimeSeriesBuffer(tags, new ArrayTimeSeq(DsType.Gauge, start, 60000, Array.fill(1)(v)))
}
test("sum collector") {
val c = new SumAggregateCollector
assertEquals(c.result, Nil)
c.add(newBuffer(1.0))
c.add(newBuffer(1.0))
c.add(newBuffer(1.0))
c.add(newBuffer(1.0))
assertEquals(c.result, List(newBuffer(4.0)))
assertEquals(c.stats, CollectorStats(4, 4, 1, 1))
}
test("sum collector -- combine") {
val c1 = new SumAggregateCollector
c1.add(newBuffer(1.0))
val c2 = new SumAggregateCollector
c2.add(newBuffer(2.0))
c1.combine(c2)
assertEquals(c1.result, List(newBuffer(3.0)))
assertEquals(c1.stats, CollectorStats(2, 2, 1, 1))
}
test("sum collector -- combine(null, c)") {
val c1 = new SumAggregateCollector
val c2 = new SumAggregateCollector
c2.add(newBuffer(2.0))
c1.combine(c2)
assertEquals(c1.result, List(newBuffer(2.0)))
assertEquals(c1.stats, CollectorStats(1, 1, 1, 1))
}
test("sum collector -- combine(c, null)") {
val c1 = new SumAggregateCollector
c1.add(newBuffer(1.0))
val c2 = new SumAggregateCollector
c1.combine(c2)
assertEquals(c1.result, List(newBuffer(1.0)))
assertEquals(c1.stats, CollectorStats(1, 1, 1, 1))
}
test("sum collector -- combine(null, c4)") {
val c1 = new SumAggregateCollector
val c2 = new SumAggregateCollector
c2.add(newBuffer(1.0))
c2.add(newBuffer(1.0))
c2.add(newBuffer(1.0))
c2.add(newBuffer(1.0))
c1.combine(c2)
assertEquals(c1.result, List(newBuffer(4.0)))
assertEquals(c1.stats, CollectorStats(4, 4, 1, 1))
}
test("sum collector -- combine(c4, null)") {
val c1 = new SumAggregateCollector
c1.add(newBuffer(1.0))
c1.add(newBuffer(1.0))
c1.add(newBuffer(1.0))
c1.add(newBuffer(1.0))
val c2 = new SumAggregateCollector
c1.combine(c2)
assertEquals(c1.result, List(newBuffer(4.0)))
assertEquals(c1.stats, CollectorStats(4, 4, 1, 1))
}
test("min collector") {
val c = new MinAggregateCollector
assertEquals(c.result, Nil)
c.add(newBuffer(3.0))
c.add(newBuffer(1.0))
c.add(newBuffer(2.0))
c.add(newBuffer(5.0))
assertEquals(c.result, List(newBuffer(1.0)))
assertEquals(c.stats, CollectorStats(4, 4, 1, 1))
}
test("max collector") {
val c = new MaxAggregateCollector
assertEquals(c.result, Nil)
c.add(newBuffer(3.0))
c.add(newBuffer(1.0))
c.add(newBuffer(2.0))
c.add(newBuffer(5.0))
assertEquals(c.result, List(newBuffer(5.0)))
assertEquals(c.stats, CollectorStats(4, 4, 1, 1))
}
test("all collector") {
val expected = List(newBuffer(3.0), newBuffer(1.0), newBuffer(2.0), newBuffer(5.0))
val c = new AllAggregateCollector
assertEquals(c.result, Nil)
expected.foreach(c.add)
assertEquals(c.result, expected)
assertEquals(c.stats, CollectorStats(4, 4, 4, 4))
}
test("by collector -- all") {
val expected = List(
newTaggedBuffer(Map("a" -> "1", "b" -> "2"), 3.0),
newTaggedBuffer(Map("a" -> "2", "b" -> "2"), 1.0),
newTaggedBuffer(Map("a" -> "3", "b" -> "3"), 2.0),
newTaggedBuffer(Map("a" -> "4", "b" -> "2", "c" -> "7"), 5.0)
)
val by = DataExpr.GroupBy(DataExpr.Sum(Query.False), List("a"))
val c = new GroupByAggregateCollector(by)
assertEquals(c.result, Nil)
expected.foreach(c.add)
assertEquals(c.result.toSet, expected.toSet)
assertEquals(c.stats, CollectorStats(4, 4, 4, 4))
}
test("by collector -- grp") {
val input = List(
newTaggedBuffer(Map("a" -> "1", "b" -> "2"), 3.0),
newTaggedBuffer(Map("a" -> "2", "b" -> "2"), 1.0),
newTaggedBuffer(Map("a" -> "3", "b" -> "3"), 2.0),
newTaggedBuffer(Map("a" -> "4", "b" -> "2", "c" -> "7"), 5.0)
)
val expected = List(
newTaggedBuffer(Map("b" -> "2"), 9.0),
newTaggedBuffer(Map("a" -> "3", "b" -> "3"), 2.0)
)
val by = DataExpr.GroupBy(DataExpr.Sum(Query.False), List("b"))
val c = new GroupByAggregateCollector(by)
assertEquals(c.result, Nil)
input.foreach(c.add)
assertEquals(c.result.toSet, expected.toSet)
assertEquals(c.stats, CollectorStats(4, 4, 2, 2))
}
test("by collector -- missing key") {
val input = List(
newTaggedBuffer(Map("a" -> "1", "b" -> "2"), 3.0),
newTaggedBuffer(Map("a" -> "2", "b" -> "2"), 1.0),
newTaggedBuffer(Map("a" -> "3", "b" -> "3"), 2.0),
newTaggedBuffer(Map("a" -> "4", "b" -> "2", "c" -> "7"), 5.0)
)
val expected = List(newTaggedBuffer(Map("a" -> "4", "b" -> "2", "c" -> "7"), 5.0))
val by = DataExpr.GroupBy(DataExpr.Sum(Query.False), List("b", "c"))
val c = new GroupByAggregateCollector(by)
assertEquals(c.result, Nil)
input.foreach(c.add)
assertEquals(c.result.toSet, expected.toSet)
assertEquals(c.stats, CollectorStats(1, 1, 1, 1))
}
}
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/model/MathVocabulary.scala |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import java.time.ZoneId
import java.time.ZoneOffset
import com.netflix.atlas.core.model.DataExpr.AggregateFunction
import com.netflix.atlas.core.model.MathExpr.AggrMathExpr
import com.netflix.atlas.core.model.MathExpr.NamedRewrite
import com.netflix.atlas.core.stacklang.Context
import com.netflix.atlas.core.stacklang.SimpleWord
import com.netflix.atlas.core.stacklang.StandardVocabulary.Macro
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.stacklang.Word
object MathVocabulary extends Vocabulary {
import com.netflix.atlas.core.model.ModelExtractors._
import com.netflix.atlas.core.stacklang.Extractors._
val name: String = "math"
val dependsOn: List[Vocabulary] = List(DataVocabulary)
val words: List[Word] = List(
As,
GroupBy,
Const,
Random,
SeededRandom,
Time,
TimeSpan,
CommonQuery,
NamedRewrite,
ClampMin,
ClampMax,
Abs,
Negate,
Sqrt,
PerStep,
Add,
Subtract,
Multiply,
Divide,
Power,
GreaterThan,
GreaterThanEqual,
LessThan,
LessThanEqual,
FAdd,
FSubtract,
FMultiply,
FDivide,
And,
Or,
Sum,
Count,
Min,
Max,
Percentiles,
Macro(
"avg",
List(
":dup",
":dup",
":sum",
":swap",
":count",
":div",
"avg",
":named-rewrite"
),
List("name,sps,:eq,(,nf.cluster,),:by", "name,sps,:eq,1h,:offset")
),
Macro(
"stddev",
List(
// Copy of base query
":dup",
// If the aggregate function is not explicit, then we need to force
// the conversion. Using `fadd` avoids conversion of `NaN` values to
// zero.
"0",
":fadd",
":dup",
// N
":count",
// sum(x^2)
":over",
":dup",
":mul",
":sum",
// N * sum(x^2)
":mul",
// sum(x)
":over",
":sum",
// sum(x)^2
":dup",
":mul",
// N * sum(x^2) - sum(x)^2
":sub",
// N^2
":swap",
":count",
":dup",
":mul",
// v = (N * sum(x^2) - sum(x)^2) / N^2
":div",
// stddev = sqrt(v)
":sqrt",
// Avoid expansion when displayed
"stddev",
":named-rewrite"
),
List("name,sps,:eq,(,nf.cluster,),:by")
),
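// A sketch of the identity the stddev macro above relies on (assuming the
// usual population variance):
// var(x) = E[x^2] - E[x]^2
// = sum(x^2)/N - (sum(x)/N)^2
// = (N * sum(x^2) - sum(x)^2) / N^2
// stddev = sqrt(var(x)), matching the stack operations annotated above.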
Macro(
"pct",
List(
":dup",
":dup",
":sum",
":div",
"100",
":mul",
"pct",
":named-rewrite"
),
List("name,sps,:eq,(,nf.cluster,),:by")
),
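// dist-avg below computes the average for a distribution/timer style metric:
// the sum of the totalTime/totalAmount statistics divided by the sum of the
// count statistic, with the original query applied via :cq at the end.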
Macro(
"dist-avg",
List(
":dup",
"statistic",
"(",
"totalTime",
"totalAmount",
")",
":in",
":sum",
"statistic",
"count",
":eq",
":sum",
":div",
":swap",
":cq",
"dist-avg",
":named-rewrite"
),
List("name,playback.startLatency,:eq")
),
Macro(
"dist-max",
List(
":dup",
"statistic",
"max",
":eq",
":max",
":swap",
":cq",
"dist-max",
":named-rewrite"
),
List("name,playback.startLatency,:eq")
),
Macro(
"dist-stddev",
List(
":dup",
// N
"statistic",
"count",
":eq",
":sum",
// sum(x^2)
"statistic",
"totalOfSquares",
":eq",
":sum",
// N * sum(x^2)
":mul",
// sum(x)
"statistic",
"(",
"totalAmount",
"totalTime",
")",
":in",
":sum",
// sum(x)^2
":dup",
":mul",
// N * sum(x^2) - sum(x)^2
":sub",
// N^2
"statistic",
"count",
":eq",
":sum",
":dup",
":mul",
// v = (N * sum(x^2) - sum(x)^2) / N^2
":div",
// stddev = sqrt(v)
":sqrt",
// Swap and use :cq to apply a common query
":swap",
":cq",
// Avoid expansion when displayed
"dist-stddev",
":named-rewrite"
),
List("name,playback.startLatency,:eq")
),
Macro("median", List("(", "50", ")", ":percentiles"), List("name,requestLatency,:eq"))
)
case object As extends SimpleWord {
override def name: String = "as"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: String) :: (_: String) :: TimeSeriesType(_) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case (replacement: String) :: (original: String) :: TimeSeriesType(t) :: stack =>
MathExpr.As(t, original, replacement) :: stack
}
override def summary: String =
"""
|Map a tag key name to an alternate name.
""".stripMargin.trim
override def signature: String = {
"TimeSeriesExpr original:String replacement:String -- TimeSeriesExpr"
}
override def examples: List[String] = List("name,sps,:eq,(,nf.cluster,),:by,nf.cluster,c")
}
case object GroupBy extends SimpleWord {
override def name: String = "by"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case StringListType(_) :: (t: AggrMathExpr) :: _ if t.expr.isGrouped =>
// Multi-level group by with an explicit aggregate specified
true
case StringListType(_) :: TimeSeriesType(t) :: _ if t.isGrouped =>
// Multi-level group by with an implicit aggregate of :sum
true
case StringListType(_) :: TimeSeriesType(t) :: _ =>
// Default data or math aggregate group by applied across math operations
true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case StringListType(keys) :: (t: AggrMathExpr) :: stack if t.expr.isGrouped =>
// Multi-level group by with an explicit aggregate specified
MathExpr.GroupBy(t, keys) :: stack
case StringListType(keys) :: TimeSeriesType(t) :: stack if t.isGrouped =>
// Multi-level group by with an implicit aggregate of :sum
MathExpr.GroupBy(MathExpr.Sum(t), keys) :: stack
case StringListType(keys) :: TimeSeriesType(t) :: stack =>
// Default data group by applied across math operations
val f = t.rewrite {
case nr: NamedRewrite => nr.groupBy(keys)
case af: AggregateFunction => DataExpr.GroupBy(af, keys)
case af: AggrMathExpr if af.expr.isGrouped => MathExpr.GroupBy(af, keys)
}
f :: stack
}
override def summary: String =
"""
|Apply a common group by to all aggregation functions in the expression.
""".stripMargin.trim
override def signature: String = "TimeSeriesExpr keys:List -- TimeSeriesExpr"
override def examples: List[String] = List("name,sps,:eq,:avg,(,nf.cluster,)")
}
case object Const extends SimpleWord {
override def name: String = "const"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: String) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case (v: String) :: stack => MathExpr.Constant(v.toDouble) :: stack
}
override def summary: String =
"""
|Generates a line where each datapoint is a constant value.
""".stripMargin.trim
override def signature: String = "Double -- TimeSeriesExpr"
override def examples: List[String] = List("42")
}
case object Random extends SimpleWord {
override def name: String = "random"
protected def matcher: PartialFunction[List[Any], Boolean] = { case _ => true }
protected def executor: PartialFunction[List[Any], List[Any]] = {
case s => MathExpr.Random :: s
}
override def summary: String =
"""
|Generate a time series that appears to be random noise for the purposes of
|experimentation and generating sample data. To ensure that the line is deterministic
|and reproducible it actually is based on a hash of the timestamp. Each datapoint is a
|value between 0.0 and 1.0.
""".stripMargin.trim
override def signature: String = " -- TimeSeriesExpr"
override def examples: List[String] = List("")
}
case object SeededRandom extends SimpleWord {
override def name: String = "srandom"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case IntType(_) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case IntType(seed) :: s => MathExpr.SeededRandom(seed) :: s
}
override def summary: String =
"""
|Generate a time series that appears to be random noise for the purposes of
|experimentation and generating sample data. To ensure that the line is deterministic
|and reproducible it actually is based on a hash of the timestamp. The seed value is
|used to vary the values for the purposes of creating multiple different sample lines.
|Each datapoint is a value between 0.0 and 1.0.
""".stripMargin.trim
override def signature: String = "seed:Int -- TimeSeriesExpr"
override def examples: List[String] = List("42")
}
case object Time extends SimpleWord {
override def name: String = "time"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: String) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case (v: String) :: stack => MathExpr.Time(v) :: stack
}
override def summary: String =
"""
|Generates a line based on the current time. Supported modes are secondOfMinute,
|secondOfDay, minuteOfHour, minuteOfDay, hourOfDay, dayOfWeek, dayOfMonth, dayOfYear,
|monthOfYear, yearOfCentury, yearOfEra, seconds (since epoch), or days (since epoch). The
|mode can also be a value of the enum
|[ChronoField](https://docs.oracle.com/javase/8/docs/api/java/time/temporal/ChronoField.html).
""".stripMargin.trim
override def signature: String = "String -- TimeSeriesExpr"
override def examples: List[String] = List("hourOfDay", "HOUR_OF_DAY")
}
case object TimeSpan extends Word {
override def name: String = "time-span"
def matches(stack: List[Any]): Boolean = stack match {
case (_: String) :: (_: String) :: _ => true
case _ => false
}
def execute(context: Context): Context = {
val zone = context.variables.get("tz") match {
case Some(z: String) => ZoneId.of(z)
case Some(z: ZoneId) => z
case _ => ZoneOffset.UTC
}
val newStack = context.stack match {
case (e: String) :: (s: String) :: stack => MathExpr.TimeSpan(s, e, zone) :: stack
case _ => invalidStack
}
context.copy(stack = newStack)
}
override def summary: String =
"""
|Generates a signal line based on the specified time range. The line will be 1
|within the range and 0 for all other times. The format of the start and end times
|is the same as the start and end [time parameters](Time-Parameters) on the Graph
|API. If the time zone is not explicitly specified, then the value from the `tz`
|variable will get used. The default value for the `tz` variable is the primary
|time zone used for the graph.
|
|The following named times are supported for time spans:
|
|| Name | Description |
||----------|-------------------------------------------------------------|
|| gs | Graph start time. |
|| ge | Graph end time. |
|| s | Start time for the span, can only be used for the end time. |
|| e | End time for the span, can only be used for the start time. |
|| now | Current time. |
|| epoch | January 1, 1970 UTC. |
|
|Since: 1.6
""".stripMargin.trim
override def signature: String = "s:String e:String -- TimeSeriesExpr"
override def examples: List[String] = List("e-30m,ge", "2014-02-20T13:00,s%2B30m")
}
case object CommonQuery extends SimpleWord {
override def name: String = "cq"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: Query) :: _ :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case (q2: Query) :: (expr: Expr) :: stack =>
val newExpr = expr.rewrite {
case q1: Query => q1.and(q2)
}
newExpr :: stack
case (_: Query) :: stack =>
// Ignore items on the stack that are not expressions. So we pop the query and leave
// the rest of the stack unchanged.
stack
}
override def summary: String =
"""
|Recursively AND a common query to all queries in an expression. If the first parameter
|is not an expression, then it will not be modified.
""".stripMargin.trim
override def signature: String = "Expr Query -- Expr"
override def examples: List[String] =
List(
"name,ssCpuUser,:eq,name,DiscoveryStatus_UP,:eq,:mul,nf.app,alerttest,:eq",
"42,nf.app,alerttest,:eq"
)
}
case object NamedRewrite extends Word {
override def name: String = "named-rewrite"
def matches(stack: List[Any]): Boolean = {
if (matcher.isDefinedAt(stack)) matcher(stack) else false
}
def execute(context: Context): Context = {
val pf = executor(Context(context.interpreter, Nil, Map.empty))
if (pf.isDefinedAt(context.stack))
context.copy(stack = pf(context.stack))
else
invalidStack
}
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: String) :: TimeSeriesType(_) :: TimeSeriesType(_) :: _ => true
}
protected def executor(context: Context): PartialFunction[List[Any], List[Any]] = {
case (n: String) :: TimeSeriesType(rw) :: (orig: Expr) :: stack =>
// If the original is already an expr type, e.g. a Query, then we should
// preserve it without modification. So we first match for Expr.
MathExpr.NamedRewrite(n, orig, rw, context) :: stack
case (n: String) :: TimeSeriesType(rw) :: TimeSeriesType(orig) :: stack =>
// This is a more general match that will coerce the original into a
// TimeSeriesExpr if it is not one already, e.g., a constant.
MathExpr.NamedRewrite(n, orig, rw, context) :: stack
}
override def summary: String =
"""
|Internal operation used by some macros to provide a more user-friendly display
|expression. The expanded version will get used for evaluation, but if a new expression
|is generated from the parsed expression tree it will use the original version
|along with the name of the macro.
""".stripMargin.trim
override def signature: String = {
"original:TimeSeriesExpr rewritten:TimeSeriesExpr name:String -- TimeSeriesExpr"
}
override def examples: List[String] = Nil
}
case object ClampMin extends SimpleWord {
override def name: String = "clamp-min"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case DoubleType(_) :: TimeSeriesType(_) :: _ => true
case DoubleType(_) :: (_: StyleExpr) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case DoubleType(mn) :: TimeSeriesType(t) :: stack => MathExpr.ClampMin(t, mn) :: stack
case DoubleType(mn) :: (t: StyleExpr) :: stack =>
t.copy(expr = MathExpr.ClampMin(t.expr, mn)) :: stack
}
override def summary: String =
"""
|Restricts the minimum value of the output time series to the specified value. Values
|from the input time series that are greater than or equal to the minimum will not be
|changed. A common use-case is to pair with an auto-scaled axis so the lower bound
|does not drop below the specified value. For more details see
|[:clamp-max](math-clamp-max).
""".stripMargin.trim
override def signature: String = "TimeSeriesExpr Double -- TimeSeriesExpr"
override def examples: List[String] = List("name,sps,:eq,:sum,200e3")
}
case object ClampMax extends SimpleWord {
override def name: String = "clamp-max"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case DoubleType(_) :: TimeSeriesType(_) :: _ => true
case DoubleType(_) :: (_: StyleExpr) :: _ => true
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case DoubleType(mx) :: TimeSeriesType(t) :: stack => MathExpr.ClampMax(t, mx) :: stack
case DoubleType(mx) :: (t: StyleExpr) :: stack =>
t.copy(expr = MathExpr.ClampMax(t.expr, mx)) :: stack
}
override def summary: String =
"""
|Restricts the maximum value of the output time series to the specified value. Values
|from the input time series that are less than or equal to the maximum will not be
|changed.
|
|A common use-case is to allow for auto-scaled axis up to a specified bound. The
|axis parameters for controlling the [axis bounds](Axis-Bounds) have the following
|limitations:
|
|- They apply to everything on the axis and cannot be targeted to a specific line.
|- Are either absolute or set based on the data. For data with occasional spikes
| this can hide important details.
|
|Consider the following graph:
|
|
|
|The spike makes it difficult to make out any detail for other times. One option
|to handle this is to use an alternate [axis scale](Axis-Scale) such as
|[logarithmic](Axis-Scale#logarithmic) that gives a higher visual weight to the smaller
|values. However, it is often easier for a user to reason about a linear scale, in
|particular, for times when there is no spike in the graph window. If there is a known
|max reasonable value, then the `:clamp-max` operator can be used to restrict the line
|if and only if it exceeds the designated max. For example, if we limit the graph above
|to 25:
|
|
""".stripMargin.trim
override def signature: String = "TimeSeriesExpr Double -- TimeSeriesExpr"
override def examples: List[String] = List("name,sps,:eq,:sum,200e3")
}
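// Usage sketch (illustrative), matching the discussion above: the expression
// "name,sps,:eq,:sum,25,:clamp-max" caps the plotted line at 25 while leaving
// smaller values untouched.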
sealed trait UnaryWord extends SimpleWord {
protected def matcher: PartialFunction[List[Any], Boolean] = {
case TimeSeriesType(_) :: _ => true
case (_: StyleExpr) :: _ => true
}
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr
protected def executor: PartialFunction[List[Any], List[Any]] = {
case TimeSeriesType(t) :: stack => newInstance(t) :: stack
case (t: StyleExpr) :: stack => t.copy(expr = newInstance(t.expr)) :: stack
}
override def signature: String = "TimeSeriesExpr -- TimeSeriesExpr"
override def examples: List[String] = List("0", "64", "-64")
}
case object Abs extends UnaryWord {
override def name: String = "abs"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Abs(t)
override def summary: String =
"""
|Compute a new time series where each interval has the absolute value of the input time
|series.
""".stripMargin.trim
}
case object Negate extends UnaryWord {
override def name: String = "neg"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Negate(t)
override def summary: String =
"""
|Compute a new time series where each interval has the negated value of the input time
|series.
""".stripMargin.trim
}
case object Sqrt extends UnaryWord {
override def name: String = "sqrt"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Sqrt(t)
override def summary: String =
"""
|Compute a new time series where each interval has the square root of the value from the
|input time series.
""".stripMargin.trim
}
case object PerStep extends UnaryWord {
override def name: String = "per-step"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.PerStep(t)
override def summary: String =
"""
|Converts a line from a rate per second to a rate based on the step size of the graph.
|This is useful for getting an estimate of the raw number of events for a given
|interval.
""".stripMargin.trim
}
sealed trait BinaryWord extends SimpleWord {
protected def matcher: PartialFunction[List[Any], Boolean] = {
case TimeSeriesType(_) :: TimeSeriesType(_) :: _ => true
case (_: StyleExpr) :: TimeSeriesType(_) :: _ => true
case TimeSeriesType(_) :: (_: StyleExpr) :: _ => true
}
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr
protected def executor: PartialFunction[List[Any], List[Any]] = {
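// Values come off the stack in reverse order: the top of the stack is the
// second operand (t2) and the value under it is the first operand (t1).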
case TimeSeriesType(t2) :: TimeSeriesType(t1) :: stack =>
newInstance(t1, t2) :: stack
case (t2: StyleExpr) :: TimeSeriesType(t1) :: stack =>
t2.copy(expr = newInstance(t1, t2.expr)) :: stack
case TimeSeriesType(t2) :: (t1: StyleExpr) :: stack =>
t1.copy(expr = newInstance(t1.expr, t2)) :: stack
}
override def signature: String = "TimeSeriesExpr TimeSeriesExpr -- TimeSeriesExpr"
override def examples: List[String] =
List("name,sps,:eq,42", "name,sps,:eq,:sum,name,requestsPerSecond,:eq,:max,(,name,),:by")
}
case object Add extends BinaryWord {
override def name: String = "add"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Add(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a addNaN b)` where `a`
| and `b` are the corresponding intervals in the input time series. Sample:
|
|| :add | 3.0 | 0.0 | 1.0 | 1.0 | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 1.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 2.0 | 0.0 | 0.0 | NaN | NaN |
|
|Use the [fadd](math-fadd) operator to get strict floating point behavior.
""".stripMargin.trim
}
case object Subtract extends BinaryWord {
override def name: String = "sub"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Subtract(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a subtractNaN b)` where `a`
| and `b` are the corresponding intervals in the input time series.
|
|| :sub | 1.0 | 0.0 | 1.0 | 1.0 | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 2.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 1.0 | 0.0 | 0.0 | NaN | NaN |
|
|Use the [fsub](math-fsub) operator to get strict floating point behavior.
""".stripMargin.trim
}
case object Multiply extends BinaryWord {
override def name: String = "mul"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Multiply(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a * b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object Divide extends BinaryWord {
override def name: String = "div"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Divide(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a / b)` where `a`
| and `b` are the corresponding intervals in the input time series. If `a` and `b` are 0,
| then 0 will be returned for the interval. If only `b` is 0, then NaN will be returned as
| the value for the interval. Sample data:
|
|| :div | 0.5 | 0.0 | NaN | NaN | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 1.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 2.0 | 0.0 | 0.0 | NaN | NaN |
|
|Use the [fdiv](math-fdiv) operator to get strict floating point behavior.
""".stripMargin.trim
}
case object Power extends BinaryWord {
override def name: String = "pow"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Power(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a power b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object GreaterThan extends BinaryWord {
override def name: String = "gt"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.GreaterThan(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a > b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object GreaterThanEqual extends BinaryWord {
override def name: String = "ge"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.GreaterThanEqual(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a >= b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object LessThan extends BinaryWord {
override def name: String = "lt"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.LessThan(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a < b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object LessThanEqual extends BinaryWord {
override def name: String = "le"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.LessThanEqual(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a <= b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object FAdd extends BinaryWord {
override def name: String = "fadd"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.FAdd(t1, t2)
}
override def summary: String =
"""
|Floating point addition operator. Compute a new time series where each interval has the
| value `(a + b)` where `a` and `b` are the corresponding intervals in the input time
| series.
|
|| :fadd | 3.0 | 0.0 | 1.0 | NaN | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 2.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 1.0 | 0.0 | 0.0 | NaN | NaN |
|
|Note in many cases `NaN` will appear in data, e.g., if a node was brought up and started
|reporting in the middle of the time window for the graph. This can lead to confusing
|behavior if added to a line that does have data as the result will be `NaN`. Use the
|[add](math-add) operator to treat `NaN` values as zero for combining with other time
|series.
""".stripMargin.trim
}
case object FSubtract extends BinaryWord {
override def name: String = "fsub"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.FSubtract(t1, t2)
}
override def summary: String =
"""
|Floating point subtraction operator. Compute a new time series where each interval has the
| value `(a - b)` where `a` and `b` are the corresponding intervals in the input time
| series.
|
|| :fsub | 1.0 | 0.0 | 1.0 | NaN | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 2.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 1.0 | 0.0 | 0.0 | NaN | NaN |
|
|Note in many cases `NaN` will appear in data, e.g., if a node was brought up and started
|reporting in the middle of the time window for the graph. This can lead to confusing
|behavior if combined with a line that does have data, as the result will be `NaN`. Use the
|[sub](math-sub) operator to treat `NaN` values as zero for combining with other time
|series.
""".stripMargin.trim
}
case object FMultiply extends BinaryWord {
override def name: String = "fmul"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.FMultiply(t1, t2)
}
override def summary: String =
"""
|Floating point multiplication operator. Compute a new time series where each
| interval has the value `(a * b)` where `a` and `b` are the corresponding
| intervals in the input time series.
""".stripMargin.trim
}
case object FDivide extends BinaryWord {
override def name: String = "fdiv"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.FDivide(t1, t2)
}
override def summary: String =
"""
|Floating point division operator. Compute a new time series where each interval has the
| value `(a / b)` where `a` and `b` are the corresponding intervals in the input time
| series.
|
|| :fdiv | 2.0 | NaN | Inf | NaN | NaN |
||---------|-----|-----|-----|-----|-----|
|| Input 1 | 2.0 | 0.0 | 1.0 | 1.0 | NaN |
|| Input 2 | 1.0 | 0.0 | 0.0 | NaN | NaN |
|
|Note in many cases `NaN` will appear in data, e.g., if a node was brought up and started
|reporting in the middle of the time window for the graph. Zero divided by zero can also
|occur due to lack of activity in some windows. Unless you really need strict floating
|point behavior, use the [div](math-div) operator to get behavior more appropriate for
|graphs.
""".stripMargin.trim
}
case object And extends BinaryWord {
override def name: String = "and"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.And(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a AND b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
case object Or extends BinaryWord {
override def name: String = "or"
def newInstance(t1: TimeSeriesExpr, t2: TimeSeriesExpr): TimeSeriesExpr = {
MathExpr.Or(t1, t2)
}
override def summary: String =
"""
|Compute a new time series where each interval has the value `(a OR b)` where `a`
| and `b` are the corresponding intervals in the input time series.
""".stripMargin.trim
}
sealed trait AggrWord extends SimpleWord {
protected def matcher: PartialFunction[List[Any], Boolean] = {
case (_: TimeSeriesExpr) :: _ => true
}
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr
protected def executor: PartialFunction[List[Any], List[Any]] = {
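// Applying an aggregate on top of a matching group by collapses to the inner
// data expression (e.g. :sum of a grouped sum is just the overall sum), and
// re-aggregating a plain aggregate (other than :count) is a no-op. Otherwise
// the input is wrapped in the math-level aggregate.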
case DataExpr.GroupBy(a: DataExpr.Sum, _) :: stack if this == Sum => a :: stack
case DataExpr.GroupBy(a: DataExpr.Min, _) :: stack if this == Min => a :: stack
case DataExpr.GroupBy(a: DataExpr.Max, _) :: stack if this == Max => a :: stack
case (a: DataExpr.AggregateFunction) :: stack if this != Count => a :: stack
case (t: TimeSeriesExpr) :: stack => newInstance(t) :: stack
}
override def signature: String = "TimeSeriesExpr -- TimeSeriesExpr"
override def examples: List[String] =
List("name,sps,:eq,:sum", "name,sps,:eq,:max,(,nf.cluster,),:by")
}
case object Sum extends AggrWord {
override def name: String = "sum"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Sum(t)
override def summary: String =
"""
|Compute the sum of all the time series that result from the previous expression.
|To compute sum of all the time series that match a query, refer to
|[data/:sum](data-sum) instead.
""".stripMargin.trim
}
case object Count extends AggrWord {
override def name: String = "count"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Count(t)
override def summary: String =
"""
|Compute the count of all the time series that result from the previous expression.
|To compute count of all the time series that match a query, refer to
|[data/:count](data-count) instead.
""".stripMargin.trim
}
case object Min extends AggrWord {
override def name: String = "min"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Min(t)
override def summary: String =
"""
|Compute the min of all the time series that result from the previous expression.
|To compute min of all the time series that match a query, refer to
|[data/:min](data-min) instead.
""".stripMargin.trim
}
case object Max extends AggrWord {
override def name: String = "max"
def newInstance(t: TimeSeriesExpr): TimeSeriesExpr = MathExpr.Max(t)
override def summary: String =
"""
|Compute the max of all the time series that result from the previous expression.
|To compute max of all the time series that match a query, refer to
|[data/:max](data-max) instead.
""".stripMargin.trim
}
/**
* Compute estimated percentile values using counts for well known buckets. See spectator
* PercentileBuckets for more information. The input will be grouped by the `percentile` key
* with each key value being the bucket index. The output will be one line per requested
* percentile. The input expression should be a sum or a group by of sums. Other
* aggregate types, such as max, will automatically be converted to sum.
*/
case object Percentiles extends SimpleWord {
override def name: String = "percentiles"
protected def matcher: PartialFunction[List[Any], Boolean] = {
case DoubleListType(_) :: DataExprType(expr) :: _ =>
expr match {
case _: DataExpr.All => false
case _ => true
}
}
protected def executor: PartialFunction[List[Any], List[Any]] = {
case DoubleListType(pcts) :: DataExprType(t) :: stack =>
// Percentile always needs sum aggregate type, if others are used convert to sum
val expr = t match {
case af: AggregateFunction => DataExpr.GroupBy(toSum(af), List(TagKey.percentile))
case by: DataExpr.GroupBy => DataExpr.GroupBy(toSum(by.af), TagKey.percentile :: by.keys)
case _ =>
throw new IllegalArgumentException(":percentiles can only be used with :sum and :by")
}
MathExpr.Percentiles(expr, pcts) :: stack
}
private def toSum(af: AggregateFunction): DataExpr.Sum = {
DataExpr.Sum(af.query, offset = af.offset)
}
override def summary: String =
"""
|Estimate percentiles for a timer or distribution summary. The data must have been
|published appropriately to allow the approximation. If using
|[spectator](http://netflix.github.io/spectator/en/latest/), then see
|[PercentileTimer](http://netflix.github.io/spectator/en/latest/javadoc/spectator-api/com/netflix/spectator/api/histogram/PercentileTimer.html)
|and
|[PercentileDistributionSummary](http://netflix.github.io/spectator/en/latest/javadoc/spectator-api/com/netflix/spectator/api/histogram/PercentileDistributionSummary.html)
|helper classes.
|
|The percentile values can be shown in the legend using `$percentile`.
|
|Since: 1.5.0 (first in 1.5.0-rc.4)
""".stripMargin.trim
override def signature: String = "DataExpr percentiles:List -- Expr"
override def examples: List[String] = List(
"name,requestLatency,:eq,(,25,50,90,)"
)
}
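// Usage sketch (illustrative; assumes this vocabulary exposes `allWords` and is
// run through an Interpreter the same way the style vocabulary is in the test
// suites):
//
//   val interpreter = new Interpreter(allWords)
//   val stack = interpreter.execute(
//     "name,requestLatency,:eq,(,25,50,90,),:percentiles").stack
//
// The stack then holds a MathExpr.Percentiles wrapping a group by on the
// `percentile` tag key.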
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/util/BoundedPriorityBufferSuite.scala | <filename>atlas-core/src/test/scala/com/netflix/atlas/core/util/BoundedPriorityBufferSuite.scala
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
import munit.FunSuite
import java.util.Comparator
class BoundedPriorityBufferSuite extends FunSuite {
private def set(vs: Seq[Int]): Set[Integer] = {
vs.map(Integer.valueOf).toSet
}
test("buffer is bounded, natural order") {
val buffer = new BoundedPriorityBuffer[Integer](5, Comparator.naturalOrder[Integer]())
(0 until 10).foreach(v => buffer.add(v))
assertEquals(buffer.toList.toSet, set(0 until 5))
assert(buffer.size == 5)
}
test("buffer is bounded, reversed order") {
val cmp = Comparator.naturalOrder[Integer]().reversed()
val buffer = new BoundedPriorityBuffer[Integer](5, cmp)
(0 until 10).foreach(v => buffer.add(v))
assertEquals(buffer.toList.toSet, set(5 until 10))
assert(buffer.size == 5)
}
test("duplicate values, prefer first to come") {
val cmp: Comparator[(Integer, Integer)] = (a, b) => Integer.compare(a._1, b._1)
val buffer = new BoundedPriorityBuffer[(Integer, Integer)](5, cmp)
(0 until 10).foreach(v => buffer.add(1.asInstanceOf[Integer] -> (9 - v).asInstanceOf[Integer]))
assertEquals(buffer.toList.map(_._2).toSet, set(5 until 10))
}
test("foreach") {
val buffer = new BoundedPriorityBuffer[Integer](3, Comparator.naturalOrder[Integer]())
(0 until 10).foreach(v => buffer.add(v))
val builder = Set.newBuilder[Integer]
buffer.foreach(v => builder += v)
assertEquals(builder.result(), set(Seq(0, 1, 2)))
}
test("max size of 0") {
intercept[IllegalArgumentException] {
new BoundedPriorityBuffer[Integer](0, Comparator.naturalOrder[Integer]())
}
}
test("negative max size") {
intercept[IllegalArgumentException] {
new BoundedPriorityBuffer[Integer](-3, Comparator.naturalOrder[Integer]())
}
}
test("ejected value") {
val buffer = new BoundedPriorityBuffer[Integer](2, Comparator.naturalOrder[Integer]())
assertEquals(buffer.add(2), null)
assertEquals(buffer.add(3), null)
assertEquals(buffer.add(1).intValue(), 3)
assertEquals(buffer.add(4).intValue(), 4)
assertEquals(buffer.add(0).intValue(), 2)
}
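// Illustrative sketch (not part of the original suite): with the natural
// ordering the buffer keeps the smallest values seen and ejects the largest.
test("sketch: keeps smallest values under natural order") {
val buffer = new BoundedPriorityBuffer[Integer](3, Comparator.naturalOrder[Integer]())
List(5, 1, 4, 2, 3).foreach(v => buffer.add(v))
assertEquals(buffer.toList.toSet, set(Seq(1, 2, 3)))
assert(buffer.size == 3)
}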
}
|
dmuino/atlas | atlas-json/src/main/scala/com/netflix/atlas/json/Reflection.scala | <gh_stars>1000+
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.json
import java.lang.reflect.ParameterizedType
import com.fasterxml.jackson.core.`type`.TypeReference
/**
* Helper functions for using reflection to access information about case
* classes.
*/
private[json] object Reflection {
type JType = java.lang.reflect.Type
// Taken from com.fasterxml.jackson.module.scala.deser.DeserializerTest.scala
def typeReference[T: Manifest]: TypeReference[T] = new TypeReference[T] {
override def getType: JType = typeFromManifest(manifest[T])
}
// Taken from com.fasterxml.jackson.module.scala.deser.DeserializerTest.scala
def typeFromManifest(m: Manifest[_]): JType = {
if (m.typeArguments.isEmpty) {
m.runtimeClass
} else
new ParameterizedType {
def getRawType: Class[_] = m.runtimeClass
def getActualTypeArguments: Array[JType] = m.typeArguments.map(typeFromManifest).toArray
def getOwnerType: JType = null
}
}
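// Usage sketch (illustrative): build a Jackson TypeReference that preserves the
// Scala generic type arguments, e.g.:
//
//   val ref = typeReference[Map[String, List[Int]]]
//   // ref.getType is a ParameterizedType for Map[String, List[Int]]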
}
|
dmuino/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/model/StyleVocabularySuite.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import com.netflix.atlas.core.model.ModelExtractors.PresentationType
import com.netflix.atlas.core.stacklang.Interpreter
import munit.FunSuite
class StyleVocabularySuite extends FunSuite {
val interpreter = new Interpreter(StyleVocabulary.allWords)
def eval(s: String): StyleExpr = {
interpreter.execute(s).stack match {
case PresentationType(v) :: Nil => v
case v => throw new MatchError(v)
}
}
test("no additional style") {
val expr = eval(":true")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map.empty)
assertEquals(expr, expected)
}
test("alpha") {
val expr = eval(":true,40,:alpha")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map("alpha" -> "40"))
assertEquals(expr, expected)
}
test("color") {
val expr = eval(":true,f00,:color")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map("color" -> "f00"))
assertEquals(expr, expected)
}
test("alpha > color") {
val expr = eval(":true,40,:alpha,f00,:color")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map("color" -> "f00"))
assertEquals(expr, expected)
}
test("alpha > color > alpha") {
val expr = eval(":true,40,:alpha,f00,:color,60,:alpha")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map("color" -> "60ff0000"))
assertEquals(expr, expected)
}
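// Illustrative sketch (not part of the original suite): applying :alpha to a
// line that already has a color merges the alpha channel into the color value,
// consistent with the "alpha > color > alpha" case above.
test("color > alpha") {
val expr = eval(":true,f00,:color,40,:alpha")
val expected = StyleExpr(DataExpr.Sum(Query.True), Map("color" -> "40ff0000"))
assertEquals(expr, expected)
}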
}
|
dmuino/atlas | atlas-postgres/src/test/scala/com/netflix/atlas/postgres/PostgresCopyBufferSuite.scala | <gh_stars>1000+
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.postgres
import com.netflix.atlas.core.util.SortedTagMap
import io.zonky.test.db.postgres.embedded.EmbeddedPostgres
import java.sql.Statement
import scala.util.Using
import org.postgresql.copy.CopyManager
import org.postgresql.core.BaseConnection
import munit.FunSuite
import java.sql.Connection
import java.sql.DriverManager
import java.sql.ResultSet
class PostgresCopyBufferSuite extends FunSuite {
private val buffers = List(
"text" -> new TextCopyBuffer(1024),
"binary" -> new BinaryCopyBuffer(1024, 1)
)
private var postgres: EmbeddedPostgres = _
private var connection: Connection = _
private var copyManager: CopyManager = _
override def beforeAll(): Unit = {
postgres = EmbeddedPostgres
.builder()
.setCleanDataDirectory(true)
.setPort(54321)
.start()
Class.forName("org.postgresql.Driver")
connection = DriverManager.getConnection(
"jdbc:postgresql://localhost:54321/postgres",
"postgres",
"postgres"
)
copyManager = connection.asInstanceOf[BaseConnection].getCopyAPI
}
override def afterAll(): Unit = {
connection.close()
postgres.close()
}
private val tableName = "copy_buffer_test"
private def createTable(stmt: Statement, dataType: String): Unit = {
stmt.executeUpdate("create extension if not exists hstore")
stmt.executeUpdate(s"drop table if exists $tableName")
stmt.executeUpdate(s"create table $tableName(value $dataType)")
}
private def getData[T](stmt: Statement, f: ResultSet => T): List[T] = {
val builder = List.newBuilder[T]
val rs = stmt.executeQuery(s"select value from $tableName")
while (rs.next()) {
builder += f(rs)
}
builder.result()
}
private def stringTest(buffer: CopyBuffer, dataType: String): Unit = {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, dataType)
assert(buffer.putString(null).nextRow())
assert(buffer.putString("foo").nextRow())
assert(buffer.putString("bar").nextRow())
buffer.copyIn(copyManager, tableName)
assertEquals(getData(stmt, _.getString(1)), List(null, "foo", "bar"))
}
}
buffers.foreach {
case (name, buffer) =>
test(s"$name: varchar") {
stringTest(buffer, "varchar(40)")
}
test(s"$name: text") {
stringTest(buffer, "text")
}
test(s"$name: json") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "json")
assert(buffer.putTagsJson(SortedTagMap.empty).nextRow())
assert(buffer.putTagsJson(SortedTagMap("a" -> "1")).nextRow())
assert(buffer.putTagsJson(SortedTagMap("a" -> "1", "b" -> "2")).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
"{}",
"""{"a":"1"}""",
"""{"a":"1","b":"2"}"""
)
val actual = getData(stmt, _.getString(1))
assertEquals(actual, expected)
}
}
test(s"$name: jsonb") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "jsonb")
assert(buffer.putTagsJsonb(SortedTagMap.empty).nextRow())
assert(buffer.putTagsJsonb(SortedTagMap("a" -> "1")).nextRow())
assert(buffer.putTagsJsonb(SortedTagMap("a" -> "1", "b" -> "2")).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
"{}",
"""{"a": "1"}""",
"""{"a": "1", "b": "2"}"""
)
val actual = getData(stmt, _.getString(1))
assertEquals(actual, expected)
}
}
test(s"$name: hstore") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "hstore")
assert(buffer.putTagsHstore(SortedTagMap.empty).nextRow())
assert(buffer.putTagsHstore(SortedTagMap("a" -> "1")).nextRow())
assert(buffer.putTagsHstore(SortedTagMap("a" -> "1", "b" -> "2")).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
"",
""""a"=>"1"""",
""""a"=>"1", "b"=>"2""""
)
val actual = getData(stmt, _.getString(1))
assertEquals(actual, expected)
}
}
test(s"$name: smallint") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "smallint")
assert(buffer.putShort(0).nextRow())
assert(buffer.putShort(Short.MinValue).nextRow())
assert(buffer.putShort(Short.MaxValue).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
0.toShort,
Short.MinValue,
Short.MaxValue
)
val actual = getData(stmt, _.getShort(1))
assertEquals(actual, expected)
}
}
test(s"$name: integer") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "integer")
assert(buffer.putInt(0).nextRow())
assert(buffer.putInt(Int.MinValue).nextRow())
assert(buffer.putInt(Int.MaxValue).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
0,
Int.MinValue,
Int.MaxValue
)
val actual = getData(stmt, _.getInt(1))
assertEquals(actual, expected)
}
}
test(s"$name: bigint") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "bigint")
assert(buffer.putLong(0L).nextRow())
assert(buffer.putLong(Long.MinValue).nextRow())
assert(buffer.putLong(Long.MaxValue).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
0L,
Long.MinValue,
Long.MaxValue
)
val actual = getData(stmt, _.getLong(1))
assertEquals(actual, expected)
}
}
test(s"$name: double precision") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "double precision")
assert(buffer.putDouble(0.0).nextRow())
assert(buffer.putDouble(Double.MinValue).nextRow())
assert(buffer.putDouble(Double.MaxValue).nextRow())
assert(buffer.putDouble(Double.MinPositiveValue).nextRow())
assert(buffer.putDouble(Double.NaN).nextRow())
assert(buffer.putDouble(Double.NegativeInfinity).nextRow())
assert(buffer.putDouble(Double.PositiveInfinity).nextRow())
buffer.copyIn(copyManager, tableName)
val expected = List(
0.0,
Double.MinValue,
Double.MaxValue,
Double.MinPositiveValue,
Double.NaN,
Double.NegativeInfinity,
Double.PositiveInfinity
)
val actual = getData(stmt, _.getDouble(1))
actual.zip(expected).foreach {
case (a, e) => if (a.isNaN) assert(e.isNaN) else assertEquals(a, e)
}
}
}
test(s"$name: double precision[]") {
buffer.clear()
Using.resource(connection.createStatement()) { stmt =>
createTable(stmt, "double precision[]")
val expected = List(
0.0,
Double.MinValue,
Double.MaxValue,
Double.MinPositiveValue,
Double.NaN,
Double.NegativeInfinity,
Double.PositiveInfinity
)
assert(buffer.putDoubleArray(expected.toArray).nextRow())
buffer.copyIn(copyManager, tableName)
val actual = getData(stmt, _.getArray(1).getArray.asInstanceOf[Array[java.lang.Double]])
assertEquals(actual.size, 1)
actual.foreach { data =>
data.toList.zip(expected).foreach {
case (a, e) => if (a.isNaN) assert(e.isNaN) else assertEquals(a.doubleValue(), e)
}
}
}
}
}
}
|
dmuino/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/db/BlockStore.scala | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.db
import com.netflix.atlas.core.model._
import com.netflix.atlas.core.util.ArrayHelper
trait BlockStore {
/** Returns true if this block store has data. */
def hasData: Boolean
/** Removes all blocks where the start time is before the specified cutoff. */
def cleanup(cutoff: Long): Unit
/** Updates the store with a datapoint. */
def update(timestamp: Long, value: Double, rollup: Boolean = false): Unit
/**
* Force and update with no data. This can be used to trigger the rotation and compression
* of the currently updating block if no data is received after the block boundary. The
* timestamp should be the start of the block to process.
*/
def update(timestamp: Long): Unit
def blockList: List[Block]
def fetch(start: Long, end: Long, aggr: Int): Array[Double]
def get(start: Long): Option[Block]
}
object MemoryBlockStore {
def newArrayBlock(start: Long, size: Int): MutableBlock = {
CompressedArrayBlock(start, size)
}
def newRollupBlock(start: Long, size: Int): RollupBlock = {
RollupBlock(
CompressedArrayBlock(start, size),
CompressedArrayBlock(start, size),
CompressedArrayBlock(start, size),
CompressedArrayBlock(start, size)
)
}
}
class MemoryBlockStore(step: Long, blockSize: Int, numBlocks: Int) extends BlockStore {
import com.netflix.atlas.core.db.MemoryBlockStore._
private val blockStep = step * blockSize
private[db] val blocks: Array[MutableBlock] = new Array[MutableBlock](numBlocks)
private[db] var currentPos: Int = 0
private[db] var currentBlock: MutableBlock = _
private def next(pos: Int): Int = {
(pos + 1) % numBlocks
}
private def alignStart(start: Long): Long = {
start - start % blockStep
}
private def newBlock(start: Long, rollup: Boolean): Unit = {
require(start % blockStep == 0, s"start time $start is not on block boundary")
val oldBlock = currentBlock
val newBlock =
if (rollup)
newRollupBlock(start, blockSize)
else
newArrayBlock(start, blockSize)
BlockStats.inc(newBlock)
blocks(currentPos) = oldBlock
currentBlock = newBlock
currentPos = next(currentPos)
if (blocks(currentPos) != null) BlockStats.dec(blocks(currentPos))
blocks(currentPos) = currentBlock
hasData = true
}
var hasData: Boolean = false
def cleanup(cutoff: Long): Unit = {
var pos = 0
var nonEmpty = false
while (pos < numBlocks) {
val block = blocks(pos)
if (block != null && block.start < cutoff) {
if (blocks(pos) != null) BlockStats.dec(blocks(pos))
blocks(pos) = null
if (block eq currentBlock) currentBlock = null
} else {
nonEmpty = nonEmpty || (block != null)
}
pos += 1
}
hasData = nonEmpty
}
def update(timestamp: Long, value: Double, rollup: Boolean): Unit = {
if (currentBlock == null) {
currentBlock =
if (rollup)
newRollupBlock(alignStart(timestamp), blockSize)
else
newArrayBlock(alignStart(timestamp), blockSize)
BlockStats.inc(currentBlock)
currentPos = next(currentPos)
if (blocks(currentPos) != null) BlockStats.dec(blocks(currentPos))
blocks(currentPos) = currentBlock
hasData = true
}
var pos = ((timestamp - currentBlock.start) / step).asInstanceOf[Int]
if (pos >= blockSize) {
// Exceeded window of current block, create a new one for the next
// interval
newBlock(alignStart(timestamp), rollup)
pos = ((timestamp - currentBlock.start) / step).asInstanceOf[Int]
currentBlock.update(pos, value)
} else if (pos < 0) {
// Out of order update received for an older block, try to update the
// previous block
// Add numBlocks before taking the modulus so the index wraps to the end of
// the circular buffer instead of going negative when currentPos is 0. The
// pos range check below guards against updating an unrelated older block.
val previousPos = (currentPos - 1 + numBlocks) % numBlocks
if (blocks(previousPos) != null) {
val previousBlock = blocks(previousPos)
pos = ((timestamp - previousBlock.start) / step).asInstanceOf[Int]
if (pos >= 0 && pos < blockSize) {
previousBlock.update(pos, value)
}
}
} else {
currentBlock.update(pos, value)
}
}
def update(timestamp: Long): Unit = {
if (currentBlock != null && currentBlock.start == timestamp) {
blocks(currentPos) = currentBlock
currentBlock = null
}
}
def update(start: Long, values: List[Double]): Unit = {
var t = start
values.foreach { v =>
update(t, v)
t += step
}
}
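// Usage sketch (illustrative; a 60s step, 60 datapoints per block, 24 blocks;
// the aggr constant passed to fetch is an assumption here):
//
//   val store = new MemoryBlockStore(60000L, 60, 24)
//   store.update(0L, List(1.0, 2.0, 3.0))
//   store.fetch(0L, 120000L, 0)   // => Array(1.0, 2.0, 3.0)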
private def fill(blk: Block, buf: Array[Double], start: Long, end: Long, aggr: Int): Unit = {
val s = start / step
val e = end / step
val bs = blk.start / step
val be = bs + blockSize - 1
if (e >= bs && s <= be) {
val spos = if (s > bs) s else bs
val epos = if (e < be) e else be
var i = spos
while (i <= epos) {
buf((i - s).toInt) = blk.get((i - bs).toInt) // TODO: use proper aggregate
i += 1
}
}
}
def blockList: List[Block] = {
val bs = List.newBuilder[Block]
var pos = 0
while (pos < numBlocks) {
val b = blocks(pos)
if (b != null) bs += b
pos += 1
}
bs.result()
}
def fetch(start: Long, end: Long, aggr: Int): Array[Double] = {
val size = (end / step - start / step).asInstanceOf[Int] + 1
val buffer = ArrayHelper.fill(size, Double.NaN)
var pos = 0
while (pos < numBlocks) {
if (blocks(pos) != null) fill(blocks(pos), buffer, start, end, aggr)
pos += 1
}
buffer
}
def get(start: Long): Option[Block] = {
var pos = 0
var block: Block = null
while (block == null && pos < numBlocks) {
val b = blocks(pos)
if (b != null && b.start == start) block = b
pos += 1
}
Option(block)
}
override def toString: String = {
val buf = new StringBuilder
(0 until numBlocks).foreach { i =>
buf.append(i.toString).append(" => ").append(blocks(i))
if (i == currentPos) buf.append(" (current)")
buf.append("\n")
}
buf.toString
}
}
|
dmuino/atlas | atlas-eval/src/main/scala/com/netflix/atlas/eval/model/LwcMessages.scala | <reponame>dmuino/atlas
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.model
import akka.util.ByteString
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.core.JsonToken
import com.netflix.atlas.akka.ByteStringInputStream
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.core.util.SmallHashMap
import com.netflix.atlas.json.Json
import com.netflix.atlas.json.JsonParserHelper._
import com.netflix.atlas.json.JsonSupport
import java.io.ByteArrayOutputStream
/**
* Helpers for working with messages coming back from the LWCAPI service.
*/
object LwcMessages {
/**
* Parse the message string into an internal model object based on the type.
*/
def parse(msg: ByteString): AnyRef = {
parse(Json.newJsonParser(new ByteStringInputStream(msg)))
}
/**
* Parse the message string into an internal model object based on the type.
*/
def parse(msg: String): AnyRef = {
parse(Json.newJsonParser(msg))
}
private def parse(parser: JsonParser): AnyRef = {
// This is a performance critical part of the code so the parsing is done by
// hand rather than using ObjectMapper to minimize allocations and get peak
// performance.
try {
// All
var typeDesc: String = null
// LwcExpression
var expression: String = null
var step: Long = -1L
// LwcSubscription
// - expression
var metrics: List[LwcDataExpr] = Nil
// LwcDatapoint
var timestamp: Long = -1L
var id: String = null
var tags: Map[String, String] = Map.empty
var value: Double = Double.NaN
// LwcDiagnosticMessage
// - id
// - message: DiagnosticMessage
var diagnosticMessage: DiagnosticMessage = null
// LwcHeartbeat
// - timestamp
// - step
// DiagnosticMessage
// - message: String
var message: String = null
// Actually do the parsing work
foreachField(parser) {
case "type" => typeDesc = nextString(parser)
case "expression" => expression = nextString(parser)
case "step" => step = nextLong(parser)
case "metrics" => metrics = parseDataExprs(parser)
case "timestamp" => timestamp = nextLong(parser)
case "id" => id = nextString(parser)
case "tags" => tags = parseTags(parser)
case "value" => value = nextDouble(parser)
case "message" =>
val t = parser.nextToken()
if (t == JsonToken.VALUE_STRING)
message = parser.getText
else
diagnosticMessage = parseDiagnosticMessage(parser)
case _ => skipNext(parser)
}
typeDesc match {
case "expression" => LwcExpression(expression, step)
case "subscription" => LwcSubscription(expression, metrics)
case "datapoint" => LwcDatapoint(timestamp, id, tags, value)
case "diagnostic" => LwcDiagnosticMessage(id, diagnosticMessage)
case "heartbeat" => LwcHeartbeat(timestamp, step)
case _ => DiagnosticMessage(typeDesc, message, None)
}
} finally {
parser.close()
}
}
private[model] def parseDataExprs(parser: JsonParser): List[LwcDataExpr] = {
val builder = List.newBuilder[LwcDataExpr]
foreachItem(parser) {
var id: String = null
var expression: String = null
var step: Long = -1L
foreachField(parser) {
case "id" => id = nextString(parser)
case "expression" => expression = nextString(parser)
case "step" | "frequency" => step = nextLong(parser)
case _ => skipNext(parser)
}
builder += LwcDataExpr(id, expression, step)
}
builder.result()
}
private def parseDiagnosticMessage(parser: JsonParser): DiagnosticMessage = {
var typeDesc: String = null
var message: String = null
foreachField(parser) {
case "type" => typeDesc = nextString(parser)
case "message" => message = nextString(parser)
case _ => skipNext(parser)
}
DiagnosticMessage(typeDesc, message, None)
}
private def parseTags(parser: JsonParser): Map[String, String] = {
val builder = new SmallHashMap.Builder[String, String](30)
foreachField(parser) {
case k => builder.add(k, nextString(parser))
}
builder.result
}
private val Expression = 0
private val Subscription = 1
private val Datapoint = 2
private val LwcDiagnostic = 3
private val Diagnostic = 4
private val Heartbeat = 5
/**
* Encode messages using Jackson's smile format into a ByteString.
*/
def encodeBatch(msgs: Seq[AnyRef]): ByteString = {
encodeBatch(msgs, new ByteArrayOutputStream())
}
/**
* Encode messages using Jackson's smile format into a ByteString. The
* `ByteArrayOutputStream` will be reset and used as a buffer for encoding
* the data.
*/
def encodeBatch(msgs: Seq[AnyRef], baos: ByteArrayOutputStream): ByteString = {
baos.reset()
val gen = Json.newSmileGenerator(baos)
try {
gen.writeStartArray()
msgs.foreach {
case msg: LwcExpression =>
gen.writeNumber(Expression)
gen.writeString(msg.expression)
gen.writeNumber(msg.step)
case msg: LwcSubscription =>
gen.writeNumber(Subscription)
gen.writeString(msg.expression)
gen.writeStartArray()
msg.metrics.foreach { m =>
gen.writeString(m.id)
gen.writeString(m.expression)
gen.writeNumber(m.step)
}
gen.writeEndArray()
case msg: LwcDatapoint =>
gen.writeNumber(Datapoint)
gen.writeNumber(msg.timestamp)
gen.writeString(msg.id)
gen.writeNumber(msg.tags.size)
msg.tags.foreachEntry { (k, v) =>
gen.writeString(k)
gen.writeString(v)
}
gen.writeNumber(msg.value)
case msg: LwcDiagnosticMessage =>
gen.writeNumber(LwcDiagnostic)
gen.writeString(msg.id)
gen.writeString(msg.message.typeName)
gen.writeString(msg.message.message)
case msg: DiagnosticMessage =>
gen.writeNumber(Diagnostic)
gen.writeString(msg.typeName)
gen.writeString(msg.message)
case msg: LwcHeartbeat =>
gen.writeNumber(Heartbeat)
gen.writeNumber(msg.timestamp)
gen.writeNumber(msg.step)
case msg =>
throw new MatchError(s"unsupported message type: $msg")
}
gen.writeEndArray()
} finally {
gen.close()
}
ByteString.fromArrayUnsafe(baos.toByteArray)
}
/**
* Parse a set of messages that were encoded with `encodeBatch`.
*/
def parseBatch(msgs: ByteString): List[AnyRef] = {
parseBatch(Json.newSmileParser(new ByteStringInputStream(msgs)))
}
private def parseBatch(parser: JsonParser): List[AnyRef] = {
val builder = List.newBuilder[AnyRef]
try {
foreachItem(parser) {
parser.getIntValue match {
case Expression =>
builder += LwcExpression(parser.nextTextValue(), parser.nextLongValue(-1L))
case Subscription =>
val expression = parser.nextTextValue()
val dataExprs = List.newBuilder[LwcDataExpr]
foreachItem(parser) {
dataExprs += LwcDataExpr(
parser.getText,
parser.nextTextValue(),
parser.nextLongValue(-1L)
)
}
builder += LwcSubscription(expression, dataExprs.result())
case Datapoint =>
val timestamp = parser.nextLongValue(-1L)
val id = parser.nextTextValue()
val tags = parseTags(parser, parser.nextIntValue(0))
val value = nextDouble(parser)
builder += LwcDatapoint(timestamp, id, tags, value)
case LwcDiagnostic =>
val id = parser.nextTextValue()
val typeName = parser.nextTextValue()
val message = parser.nextTextValue()
builder += LwcDiagnosticMessage(id, DiagnosticMessage(typeName, message, None))
case Diagnostic =>
val typeName = parser.nextTextValue()
val message = parser.nextTextValue()
builder += DiagnosticMessage(typeName, message, None)
case Heartbeat =>
val timestamp = parser.nextLongValue(-1L)
val step = parser.nextLongValue(-1L)
builder += LwcHeartbeat(timestamp, step)
case v =>
throw new MatchError(s"invalid type id: $v")
}
}
} finally {
parser.close()
}
builder.result()
}
private def parseTags(parser: JsonParser, n: Int): Map[String, String] = {
if (n == 0) {
SmallHashMap.empty[String, String]
} else {
val builder = new SmallHashMap.Builder[String, String](2 * n)
var i = 0
while (i < n) {
val k = parser.nextTextValue()
val v = parser.nextTextValue()
builder.add(k, v)
i += 1
}
builder.result
}
}
def toSSE(msg: JsonSupport): ByteString = {
val prefix = msg match {
case _: LwcSubscription => subscribePrefix
case _: LwcDatapoint => metricDataPrefix
case _: LwcDiagnosticMessage => diagnosticPrefix
case _: LwcHeartbeat => heartbeatPrefix
case _ => defaultPrefix
}
prefix ++ ByteString(msg.toJson) ++ suffix
}
private val subscribePrefix = ByteString("info: subscribe ")
private val metricDataPrefix = ByteString("data: metric ")
private val diagnosticPrefix = ByteString("data: diagnostic ")
private val heartbeatPrefix = ByteString("data: heartbeat ")
private val defaultPrefix = ByteString("data: ")
private val suffix = ByteString("\r\n\r\n")
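// Round-trip sketch (illustrative): a batch encoded with `encodeBatch` can be
// decoded back with `parseBatch`.
//
//   val batch = encodeBatch(Seq(LwcHeartbeat(60000L, 60000L)))
//   parseBatch(batch)   // => List(LwcHeartbeat(60000L, 60000L))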
}
|
MaksGovor/FP-labs | lab2_OOP/src/test/scala/objsets/PostSetSuite.scala | <gh_stars>0
package objsets
import PostReader.allposts
class PostSetSuite extends munit.FunSuite {
val set1 = new Empty()
val set2: PostSet = set1.incl(new Post("a", "a body", 20))
val set3: PostSet = set2.incl(new Post("b", "b body", 20))
val c = new Post("c", "c body", 7)
val d = new Post("d", "d body", 9)
val set4c: PostSet = set3.incl(c)
val set4d: PostSet = set3.incl(d)
val set5: PostSet = set4c.incl(d)
val set6: PostSet = set5.union(new Empty())
val set7: PostSet = new Empty().union(set5)
def asSet(posts: PostSet): Set[Post] = {
var res = Set[Post]()
posts.foreach(res += _)
res
}
def size(set: PostSet): Int = asSet(set).size
test("filter: on empty set") {
assertEquals(size(set1.filter(tw => tw.user == "a")), 0)
}
test("filter: a on set5") {
assertEquals(size(set5.filter(tw => tw.user == "a")), 1)
}
test("filter: twenty on set5") {
assertEquals(size(set5.filter(tw => tw.likes == 20)), 2)
}
test("union: set4c and set4d") {
assertEquals(size(set4c.union(set4d)), 4)
}
test("union: with empty set1") {
assertEquals(size(set5.union(set1)), 4)
}
test("union: with empty set2") {
assertEquals(size(set1.union(set5)), 4)
}
test("descending: set5") {
val trends = set5.descendingByLikes
assert(!trends.isEmpty)
assert(trends.head.user == "a" || trends.head.user == "b")
}
test("mostLiked: set1") {
intercept[java.util.NoSuchElementException] {
set1.mostLiked
}
}
test("mostLiked: set6 = set5.union(new Empty())") {
val post: Post = set6.mostLiked
assert(post.user == "a" || post.user == "b")
assertEquals(post.likes, 20)
}
test("mostLiked: set7 = new Empty().union(set5)") {
val post: Post = set7.mostLiked
assert(post.user == "a" || post.user == "b")
assertEquals(post.likes, 20)
}
test("mostLiked: with empty set1.union(allposts)") {
val post: Post = set1.union(allposts).mostLiked
assert(post.user == "mashable")
assertEquals(post.likes, 345)
}
test("mostLiked: with empty allposts.union(set1)") {
val post: Post = allposts.union(set1).mostLiked
assert(post.user == "mashable")
assertEquals(post.likes, 345)
}
test("descendingByLikes: set1 = new Empty()") {
val likedList: PostList = set1.descendingByLikes
assertEquals(likedList.isEmpty, true)
intercept[java.util.NoSuchElementException] { likedList.head }
intercept[java.util.NoSuchElementException] { likedList.tail }
}
test("descendingByLikes: set5") {
assertEquals(set5.descendingByLikes.isEmpty, false)
}
import scala.concurrent.duration._
override val munitTimeout: FiniteDuration = 10.seconds
}
|
MaksGovor/FP-labs | lab_recursion/src/test/scala/recfun/calcExpressionSuite.scala | <reponame>MaksGovor/FP-labs
package recfun
import org.scalatest.funsuite.AnyFunSuite
import org.junit.runner.RunWith
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class calcExpressionSuite extends AnyFunSuite {
import Main.calcExpression
test("calcExpression: x = 3") {
assert(calcExpression(3) === 6)
}
test("calcExpression: x = 2") {
assert(calcExpression(2) === 2)
}
test("calcExpression: x = -1") {
assert(calcExpression(-1) === 0)
}
test("calcExpression: x = 0") {
assertThrows[IllegalArgumentException] {
calcExpression(0)
}
}
test("calcExpression: x = 1") {
assertThrows[IllegalArgumentException] {
calcExpression(1)
}
}
}
|
MaksGovor/FP-labs | quickcheck/build.sbt | name := "quickcheck"
version := "0.1"
scalaVersion := "2.13.6"
scalacOptions ++= Seq("-language:implicitConversions", "-deprecation")
libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % Test
libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.15.4"
testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v", "-s") |
MaksGovor/FP-labs | codecs/project/buildSettings.sbt | <filename>codecs/project/buildSettings.sbt
libraryDependencies += "com.typesafe.play" %% "play-json" % "2.7.4"
libraryDependencies += "commons-codec" % "commons-codec" % "1.10"
|
MaksGovor/FP-labs | quickcheck/src/main/scala/quickcheck/QuickCheck.scala | package quickcheck
import org.scalacheck._
import Arbitrary._
import Gen._
import Prop._
import scala.annotation.tailrec
abstract class QuickCheckHeap extends Properties("Heap") with IntHeap {
lazy val genHeap: Gen[H] = for {
x <- arbitrary[A]
heap <- oneOf(const(empty), genHeap)
} yield insert(x, heap)
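// Recursive generator: each step inserts an arbitrary element into either the
// empty heap or a smaller generated heap, so heaps of any size can be produced.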
implicit lazy val arbHeap: Arbitrary[H] = Arbitrary(genHeap)
@tailrec
final def heapEquals(h1: H, h2: H): Boolean = (h1, h2) match {
case (Nil, Nil) => true
case _ if (isEmpty(h1) || isEmpty(h2)) => false
case (hs1, hs2) => {
findMin(hs1) == findMin(hs2) && heapEquals(deleteMin(hs1), deleteMin(hs2))
}
}
@tailrec
final def checkElem(h: H, el: Int): Boolean = h match {
case Nil => false
case hs1 => findMin(hs1) == el || checkElem(deleteMin(hs1), el)
}
def len(h: H): Int = {
@tailrec
def lenAcc(h: H, acc: Int): Int = h match {
case Nil => acc
case hs => lenAcc(deleteMin(hs), acc + 1)
}
lenAcc(h, 0)
}
def getSorted(h: H): H = {
@tailrec
def getSortedAcc(h: H, acc: H): H = h match {
case Nil => acc
case hs => getSortedAcc(deleteMin(hs), insert(findMin(hs), acc))
}
getSortedAcc(h, empty)
}
property("gen1") = forAll { (h: H) =>
val m = if (isEmpty(h)) 0 else findMin(h)
findMin(insert(m, h)) == m
}
property("order of finding the minimal is correct") = forAll { (x: Int, y: Int) =>
val heap = insert(x, insert(y, empty))
val min1 = findMin(heap)
val min2 = findMin(deleteMin(heap))
min1 == ord.min(x, y) && min2 == ord.max(x, y)
}
property("minimum element of two heaps") = forAll { (x: Int, y: Int) =>
val heap = insert(x, insert(y, empty))
val actual = findMin(heap)
actual == ord.min(x, y)
}
property("inserting one item into an empty heap and removing it") = forAll { (x: Int) =>
val heap = insert(x, empty)
val emptyHeap = deleteMin(heap)
isEmpty(emptyHeap)
}
property("any heap is properly sorted") = forAll { (h: H) =>
heapEquals(h, getSorted(h))
}
property("any merging of heaps is discouraged") = forAll { (h1: H, h2: H) =>
val heap = meld(h1, h2)
heapEquals(heap, getSorted(heap))
}
property("any heap is sorted if you find/remove the minimum") = forAll { (h: H) =>
!isEmpty(h) ==> {
val heap = deleteMin(h)
heapEquals(heap, getSorted(heap))
}
}
property("minimum of the meld of two heaps - minimum of one of the shining") = forAll { (h1: H, h2: H) =>
!(isEmpty(h1) && isEmpty(h2)) ==> {
val min1 = findMin(h1)
val min2 = findMin(h2)
val heap = meld(h1, h2)
findMin(heap) == ord.min(min1, min2)
}
}
property("inserting two item into an empty heap and removing it") = forAll { (x: Int, y: Int) =>
val heap = insert(x, insert(y, empty))
val emptyHeap = deleteMin(deleteMin(heap))
isEmpty(emptyHeap)
}
property("adding an element larger than the minimum to the pile") = forAll{ (h: H, x: Int) =>
!isEmpty(h) && x < findMin(h) ==> {
val heap = insert(x, h)
findMin(heap) == x
}
}
property("merging two heaps and comparing the same without the minimum with insertion") = forAll { (h1: H, h2: H) =>
!isEmpty(h1) ==> {
val minH1 = findMin(h1)
val h1Mod = deleteMin(h1)
val h2Mod = insert(minH1, h2)
heapEquals(meld(h1, h2), meld(h1Mod, h2Mod))
}
}
property("order of the meld arguments does not matter") = forAll { (h1: H, h2: H) =>
heapEquals(meld(h1, h2), meld(h2, h1))
}
property("merging any heap with an empty heap gives it the equivalent heap") = forAll { (h: H) =>
heapEquals(h, meld(h, empty))
}
property("inserted element is in the heap") = forAll { (h: H, x: Int) =>
!isEmpty(h) && x < findMin(h) ==> {
val heap = insert(x, h)
checkElem(heap, x)
}
}
property("after removing the minimum, the length of the pile decreases") = forAll { (h: H) =>
!isEmpty(h) ==> {
val heap = deleteMin(h)
len(heap) == len(h) - 1
}
}
property("length of the meld heap is equal to the sum of the lengths") = forAll { (h1: H, h2: H) =>
val heap = meld(h1, h2)
len(heap) == len(h1) + len(h2)
}
}
|
MaksGovor/FP-labs | codecs/build.sbt | <reponame>MaksGovor/FP-labs
scalaVersion := "2.13.1"
scalacOptions ++= Seq("-deprecation")
libraryDependencies ++= Seq(
"org.typelevel" %% "jawn-parser" % "1.0.0",
"org.scalacheck" %% "scalacheck" % "1.14.2" % Test,
"com.novocode" % "junit-interface" % "0.11" % Test
)
testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v", "-s")
|
MaksGovor/FP-labs | codecs/project/plugins.sbt | addSbtPlugin("org.scala-js" % "sbt-scalajs" % "0.6.28")
addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.9.7")
|
MaksGovor/FP-labs | lab_recursion/src/test/scala/recfun/PascalSuite.scala | package recfun
import org.scalatest.funsuite.AnyFunSuite
import org.junit.runner.RunWith
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PascalSuite extends AnyFunSuite {
import Main.pascal
test("pascal: col=0,row=2") {
assert(pascal(0,2) === 1)
}
test("pascal: col=1,row=2") {
assert(pascal(1,2) === 2)
}
test("pascal: col=1,row=3") {
assert(pascal(1,3) === 3)
}
test("pascal: col=5,row=1") {
assertThrows[IllegalArgumentException] {
pascal(5, 1)
}
}
test("pascal: col=-1,row=1") {
assertThrows[IllegalArgumentException] {
pascal(-1, 1)
}
}
}
|
MaksGovor/FP-labs | lab2_OOP/src/main/scala/objsets/PostSet.scala | <reponame>MaksGovor/FP-labs
package objsets
import PostReader.allposts
/**
* A class to represent posts.
*/
class Post(val user: String, val text: String, val likes: Int) {
override def toString: String =
"User: " + user + "\n" +
"Text: " + text + " [" + likes + "]"
}
/**
* This represents a set of objects of type `Post` in the form of a binary search
 * tree. Every branch in the tree has two children (two `PostSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
* subtree are smaller than the post at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two posts (we
* need to be able to say which of two posts is larger, or if they are equal). In
* this implementation, the equality / order of posts is based on the post's text
 * (see `def incl`). Hence, a `PostSet` cannot contain two posts with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
*/
abstract class PostSet extends postsetInterface {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
   * Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Post => Boolean): PostSet = filterAcc(p, new Empty)
/**
   * This is a helper method for `filter` that propagates the accumulated posts.
*/
def filterAcc(p: Post => Boolean, acc: PostSet): PostSet
/**
   * Returns a new `PostSet` that is the union of `PostSet`s `this` and `that`.
*
   * Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def union(that: PostSet): PostSet = filterAcc(_ => true, that)
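  // Note: `filterAcc(_ => true, that)` folds every element of `this` into the
  // accumulator `that`, so the resulting set holds the elements of both.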
/**
* Returns the post from this set which has the greatest likes count.
*
* Calling `mostLiked` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*
   * Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def mostLiked: Post
/**
* Returns a list containing all posts of this set, sorted by likes count
* in descending order. In other words, the head of the resulting list should
* have the highest likes count.
*
   * Hint: the method `remove` on `PostSet` will be very useful.
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def descendingByLikes: PostList
/**
* The following methods are already implemented
*/
/**
   * Returns a new `PostSet` which contains all elements of this set, and
   * the new element `post` in case it does not already exist in this set.
*
* If `this.contains(post)`, the current set is returned.
*/
def incl(post: Post): PostSet
/**
   * Returns a new `PostSet` which excludes `post`.
*/
def remove(post: Post): PostSet
/**
* Tests if `post` exists in this `postset`.
*/
def contains(post: Post): Boolean
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Post => Unit): Unit
}
class Empty extends PostSet {
/**
* The following methods are already implemented
*/
def contains(post: Post): Boolean = false
def incl(post: Post): PostSet = new NonEmpty(post, new Empty(), new Empty())
def remove(post: Post): PostSet = this
def foreach(f: Post => Unit): Unit = ()
/**
   * This is a helper method for `filter` that propagates the accumulated posts.
*/
override def filterAcc(p: Post => Boolean, acc: PostSet): PostSet = acc
def mostLiked: Post = throw new java.util.NoSuchElementException("It is impossible to define it on an empty set")
def descendingByLikes: PostList = Nil
}
class NonEmpty(elem: Post, left: PostSet, right: PostSet) extends PostSet {
def filterAcc(p: Post => Boolean, acc: PostSet): PostSet = {
if(p(elem)) left.filterAcc(p, right.filterAcc(p, acc.incl(elem)))
else left.filterAcc(p, right.filterAcc(p, acc))
}
/**
* The following methods are already implemented
*/
def contains(x: Post): Boolean =
if (x.text < elem.text)
left.contains(x)
else if (elem.text < x.text)
right.contains(x)
else true
def incl(x: Post): PostSet =
if (x.text < elem.text)
new NonEmpty(elem, left.incl(x), right)
else if (elem.text < x.text)
new NonEmpty(elem, left, right.incl(x))
else
this
def remove(tw: Post): PostSet =
if (tw.text < elem.text)
new NonEmpty(elem, left.remove(tw), right)
else if (elem.text < tw.text)
new NonEmpty(elem, left, right.remove(tw))
else
left.union(right)
def foreach(f: Post => Unit): Unit = {
f(elem)
left.foreach(f)
right.foreach(f)
}
def mostLiked: Post = {
def maxLike(p1: Post, p2: Post): Post = if (p1.likes > p2.likes) p1 else p2
maxLike(
if (left.isInstanceOf[Empty]) elem else left.mostLiked,
maxLike(if (right.isInstanceOf[Empty]) elem else right.mostLiked, elem)
)
}
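  // The `isInstanceOf[Empty]` guards above substitute `elem` for the minimum of
  // an empty subtree, avoiding the NoSuchElementException thrown by `Empty.mostLiked`.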
def descendingByLikes: PostList = {
new Cons(mostLiked, remove(mostLiked).descendingByLikes)
}
}
trait PostList {
def head: Post
def tail: PostList
def isEmpty: Boolean
def foreach(f: Post => Unit): Unit = {
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
}
object Nil extends PostList {
def head = throw new java.util.NoSuchElementException("head of EmptyList")
def tail = throw new java.util.NoSuchElementException("tail of EmptyList")
def isEmpty = true
}
class Cons(val head: Post, val tail: PostList) extends PostList {
def isEmpty = false
}
object GoogleVsApple {
val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")
val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")
def getThematicPosts (posts: PostSet, keywords: List[String]): PostSet = {
posts.filter(p => keywords.exists(p.text.contains))
}
lazy val googleposts: PostSet = getThematicPosts(allposts, google)
lazy val appleposts: PostSet = getThematicPosts(allposts, apple)
/**
* A list of all posts mentioning a keyword from either apple or google,
* sorted by the number of likes.
*/
lazy val trending: PostList = googleposts.union(appleposts).descendingByLikes
}
object Main extends App {
// Print the trending posts
GoogleVsApple.trending foreach println
}
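// A small usage sketch with made-up posts (hypothetical data, not part of the
// assignment) showing how the set operations compose:
object UsageSketch {
  val posts: PostSet = new Empty()
    .incl(new Post("alice", "trying out my new iPhone", 3))
    .incl(new Post("bob", "galaxy nexus ftw", 7))
  val topPost: Post = posts.mostLiked // bob's post (7 likes)
  val popular: PostSet = posts.filter(_.likes > 5) // keeps only bob's post
  def printAll(): Unit = posts.descendingByLikes foreach println // bob's first, then alice's
}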
|
MaksGovor/FP-labs | streams/src/main/scala/streams/Solver.scala | package streams
/**
* This component implements the solver for the Bloxorz game
*/
trait Solver extends GameDef {
/**
* Returns `true` if the block `b` is at the final position
*/
def done(b: Block): Boolean = b.b1 == goal && b.b2 == goal
/**
* This function takes two arguments: the current block `b` and
* a list of moves `history` that was required to reach the
* position of `b`.
*
* The `head` element of the `history` list is the latest move
* that was executed, i.e. the last move that was performed for
* the block to end up at position `b`.
*
* The function returns a lazy list of pairs: the first element of
* the each pair is a neighboring block, and the second element
* is the augmented history of moves required to reach this block.
*
* It should only return valid neighbors, i.e. block positions
* that are inside the terrain.
*/
def neighborsWithHistory(b: Block, history: List[Move]): LazyList[(Block, List[Move])] = {
for { (block, move) <- b.legalNeighbors.to(LazyList) } yield (block, move :: history)
}
/**
* This function returns the list of neighbors without the block
* positions that have already been explored. We will use it to
* make sure that we don't explore circular paths.
*/
def newNeighborsOnly(neighbors: LazyList[(Block, List[Move])],
explored: Set[Block]): LazyList[(Block, List[Move])] = {
for { (block, moves) <- neighbors if !(explored contains block) } yield (block, moves)
}
/**
* The function `from` returns the lazy list of all possible paths
* that can be followed, starting at the `head` of the `initial`
* lazy list.
*
* The blocks in the lazy list `initial` are sorted by ascending path
* length: the block positions with the shortest paths (length of
* move list) are at the head of the lazy list.
*
* The parameter `explored` is a set of block positions that have
* been visited before, on the path to any of the blocks in the
* lazy list `initial`. When search reaches a block that has already
* been explored before, that position should not be included a
* second time to avoid cycles.
*
* The resulting lazy list should be sorted by ascending path length,
* i.e. the block positions that can be reached with the fewest
* amount of moves should appear first in the lazy list.
*
* Note: the solution should not look at or compare the lengths
* of different paths - the implementation should naturally
* construct the correctly sorted lazy list.
*/
def from(initial: LazyList[(Block, List[Move])],
explored: Set[Block]): LazyList[(Block, List[Move])] = {
if (initial.isEmpty) LazyList.empty
else {
val more = for {
(block, moves) <- initial
next <- newNeighborsOnly(neighborsWithHistory(block, moves), explored)
} yield next
initial #::: from(more, explored ++ more.map(_._1))
}
}
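  // Note: each recursive call extends every path in `initial` by exactly one
  // move, so prepending `initial` keeps the stream ordered by path length --
  // a breadth-first search expressed as a lazy list.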
/**
* The lazy list of all paths that begin at the starting block.
*/
lazy val pathsFromStart: LazyList[(Block, List[Move])] = from(Set((startBlock, Nil)).to(LazyList), Set.empty)
/**
* Returns a lazy list of all possible pairs of the goal block along
* with the history how it was reached.
*/
lazy val pathsToGoal: LazyList[(Block, List[Move])] = for {
(block, moves) <- pathsFromStart
if (done(block))
} yield (block, moves)
/**
* The (or one of the) shortest sequence(s) of moves to reach the
* goal. If the goal cannot be reached, the empty list is returned.
*
* Note: the `head` element of the returned list should represent
* the first move that the player should perform from the starting
* position.
*/
lazy val solution: List[Move] = pathsToGoal match {
case (_, moves) #:: xs => moves.reverse
case _ => Nil
}
}
|
MaksGovor/FP-labs | lab2_OOP/src/main/scala/objsets/PostSetInterface.scala | package objsets
/**Do not change signatures*/
trait postsetInterface {
def incl(post: Post): PostSet
def remove(post: Post): PostSet
def contains(post: Post): Boolean
def foreach(f: Post => Unit): Unit
def union(that: PostSet): PostSet
def mostLiked: Post
def descendingByLikes: PostList
}
|