repo_name | path | text
---|---|---|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/TrickySet.scala | package net.tommay.sudoku
case class TrickySet (
name: String,
common: Stream[Int], // XXX Set?
rest: Stream[Int], // XXX Set?
eliminate: Stream[Int],
checkNeeded: Stream[Set[Int]])
// True to check an "inverted" TrickySet, where we scan the coincident
// rows and columns for needed digits rather than coincident squares.
// I personally don't do this, it's too hard.
//
// XXX checkCoincidentRowsColumnsForNeeded = False
// XXX Checking the eliminate set for newly forced Unknowns isn't implemented.
// It's also not something I do.
// Within a square, if the only possible places for a given digit are
// in the same row/col (i.e., in "common" but not in "rest"), then the
// digit can be removed from the possibilities for the rest of the
// Unknowns in that row/col ("eliminate"). This may cause the digit
// to be Forced in one of the squares containing the eliminate set
// ("checkNeeded").
//
// The reverse of the situation is also true. In a given row or
// column if it is only possible to place a given digit within a
// single square, then the digit can be eliminated from the other
// Unknowns of that square.
//
// Each tuple in trickySets contains three lists of Unknowns. If a
// digit is possible in the first list but not the second, it will be
// removed from the possibilities of the third.
//
// After we apply a tricky set with a particular digit, it may create
// some immediate placements:
// - There may now be only one place in a checkNeeded square where the digit can go.
// - Some of the Unknowns in the eliminate set may now have only one possibility.
object TrickySet {
val trickySets: Stream[TrickySet] = {
createTrickySets(false)
}
val inverseTrickySets: Stream[TrickySet] = {
createTrickySets(true)
}
val allTrickySets = {
trickySets ++ inverseTrickySets
}
def createTrickySets(inverse: Boolean) : Stream[TrickySet] = {
val rows = ExclusionSet.rows.toStream
val columns = ExclusionSet.columns.toStream
val squares = ExclusionSet.squares.toStream
val getRows = getCellSetsIncluding(rows)(_)
val getColumns = getCellSetsIncluding(columns)(_)
val set1 = for (square <- squares; row <- rows) yield
createTrickySetsFrom(inverse, square, row, getRows)
val set2 = for (square <- squares; col <- columns) yield
createTrickySetsFrom(inverse, square, col, getColumns)
(set1 ++ set2).flatten
}
def createTrickySetsFrom(
inverse: Boolean,
square: ExclusionSet,
row: ExclusionSet,
getSetsIncluding: Set[Int] => Stream[Set[Int]])
: Option[TrickySet] =
{
val common = square.cells.intersect(row.cells)
if (common.isEmpty) {
None
}
else {
val restOfRow = row.cells.diff(common)
val restOfSquare = square.cells.diff(common)
Some(
if (!inverse) {
TrickySet(
name = s"TrickySet ${square.name} ${row.name}",
common = common.toStream,
rest = restOfSquare.toStream,
eliminate = restOfRow.toStream,
checkNeeded = getSquaresIncluding(restOfRow)
.map(_ -- restOfRow))
}
else {
TrickySet(
name = s"Inverse TrickySet ${square.name} ${row.name}",
common = common.toStream,
rest = restOfRow.toStream,
eliminate = restOfSquare.toStream,
checkNeeded = getSetsIncluding(restOfSquare)
.map(_ -- restOfSquare))
}
)
}
}
// Given some ExclusionSets and some cellNumbers, return the
// ExclusionSets that contain them.
//
def getCellSetsIncluding
(exclusionSets: Stream[ExclusionSet])
(cells: Set[Int])
: Stream[Set[Int]] =
{
val cellSets = exclusionSets.map(_.cells)
cellSets.filter(_.intersect(cells).nonEmpty)
}
// Given some cellNumbers, return the ExclusionSet squares containing
// them.
//
def getSquaresIncluding(cells: Set[Int]) : Stream[Set[Int]] = {
getCellSetsIncluding(ExclusionSet.squares.toStream)(cells)
}
}
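// Illustrative sketch, not part of the original source: a small check of the
// precomputed sets above. Each square shares cells with exactly three rows and
// three columns, so createTrickySets yields 27 + 27 = 54 sets per variant.
object TrickySetExample {
  def main(args: Array[String]): Unit = {
    println(TrickySet.trickySets.size)        // expected: 54
    println(TrickySet.inverseTrickySets.size) // expected: 54
    println(TrickySet.allTrickySets.size)     // expected: 108
    TrickySet.trickySets.take(2).foreach(ts => println(ts.name))
  }
}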
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Solver.scala | package net.tommay.sudoku
import scala.util.Random
case class Solver (
options: SolverOptions,
rnd: Option[Random],
puzzle: Puzzle,
unknowns: Stream[Unknown],
// steps is consed in reverse order. It is reversed when
// constructing a Solution.
steps: List[Step],
heuristics: Stream[Solver => Stream[Next]])
{
def place(cellNumber: Int, digit: Int) : Solver = {
val newPuzzle = puzzle.place(cellNumber, digit)
val newUnknowns = unknowns
.withFilter(_.cellNumber != cellNumber)
.map(_.place(cellNumber, digit))
this.copy(puzzle = newPuzzle, unknowns = newUnknowns)
}
// All of the calls in the solutions chain which eventually may come
// back around to top are tail calls, but scala doesn't do tail call
// optimization except for direct recursion. But we should make
// at most 81 deep nested calls to solutionsTop to solve a puzzle so
// we shouldn't blow the stack. If we do, there are always trampolines:
// http://stackoverflow.com/questions/16539488/why-scala-doesnt-make-tail-call-optimization
// scala.util.control.TailCalls
//
def solutionsTop : Stream[Solution] = {
if (isFinished) {
// We're finished, for some definition of finished. Return the
// Solution.
Stream(Solution(puzzle, steps.reverse))
}
else {
// Carry on with solutionsHeuristic
solutionsHeuristic
}
}
def isFinished : Boolean = {
// If there are no more Unknowns we're always finished.
if (unknowns.isEmpty) {
true
}
else if (options.solveCompletely) {
false
}
else {
// We just need a hint. We can stop when there's a Step
// suitable for a hint.
steps.exists(_.placementOption.isDefined)
}
}
def solutionsHeuristic : Stream[Solution] = {
if (options.useHeuristics) {
val (rnd1, rnd2) = Solver.maybeSplit(rnd)
heuristics.flatMap {func =>
func(this) match {
case empty@Stream.Empty => empty
case stream =>
rnd1 match {
case None => stream
// We're only going to be using the head of the whole
// heuristics Stream (see below), so just keep one random
// element of the heuristic's results, if any, in the Stream.
case Some(rnd1) => Stream(Solver.pickRandom(rnd1, stream))
}
}
} match {
case Stream.Empty =>
// All heuristics returned empty lists.
solutionsStuck
case (next #:: _) =>
val nextSolver = this.copy(rnd = rnd2)
nextSolver.placeAndContinue(next)
}
}
else {
// Skip the heuristics and continue with solutionsStuck.
solutionsStuck
}
}
def placeAndContinue(next: Next) : Stream[Solution] = {
val placement = next.placement
val newSolver = place(placement.cellNumber, placement.digit)
val step = Step(
newSolver.puzzle, next.tjpe, Some(placement), next.cells)
val newSteps = step :: steps
val newSolver2 = newSolver.copy(steps = newSteps)
newSolver2.solutionsTop
}
def solutionsStuck : Stream[Solution] = {
// We get here because we can't place a digit using human-style
// heuristics, so we've either failed or we have to guess and
// recurse. We can distinguish by examining the cell with the
// fewest possibilities remaining, which is also the best cell to
// make a guess for.
// I tried using unknowns.minBy but it was slower, wtf.
val minUnknown = Util.minBy(unknowns, {x: Unknown => x.numPossible})
val cellNumber = minUnknown.cellNumber
// I tried matching on minUnknown.getPossible and only binding
// possible in case _, but it was slower, wtf.
val possible = minUnknown.getPossible
possible match {
case Nil =>
// Failed. No solutions.
Stream.Empty
case List(digit) =>
// One possibility. The choice is forced, no guessing. But
// we only use the force if a) we're guessing, and b) we're
// not using heuristics, because if we are then forcing is
// done by Heuristic.Forced via findForced.
if (options.useGuessing && !options.useHeuristics) {
val next = Next(Heuristic.ForcedGuess, Placement(cellNumber, digit),
List(cellNumber))
placeAndContinue(next)
}
else {
// There is a forced guess but we're not configured to use
// it. See if we can apply a TrickySet to create an
// opportunity.
applyOneTrickySetIfAllowed match {
case Some(newSolver) => newSolver.solutionsTop
case _ => Stream.Empty
}
}
case _ =>
// Multiple possibilities. Before we guess, see if it's
// possible to permanently apply a TrickySet to create
// possibilities for heuristics.
applyOneTrickySetIfAllowed match {
case Some(newSolver) => newSolver.solutionsTop
case _ =>
if (options.useGuessing) {
// Guess each possibility, maybe in a random order, and
// recurse. We could use Random.split when shuffling or
// recursing, but it's not really important for this
// application.
val shuffledPossible = Solver.maybeShuffle(rnd, possible)
doGuesses(cellNumber, shuffledPossible)
}
else {
Stream.Empty
}
}
}
}
// For each digit in the list, use it as a guess for unknown
// and try to solve the resulting Puzzle.
def doGuesses(cellNumber: Int, digits: Iterable[Int])
: Stream[Solution] =
{
digits.foldLeft(Stream.empty[Solution]) {(accum, digit) =>
val next = Next(Heuristic.Guess, Placement(cellNumber, digit),
List(cellNumber))
accum #::: placeAndContinue(next)
}
}
// Try to place a digit where an ExclusionSet has only one unplaced
// cell.
def findMissingOne : Stream[Next] = {
ExclusionSet.exclusionSets.flatMap{findMissingOneInSet(_)}
}
def findMissingOneInSet(set: ExclusionSet) : Stream[Next] = {
Solver.unknownsInSet(unknowns, set.cells) match {
case Stream(unknown) =>
// Exactly one cell in the set is unknown. Place a digit in
// it. Note that since this is the only unknown position in
// the set there should be exactly one possible digit
// remaining. But we may have made a wrong guess, which
// leaves no possibilities.
findForcedForUnknown(Heuristic.MissingOne, set.cells)(unknown)
case _ =>
// Zero or multiple cells in the set are unknown.
Stream.Empty
}
}
// Try to place a digit where a set has two unplaced cells. We only
// place one of the digits but the second will follow quickly.
def findMissingTwo : Stream[Next] = {
ExclusionSet.exclusionSets.flatMap(findMissingTwoInSet(_))
}
def findMissingTwoInSet(set: ExclusionSet) : Stream[Next] = {
Solver.unknownsInSet(unknowns, set.cells) match {
case unknowns@Stream(_, _) =>
// Exactly two cells in the set are unknown. Place digits in
// them if they are forced. A random one will be chosen
// upstream if necessary (and if we find anything to return).
unknowns.flatMap(findForcedForUnknown(Heuristic.MissingTwo, set.cells))
case _ =>
// Zero or too many unknowns for humans to easily handle.
Stream.Empty
}
}
// Try to place a digit where there is a set that doesn't yet have
// some digit (i.e., it needs it) and there is only one cell in the
// set where it can possibly go.
def findNeeded : Stream[Next] = {
ExclusionSet.exclusionSets.flatMap(findNeededInSet(_))
}
def findNeededInSet(set: ExclusionSet) : Stream[Next] = {
val us = Solver.unknownsInSet(unknowns, set.cells)
val possible = us.foldLeft(0) {(accum, u) => accum | u.possible}
val possibleDigitList = Unknown.getPossibleList(possible)
possibleDigitList
.toStream
.flatMap(Solver.findNeededDigitInUnknowns(
us, Heuristic.Needed, set.cells))
}
def findForced : Stream[Next] = {
// Currying is somewhat ugly in scala, but seems to be a smidge
// faster.
unknowns.flatMap(findForcedForUnknown(Heuristic.Forced, List.empty))
}
// This can return either Stream, List, Option, or any concrete
// Iterable. But it performs best returning Stream.
def findForcedForUnknown
(tjpe: Heuristic.Value, cells: Iterable[Int])
(unknown: Unknown) :
Stream[Next] =
{
unknown.numPossible match {
case 1 =>
val cellNumber = unknown.cellNumber
val digit = unknown.getPossible.head
val cells2 = if (cells.nonEmpty) cells else List(cellNumber)
Stream(Next(tjpe, Placement(cellNumber, digit), cells2))
case _ =>
Stream.Empty
}
}
def applyOneTrickySetIfAllowed : Option[Solver] = {
if (options.usePermanentTrickySets) {
None // XXX
}
else {
None
}
}
def findEasyPeasy : Stream[Next] = {
EasyPeasy.find(puzzle, unknowns)
}
// Return a list of all possible TrickySet placements for the Puzzle.
//
// 1. Find (Digit, TrickySet) pairs where Digit is possible
// in common but not rest.
// 2. If there is only one Unknown in any checkNeeded list where Digit
// is possible then we have found a placement.
//
// We could also check for new forced digits in the eliminate
// positions. We could remove the digit from the possibilities
// permanently, but that's not something a person would remember
// unless they're using paper. So just remove locally while we see if
// that creates a new placement.
//
def findTricky : Stream[Next] = {
// 1:
val applicableTrickySets = findApplicableTrickySets
applicableTrickySets.flatMap{case (digit, trickySet) =>
// 2:
findNeededDigitInTrickySet(digit, trickySet)
}
}
// 1. Return all the (Digit, TrickySet) pairs where Digit is possible
// in TrickySet.common but not in TrickySet.rest.
//
def findApplicableTrickySets : Stream[(Int, TrickySet)] = {
for (digit <- allDigits; trickySet <- TrickySet.allTrickySets;
if trickySetMatchesForDigit(digit, trickySet))
yield (digit, trickySet)
}
val allDigits = (1 to 9).toStream
// 2. Look through the checkNeeded sets to see if any of them now have
// exactly one Unknown where the digit is possible, and if so then
// include the Unknown in the result.
//
def findNeededDigitInTrickySet(digit: Int, trickySet: TrickySet)
: Stream[Next] =
{
val unknownForEachNeededSet = trickySet.checkNeeded.flatMap{
findUnknownWhereDigitIsNeeded(unknowns, digit, _)
}
unknownForEachNeededSet.map{unknown =>
Next(Heuristic.Tricky, Placement(unknown.cellNumber, digit),
trickySet.common)
}
}
def trickySetMatchesForDigit(digit: Int, trickySet: TrickySet) : Boolean = {
isDigitPossibleInSet(digit, trickySet.common) &&
!isDigitPossibleInSet(digit, trickySet.rest)
}
// XXX is Set good?
def findUnknownWhereDigitIsNeeded(
unknowns: Stream[Unknown], digit: Int, set: Set[Int])
: Stream[Unknown] =
{
// Filters can be in either order but this order is way faster.
// XXX is that true?
val unknowns2 = unknowns.filter(unknown =>
unknown.isDigitPossible(digit) && set.contains(unknown.cellNumber))
unknowns2 match {
case Stream(_) => unknowns2
case _ => Stream.Empty
}
}
def isDigitPossibleInSet(digit: Int, cellNumbers :Stream[Int])
: Boolean =
{
// Filters can be in either order but this order is way faster.
// XXX is that true?
val possibleUnknowns = unknowns.filter(unknown =>
unknown.isDigitPossible(digit) &&
cellNumbers.contains(unknown.cellNumber))
possibleUnknowns.nonEmpty
}
}
object Solver {
def create(options: SolverOptions, rnd: Option[Random], puzzle: Puzzle)
: Solver =
{
val (rnd1, rnd2) = maybeSplit(rnd)
val unknowns = maybeShuffle(rnd1, (0 to 80).map(Unknown(_))).toStream
val step = Step(puzzle, Heuristic.Initial)
val heuristicFunctions =
options.heuristics.map(getHeuristicFunction).toStream
val solver = new Solver(
options, rnd, puzzle, unknowns, List(step), heuristicFunctions)
puzzle.each.foldLeft(solver) {case (accum, (cellNumber, digit)) =>
accum.place(cellNumber, digit)
}
}
// Heuristic functions return a Stream. We may need only the first
// result (when creating a Puzzle, to see whether the Puzzle is
// solvable), or we may pick a random result (when providing a
// hint). Using a Stream makes it ok either way since we'll only
// compute what we need.
def getHeuristicFunction(heuristic: Heuristic.Value)
: Solver => Stream[Next] = {
heuristic match {
case Heuristic.EasyPeasy => {_.findEasyPeasy}
case Heuristic.MissingOne => {_.findMissingOne}
case Heuristic.MissingTwo => {_.findMissingTwo}
case Heuristic.Tricky => {_.findTricky}
case Heuristic.Needed => {_.findNeeded}
case Heuristic.Forced => {_.findForced}
}
}
// Try to solve the Puzzle, returning a list of Solutions.
def solutions(options: SolverOptions)(puzzle: Puzzle) : Stream[Solution] = {
val solver = Solver.create(options, None, puzzle)
solver.solutionsTop
}
def randomSolutions
(options: SolverOptions, rnd: Random)
(puzzle: Puzzle)
: Stream[Solution] =
{
val solver = Solver.create(options, Some(rnd), puzzle)
solver.solutionsTop
}
// Compute all the solutions and return them in a random order.
def allRandomSolutions(rnd: Random)(puzzle: Puzzle) = {
randomSolutions(SolverOptions.all, rnd)(puzzle)
}
def maybeSplit(rnd: Option[Random]) : (Option[Random], Option[Random]) = {
rnd match {
case Some(rnd) =>
val (rnd1, rnd2) = Util.split(rnd)
(Some(rnd1), Some(rnd2))
case _ => (None, None)
}
}
// XXX Maybe a set intersection? Use a Set for the larger set?
def unknownsInSet(unknowns: Stream[Unknown], set: Set[Int])
: Stream[Unknown] =
{
unknowns.filter(u => set.contains(u.cellNumber))
}
def findNeededDigitInUnknowns
(unknowns: Stream[Unknown], tjpe: Heuristic.Value, cells: Iterable[Int])
(digit: Int)
: Stream[Next] =
{
unknowns.filter(_.isDigitPossible(digit)) match {
case Stream(unknown) =>
Stream(Next(tjpe, Placement(unknown.cellNumber, digit), cells))
case _ => Stream.Empty
}
}
def findNeededDigitInSet
(unknowns: Stream[Unknown], set: Set[Int], tjpe: Heuristic.Value)
(digit: Int)
: Stream[Next] =
{
val unknownsFromSet = unknownsInSet(unknowns, set)
findNeededDigitInUnknowns(unknownsFromSet, tjpe, set)(digit)
}
def maybeShuffle[T](rnd: Option[Random], list: Iterable[T]) : Iterable[T] = {
rnd match {
case Some(rnd) => Util.shuffle(list.toList, rnd)
case _ => list
}
}
import scala.reflect.ClassTag
def pickRandom[T:ClassTag](rnd: Random, list: Iterable[T]) : T = {
// Forget being tricky and general and handling arbitrarily
// large Iterables and Streaming shuffled results. Just
// materialize the thing and pick something.
val array = list.toArray
array(rnd.nextInt(array.size))
}
}
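// Illustrative sketch, not part of the original source: solving a puzzle from
// an 81-character setup string (here an empty grid, so the solver falls back
// to guessing) and printing the first solution, if any.
object SolverExample {
  def main(args: Array[String]): Unit = {
    val puzzle = Puzzle.fromString("-" * 81)
    val solutions = Solver.solutions(SolverOptions.all)(puzzle)
    solutions.headOption match {
      case Some(solution) => println(solution.puzzle.toPuzzleString)
      case None => println("no solution")
    }
  }
}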
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Util.scala | package net.tommay.sudoku
import scala.util.Random
object Util {
// Slice a list up into sub-lists of n elements, and return the
// sub-lists in a list.
def slices[T](n: Int, list: Iterable[T]) : List[Iterable[T]] = {
list.grouped(n).toList
}
// Weird generic voodoo as per
// http://stackoverflow.com/questions/19385235/how-to-paramaterize-int-as-ordered-in-scala
//
def minBy[T, K <% Ordered[K]](list: Iterable[T], func: T => K) : T = {
val enhanced = list.map(e => (func(e), e))
val minEnhanced = enhanced.tail.foldLeft(enhanced.head) {
case (a@(na, _), b@(nb, _)) =>
// xxx choose a or b on ==?
if (na <= nb) a else b
}
minEnhanced._2
}
def shuffle[T](list: Iterable[T], rnd: Random) : List[T] = {
rnd.shuffle(list.toList)
}
def split(rnd: Random) : (Random, Random) = {
(rnd, rnd)
}
}
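// Illustrative sketch, not part of the original source: exercising the helpers
// above. slices groups a row of cell numbers into its three boxes; minBy picks
// the element whose key is smallest.
object UtilExample {
  def main(args: Array[String]): Unit = {
    println(Util.slices(3, 0 to 8)) // three groups of three cell numbers
    println(Util.minBy(List("sudoku", "is", "fun"), {s: String => s.length})) // "is"
  }
}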
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Solution.scala | package net.tommay.sudoku
case class Solution(
puzzle: Puzzle,
steps: Iterable[Step])
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/EasyPeasy.scala | package net.tommay.sudoku
// Each Stripe has three ExclusionSet rows or columns. It has a Set
// of all column numbers in the Stripe, and a Stream of the three
// ExclusionSets. To find an EasyPeasy, count the occurrences of each
// digit in the entire Stripe. For each digit with two occurrences,
// check for an ExclusionSet with only one Unknown where the digit is
// possible.
// List performs better than Vector for cells.
case class Stripe(
cells: List[Int],
exclusionSets: Stream[ExclusionSet])
object EasyPeasy {
// Build all the possible Stripes so they can be searched for easy
// peasies.
val stripes : Stream[Stripe] = {
Util.slices(3, (ExclusionSet.rows ++ ExclusionSet.columns))
.toStream
.map(_.toStream)
.map(makeStripe)
}
// Take a stripe of three ExclusionSets in a Stream and create a
// Stripe with the combined Set of all the cell numbers, and the
// ExclusionSets.
//
def makeStripe(exclusionSets: Stream[ExclusionSet]) : Stripe = {
val allCells = exclusionSets.foldLeft(Set.empty[Int]){
case (accum, exclusionSet) => accum ++ exclusionSet.cells
}.toList
Stripe(allCells, exclusionSets)
}
// Return a Stream of all possible easy peasy placements for the Puzzle.
def find(puzzle: Puzzle, unknowns: Stream[Unknown]) : Stream[Next] = {
stripes.flatMap(findForEasyPeasyStripe(puzzle, unknowns))
}
// Returns any easy peasies in the Puzzle and Stripe. All digits
// are considered.
def findForEasyPeasyStripe
(puzzle: Puzzle, unknowns: Stream[Unknown])
(stripe: Stripe)
: Stream[Next] =
{
val placed = puzzle.placed
// This foldLeft is faster than the straightforward
// flatMap(cellNumber => placed.get(cellNumber)), ridiculous.
// And it's much faster to generate allDigits and do the groupBy than it
// is to tally the counts into a Map to begin with.
val allDigits =
stripe.cells.foldLeft(List.empty[Int]) {case (accum, cellNumber) =>
placed.get(cellNumber) match {
case None => accum
case Some(digit) => digit :: accum
}
}
val doubleDigits = allDigits
.groupBy(identity)
.toStream
.withFilter{case (_, list) => list.size == 2}
.map{case (digit, _) => digit}
stripe.exclusionSets.flatMap(findOnePossible(unknowns, doubleDigits))
}
def findOnePossible
(unknowns: Stream[Unknown], digits: Stream[Int])
(exclusionSet: ExclusionSet)
: Stream[Next] =
{
digits.flatMap(Solver.findNeededDigitInSet(
unknowns, exclusionSet.cells, Heuristic.EasyPeasy))
}
}
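// Illustrative sketch, not part of the original source: the 9 rows and 9
// columns are grouped in threes, so we expect 6 stripes of 27 cells each.
object EasyPeasyExample {
  def main(args: Array[String]): Unit = {
    println(EasyPeasy.stripes.size)            // expected: 6
    println(EasyPeasy.stripes.head.cells.size) // expected: 27
  }
}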
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/HinterForJava.scala | package net.tommay.sudoku
// Provide a "getHint" function intended to be called from Java
// that does all the heavy lifting like Random creation and dredging
// information out of the Solution in Scala.
object HinterForJava {
def getHint(randomSeed: Int, puzzleString: String) : Option[Hint] = {
// Heuristics are listed here from easiest for humans to do, to
// hardest.
val options = new SolverOptions(
List(
Heuristic.MissingOne,
Heuristic.EasyPeasy,
Heuristic.Needed,
Heuristic.MissingTwo,
Heuristic.Tricky,
Heuristic.Forced),
false, true, false)
val rnd = new scala.util.Random(randomSeed)
val puzzle = Puzzle.fromString(puzzleString)
val solutions = Solver.randomSolutions(options, rnd)(puzzle)
solutions match {
case (solution #:: _) =>
val stepsWithPlacement =
solution.steps.filter(_.placementOption.isDefined)
stepsWithPlacement match {
case (step :: _) =>
Some(Hint(step.tjpe, step.placementOption.get, step.cells))
// This should only happen if the puzzle is solved, in which case
// getHint won't be called.
case _ => None
}
// There are no solutions, they've made a mistake.
case _ => None
}
}
// Load a puzzle from the given file and print its randomized hint.
def main(args: Array[String]) {
val filename = args(0)
val puzzleString = Solve.getSetup(filename)
val seed = System.currentTimeMillis.toInt
println(getHint(seed, puzzleString))
}
}
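// Illustrative sketch, not part of the original source: calling getHint
// directly from Scala with a hypothetical seed and an empty 81-character
// setup string, then printing the resulting Option[Hint].
object HinterForJavaExample {
  def main(args: Array[String]): Unit = {
    println(HinterForJava.getHint(42, "-" * 81))
  }
}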
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Heuristic.scala | package net.tommay.sudoku
// XXX grok Enumeration.
object Heuristic extends Enumeration
{
val
EasyPeasy,
MissingOne,
MissingTwo,
Needed,
Forced,
Tricky,
// These are not heuristics, they're just used to type Next and
// Step.
Initial,
Guess,
ForcedGuess
= Value
}
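// Illustrative sketch, not part of the original source: Heuristic is a plain
// scala Enumeration, so its values can be iterated and compared.
object HeuristicExample {
  def main(args: Array[String]): Unit = {
    Heuristic.values.foreach(println) // EasyPeasy, MissingOne, ...
    println(Heuristic.Tricky.id)      // declaration-order id
  }
}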
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Puzzle.scala | package net.tommay.sudoku
// cellNumber -> Digit
// XXX this should be private, all construction via Puzzle.methods.
// All a Puzzle has is a Map from cellNumbers to the Digit placed at
// that cellNumber.
case class Puzzle(
placed : Map[Int, Int] = Map.empty)
{
type Digit = Int
// For when we need to iterate over all cellNumber/Digits.
def each: Iterable[(Int, Digit)] = {
placed
}
// Adds a new Digit to the puzzle at the given cellNumber.
def place(cellNumber: Int, digit: Digit) : Puzzle = {
this.copy(placed = placed + (cellNumber -> digit))
}
// Given an Iterable of cellNumbers, removes the Digits from those
// cellNumbers.
def remove(cellNumbers: Iterable[Int]) : Puzzle = {
val remaining = cellNumbers.foldLeft(placed) {
(map, cellNumber) => map - cellNumber
}
this.copy(placed = remaining)
}
// Returns the number of placed digits.
def size : Int = {
placed.size
}
// The opposite of fromString. Given a Puzzle, create a string of
// 81 digits or dashes. Creates two lists of (cellNumber, Char),
// one for placed cells and one for unplaced cells, then sorts them
// together and extracts the Chars in order.
override
def toString : String = {
val p = each.map{
case (k, v) => (k, (v + '0'.toInt).toChar)
}
val unknownNumbers = (0 to 80).toSet -- placed.keySet
val u = unknownNumbers.map{(_, '-')}
(p ++ u).toList.sorted.map{_._2}.mkString
}
// Returns a string that prints out as a grid of digits.
def toPuzzleString : String = {
val string = this.toString
Util.slices(27, string).map {superRow =>
Util.slices(9, superRow).map {row =>
Util.slices(3, row).map{_.mkString}.mkString(" ")
}.mkString("\n")
}.mkString("\n\n")
}
}
object Puzzle {
// Returns a new Puzzle with nothing placed.
def empty : Puzzle = {
Puzzle()
}
// Returns a new Puzzle with each Cell initialized according to
// Setup, which is a string of 81 digits or dashes.
def fromString(setup: String) : Puzzle = {
val digits = toDigits(setup)
val zipped = (0 to 80).zip(digits)
zipped.foldLeft(Puzzle.empty) {case (puzzle, (cellNumber, digit)) =>
digit match {
case None => puzzle
case Some(digit) => puzzle.place(cellNumber, digit)
}
}
}
// Given a Setup String, returns a Seq of Option[Digit] for each
// cell.
// xxx Digit, digit conversion
def toDigits(setup: String) : Seq[Option[Int]] = {
setup.map{
case '-' => None
case char => Some(char.toInt - '0'.toInt)
}
}
}
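// Illustrative sketch, not part of the original source: fromString and
// toString round-trip an 81-character setup string of digits and dashes.
object PuzzleExample {
  def main(args: Array[String]): Unit = {
    val setup = "5" + ("-" * 80)      // a lone 5 placed in cell 0
    val puzzle = Puzzle.fromString(setup)
    println(puzzle.size)              // 1
    println(puzzle.toString == setup) // true
    println(puzzle.toPuzzleString)
  }
}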
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Step.scala | package net.tommay.sudoku
case class Step(
puzzle: Puzzle,
tjpe: Heuristic.Value,
placementOption: Option[Placement] = None,
cells: Iterable[Int] = List.empty)
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/Next.scala | package net.tommay.sudoku
case class Next(
tjpe: Heuristic.Value,
placement: Placement,
cells: Iterable[Int])
|
tommay/spudoku-android | scala/external/src/main/scala/net/tommay/sudoku/SolverOptions.scala | package net.tommay.sudoku
// EasyPeasy: An easy pattern to spot visually, where two rows or columns
// in a 3-stripe contain a digit and there is only one place in the
// remaining row or column where it can go. This is a subset of Needed, but is
// easy to spot.
// MissingOne: A set is missing only one digit. This is a subset of
// both Needed and Forced, but is easier to spot.
// MissingTwo: A set is missing two digits, and at least one is Forced. The
// remaining digit will eventually be found by MissingOne or some other
// means.
// Tricky: A square is missing a digit and there is only one row or column
// where it can go. There is not enough information to place the digit
// in the square, but the digit can be eliminated as a possibility for the
// rest of the row or column. For the heuristic we just check whether
// the containing squares need the digit and there is now just one place
// for it in the square.
// Needed: A set doesn't have a digit and there is only one place it can go.
// Forced: A cell has only one possibility, all others having been eliminated.
// This is most tedious to spot.
//
// Of the heuristics, only Needed, Tricky, and Forced are independent:
// none is a subset of any other, nor of the combination of the others. They
// can all find placements the others can't.
//
// Whereas the Tricky heuristic removes possibilities only while checking
// for needed digits, the usePermanentTrickySets option will eliminate
// the possibilities for the rest of the solution steps, and will also
// eliminate using "inverse" TrickySets in which possibilities are removed
// from the remainder of a square instead of a row/column. The idea is to
// get the strongest solver that doesn't require guessing, even though
// it's impossible to solve these puzzles visually.
case class SolverOptions(
useHeuristics: Boolean,
heuristics: Iterable[Heuristic.Value],
usePermanentTrickySets: Boolean,
useGuessing: Boolean,
solveCompletely: Boolean)
{
def this(heuristics: Iterable[Heuristic.Value],
usePermanentTrickySets: Boolean,
useGuessing: Boolean)
{
this(heuristics.nonEmpty, heuristics, usePermanentTrickySets, useGuessing,
true)
}
def this(
heuristics: Iterable[Heuristic.Value],
usePermanentTrickySets: Boolean,
useGuessing: Boolean,
solveCompletely: Boolean)
{
this(heuristics.nonEmpty, heuristics, usePermanentTrickySets, useGuessing,
solveCompletely)
}
}
object SolverOptions {
val all = new SolverOptions(List(), false, true)
// Try Forced first because it's fast. MissingOne and MissingTwo
// are redundant with Forced. EasyPeasy is redundant with Needed.
val noGuessing = new SolverOptions(
List(Heuristic.Forced, Heuristic.Needed, Heuristic.Tricky), false, false)
}
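// Illustrative sketch, not part of the original source: building a
// hint-oriented configuration with the four-argument auxiliary constructor
// (heuristics, usePermanentTrickySets, useGuessing, solveCompletely).
object SolverOptionsExample {
  val hintOptions = new SolverOptions(
    List(Heuristic.MissingOne, Heuristic.Needed, Heuristic.Forced),
    false, false, false)
}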
|
keyno63/scalikejdbc | scalikejdbc-core/src/main/scala/scalikejdbc/OneToOneSQL.scala | package scalikejdbc
import scala.collection.mutable.LinkedHashMap
import scala.collection.compat._
private[scalikejdbc] trait OneToOneExtractor[A, B, E <: WithExtractor, Z]
extends SQL[Z, E]
with RelationalSQLResultSetOperations[Z] {
private[scalikejdbc] def extractOne: WrappedResultSet => A
private[scalikejdbc] def extractTo: WrappedResultSet => Option[B]
private[scalikejdbc] def transform: (A, B) => Z
private[scalikejdbc] def processResultSet(
oneToOne: LinkedHashMap[A, Option[B]],
rs: WrappedResultSet
): LinkedHashMap[A, Option[B]] = {
val o = extractOne(rs)
if (oneToOne.contains(o)) {
throw new IllegalRelationshipException(
ErrorMessage.INVALID_ONE_TO_ONE_RELATION
)
} else {
oneToOne += (o -> extractTo(rs))
}
}
private[scalikejdbc] def toIterable(
session: DBSession,
sql: String,
params: scala.collection.Seq[_],
zExtractor: (A, B) => Z
): Iterable[Z] = {
val attributesSwitcher = createDBSessionAttributesSwitcher
DBSessionWrapper(session, attributesSwitcher)
.foldLeft(statement, rawParameters.toSeq: _*)(
LinkedHashMap[A, Option[B]]()
)(processResultSet)
.map {
case (one, Some(to)) => zExtractor(one, to)
case (one, None) => one.asInstanceOf[Z]
}
}
}
class OneToOneSQL[A, B, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(val toOne: WrappedResultSet => Option[B])(
val zExtractor: (A, B) => Z
) extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-one extractor(one(RS => A).toOne(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with AllOutputDecisionsUnsupported[Z, E] {
def map(zExtractor: (A, B) => Z): OneToOneSQL[A, B, HasExtractor, Z] = {
new OneToOneSQL(statement, rawParameters)(one)(toOne)(zExtractor)
}
override def toIterable: OneToOneSQLToIterable[A, B, E, Z] = {
new OneToOneSQLToIterable[A, B, E, Z](statement, rawParameters)(one)(toOne)(
zExtractor
)
}
override def toList: OneToOneSQLToList[A, B, E, Z] = {
new OneToOneSQLToList[A, B, E, Z](statement, rawParameters)(one)(toOne)(
zExtractor
)
}
override def toOption: OneToOneSQLToOption[A, B, E, Z] = {
new OneToOneSQLToOption[A, B, E, Z](statement, rawParameters)(one)(toOne)(
zExtractor
)(true)
}
override def headOption: OneToOneSQLToOption[A, B, E, Z] = {
new OneToOneSQLToOption[A, B, E, Z](statement, rawParameters)(one)(toOne)(
zExtractor
)(false)
}
override def toCollection: OneToOneSQLToCollection[A, B, E, Z] = {
new OneToOneSQLToCollection[A, B, E, Z](statement, rawParameters)(one)(
toOne
)(zExtractor)
}
override def single: OneToOneSQLToOption[A, B, E, Z] = toOption
override def first: OneToOneSQLToOption[A, B, E, Z] = headOption
override def list: OneToOneSQLToList[A, B, E, Z] = toList
override def iterable: OneToOneSQLToIterable[A, B, E, Z] = toIterable
override def collection: OneToOneSQLToCollection[A, B, E, Z] = toCollection
}
object OneToOneSQL {
def unapply[A, B, E <: WithExtractor, Z](
sqlObject: OneToOneSQL[A, B, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B],
(A, B) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.toOne,
sqlObject.zExtractor
)
)
}
}
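// Illustrative sketch, not part of the original source: typical use of the
// one-to-one DSL that produces the classes above. Table and column names are
// hypothetical; the chain assumes the documented one(...).toOne(...).map(...)
// API and an implicit DBSession in scope.
object OneToOneSQLExample {
  import scalikejdbc._

  case class Owner(id: Long, name: String)
  case class Pet(id: Long, name: String, owner: Option[Owner] = None)

  def petsWithOwners()(implicit session: DBSession): List[Pet] = {
    sql"""select p.id as p_id, p.name as p_name, o.id as o_id, o.name as o_name
          from pets p inner join owners o on o.id = p.owner_id"""
      .one(rs => Pet(rs.long("p_id"), rs.string("p_name")))
      .toOne(rs => Owner(rs.long("o_id"), rs.string("o_name")))
      .map((pet, owner) => pet.copy(owner = Some(owner)))
      .list
      .apply()
  }
}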
class OneToOneSQLToIterable[A, B, E <: WithExtractor, Z](
override val statement: String,
override private[scalikejdbc] val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(val toOne: WrappedResultSet => Option[B])(
val zExtractor: (A, B) => Z
) extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-one extractor(one(RS => A).toOne(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToIterable[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToOneExtractor[A, B, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): Iterable[Z] = {
executeQuery[Iterable](
session,
(session: DBSession) =>
toIterable(session, statement, rawParameters, transform)
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo: WrappedResultSet => Option[B] = toOne
private[scalikejdbc] def transform: (A, B) => Z = zExtractor
}
object OneToOneSQLToIterable {
def unapply[A, B, E <: WithExtractor, Z](
sqlObject: OneToOneSQLToIterable[A, B, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B],
(A, B) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.toOne,
sqlObject.zExtractor
)
)
}
}
class OneToOneSQLToList[A, B, E <: WithExtractor, Z](
override val statement: String,
override private[scalikejdbc] val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(val toOne: WrappedResultSet => Option[B])(
val zExtractor: (A, B) => Z
) extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-one extractor(one(RS => A).toOne(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToList[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToOneExtractor[A, B, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): List[Z] = {
executeQuery[List](
session,
(session: DBSession) =>
toIterable(session, statement, rawParameters, zExtractor).toList
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo: WrappedResultSet => Option[B] = toOne
private[scalikejdbc] def transform: (A, B) => Z = zExtractor
}
object OneToOneSQLToList {
def unapply[A, B, E <: WithExtractor, Z](
sqlObject: OneToOneSQLToList[A, B, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B],
(A, B) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.toOne,
sqlObject.zExtractor
)
)
}
}
class OneToOneSQLToCollection[A, B, E <: WithExtractor, Z](
override val statement: String,
override private[scalikejdbc] val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(val toOne: WrappedResultSet => Option[B])(
val zExtractor: (A, B) => Z
) extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-one extractor(one(RS => A).toOne(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToCollection[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToOneExtractor[A, B, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply[C[_]]()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor,
f: Factory[Z, C[Z]]
): C[Z] = {
executeQuery(
session,
(session: DBSession) =>
f.fromSpecific(
toIterable(session, statement, rawParameters, zExtractor)
)
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo: WrappedResultSet => Option[B] = toOne
private[scalikejdbc] def transform: (A, B) => Z = zExtractor
}
object OneToOneSQLToCollection {
def unapply[A, B, E <: WithExtractor, Z](
sqlObject: OneToOneSQLToCollection[A, B, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B],
(A, B) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.toOne,
sqlObject.zExtractor
)
)
}
}
class OneToOneSQLToOption[A, B, E <: WithExtractor, Z](
override val statement: String,
override private[scalikejdbc] val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(val toOne: WrappedResultSet => Option[B])(
val zExtractor: (A, B) => Z
)(val isSingle: Boolean = true)
extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-one extractor(one(RS => A).toOne(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToOption[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToOneExtractor[A, B, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): Option[Z] = {
executeQuery[Option](
session,
(session: DBSession) =>
toSingle(toIterable(session, statement, rawParameters, zExtractor))
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo: WrappedResultSet => Option[B] = toOne
private[scalikejdbc] def transform: (A, B) => Z = zExtractor
}
object OneToOneSQLToOption {
def unapply[A, B, E <: WithExtractor, Z](
sqlObject: OneToOneSQLToOption[A, B, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B],
(A, B) => Z,
Boolean
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.toOne,
sqlObject.zExtractor,
sqlObject.isSingle
)
)
}
}
|
keyno63/scalikejdbc | scalikejdbc-streams/src/main/scala/scalikejdbc/streams/DatabaseSubscription.scala | package scalikejdbc.streams
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicLong }
import org.reactivestreams.{ Subscriber, Subscription }
import scalikejdbc._
import scala.concurrent.Promise
import scala.util.{ Failure, Success }
import scala.util.control.NonFatal
/**
* A DatabaseSubscription represents a one-to-one lifecycle of a Subscriber subscribing to a DatabasePublisher.
*
* It can only be used once by a single Subscriber.
* It is used to both signal desire for data and cancel demand (and allow resource cleanup).
*/
private[streams] class DatabaseSubscription[A](
/**
* DatabasePublisher in the fashion of Reactive Streams.
*/
private[streams] val publisher: DatabasePublisher[A],
/**
* Subscriber in the fashion of Reactive Streams.
*/
private[streams] val subscriber: Subscriber[_ >: A]
) extends Subscription
with LogSupport {
// -----------------------------------------------
// Internal state
// -----------------------------------------------
/**
* Stream ready SQL object.
*/
private def sql: StreamReadySQL[A] = publisher.sql
/**
* A volatile variable to enforce the happens-before relationship when executing something in a synchronous action context.
*
* - [[https://docs.oracle.com/javase/specs/jls/se7/html/jls-17.html]]
* - [[http://gee.cs.oswego.edu/dl/jmm/cookbook.html]]
*
* It is read when entering the context and written when leaving it
* so that all writes to non-volatile variables within the context are visible to the next synchronous execution.
*/
@volatile
private var sync: Int = 0
/**
* A database session occupied by current subscription.
*/
private[this] var _maybeOccupiedDBSession: Option[DBSession] = None
/**
* The state for a suspended streaming action.
* Must be set only from a synchronous action context.
*/
private var maybeRemainingIterator: Option[StreamResultSetIterator[A]] = None
/**
* The Promise to complete when streaming has finished.
*/
private val endOfStream: Promise[Unit] = Promise[Unit]()
/**
* The total number of elements requested and not yet marked as delivered by the synchronous streaming action.
*
* Whenever the value drops to 0, streaming is suspended.
* The value is initially set to `Long.MinValue` when the streaming starts.
* When the value is raised up from 0 in #request(Long), the streaming is scheduled to be restarted.
* Any negative value of more than `Long.MinValue` indicates the actual demand at that point.
* It is reset to 0 when the initial streaming ends.
*/
private[this] val _numberOfRemainingElements: AtomicLong = new AtomicLong(
Long.MinValue
)
/**
* Returns true if it has been cancelled by the Subscriber.
*/
private[this] val _isCancellationAlreadyRequested: AtomicBoolean =
new AtomicBoolean(false)
/**
* Whether the Subscriber has been signaled with `onComplete` or `onError`.
*/
private[this] val _isCurrentSubscriptionFinished: AtomicBoolean =
new AtomicBoolean(false)
/**
* An error that will be signaled to the Subscriber when the stream is cancelled or terminated.
*
* This is used for signaling demand overflow in #request(Long)
* while guaranteeing that the #onError(Throwable) message does not overlap with an active #onNext(A) call.
*/
private[this] var _maybeDeferredError: Option[Throwable] = None
// -----------------------------------------------
// Reactive Streams Subscription APIs
// -----------------------------------------------
/**
* No events will be sent by a Publisher until demand is signaled via this method.
*/
override def request(n: Long): Unit = {
if (isCancellationAlreadyRequested) {
if (log.isDebugEnabled) {
log.debug(
s"Subscription#request($n) called from subscriber: ${subscriber} after cancellation, skipped processing"
)
}
} else {
if (log.isDebugEnabled) {
log.debug(
s"Subscription#request($n) called from subscriber: ${subscriber}"
)
}
if (n <= 0) {
// 3. Subscription - 9
// see: https://github.com/reactive-streams/reactive-streams-jvm/blob/v1.0.0/README.md#3-subscription-code
//
// While the Subscription is not cancelled, Subscription.request(long n)
// MUST signal onError with a java.lang.IllegalArgumentException if the argument is <= 0.
// The cause message MUST include a reference to this rule and/or quote the full rule.
//
_maybeDeferredError = Some(
new IllegalArgumentException(
"The n of Subscription#request(long n) must not be larger than 0 (Reactive Streams spec, 3.9)"
)
)
cancel()
} else {
// 3. Subscription - 17
// see: https://github.com/reactive-streams/reactive-streams-jvm/blob/v1.0.0/README.md#3-subscription-code
//
// A Subscription MUST support an unbounded number of calls to request
// and MUST support a demand (sum requested - sum delivered) up to 2^63-1 (java.lang.Long.MAX_VALUE).
// A demand equal or greater than 2^63-1 (java.lang.Long.MAX_VALUE) MAY be considered by the Publisher as “effectively unbounded”[3].
//
if (
isCancellationAlreadyRequested == false && _numberOfRemainingElements
.getAndAdd(n) == 0L
) {
reScheduleSynchronousStreaming()
}
}
}
}
/**
* Requests the Publisher to stop sending data and clean up resources.
*/
override def cancel(): Unit = {
if (_isCancellationAlreadyRequested.getAndSet(true)) {
if (log.isDebugEnabled) {
log.debug(
s"Subscription#cancel() called from subscriber: ${subscriber} again, skipped processing"
)
}
} else {
log.info(s"Subscription#cancel() called from subscriber: ${subscriber}")
// restart the streaming here because cancelling it requires closing the occupied database session.
// This will also complete the result Promise and thus allow the rest of the scheduled Action to run.
if (_numberOfRemainingElements.getAndSet(Long.MaxValue) == 0L) {
try {
reScheduleSynchronousStreaming()
} catch {
case t: Throwable =>
log.warn("Caught an exception in Subscription#cancel()", t)
finishAsCompletionWithoutException()
t match {
case _: InterruptedException => Thread.currentThread().interrupt()
case _ => throw t
}
}
}
}
}
// -----------------------------------------------
// scalikejdbc-streams internal APIs
// -----------------------------------------------
/**
* Prepares the completion handler of the current streaming process.
*/
private[streams] def prepareCompletionHandler(): Unit = {
implicit val ec = publisher.asyncExecutor.executionContext
endOfStream.future.onComplete {
case Success(_) => onComplete()
case Failure(t) => onError(t)
}
}
/**
* Finishes the streaming when some error happens.
*/
private[streams] def onError(t: Throwable): Unit = {
if (_isCurrentSubscriptionFinished.getAndSet(true) == false) {
if (log.isDebugEnabled) {
log.debug(
s"Subscriber#onError for subscriber: ${subscriber} called with exception: $t"
)
}
try {
subscriber.onError(t)
} catch {
case NonFatal(e) =>
log.warn(
s"Subscriber#onError for subscriber: ${subscriber} unexpectedly failed because ${e.getMessage}",
e
)
}
}
}
/**
* Starts new streaming.
*/
private[streams] def startNewStreaming(): Unit = {
scheduleSynchronousStreaming(None)
}
// -----------------------------------------------
// Internal APIs
// visible to only threads current subscription started
// -----------------------------------------------
/**
* Indicate that the specified number of elements has been delivered.
*
* Returns the remaining demand.
*
* This is an atomic operation.
* It must only be called from the synchronous action context which performs the streaming.
*/
private def saveNumberOfDeliveredElementsAndReturnRemainingDemand(
num: Long
): Long = {
_numberOfRemainingElements.addAndGet(-num)
}
/**
* Get the current demand that has not yet been marked as delivered and mark it as being in the current batch.
* When this value is negative, the initial streaming action is still running
* and the real demand can be computed by subtracting `Long.MinValue` from the returned value.
*/
private def demandBatch: Long = _numberOfRemainingElements.get()
/**
* Returns the deferred error of current subscription if exists.
*/
private def maybeDeferredError: Option[Throwable] = _maybeDeferredError
/**
* Returns the DBSession occupied by current subscription.
*/
private def maybeOccupiedDBSession: Option[DBSession] =
_maybeOccupiedDBSession
/**
* Whether the stream has been cancelled by the Subscriber
*/
private def isCancellationAlreadyRequested: Boolean =
_isCancellationAlreadyRequested.get()
/**
* Whether the current subscription is already finished.
*/
private def isCurrentSubscriptionFinished: Boolean =
_isCurrentSubscriptionFinished.get()
/**
* Issues a query and creates a new iterator to consume.
*/
private def issueQueryAndCreateNewIterator(): StreamResultSetIterator[A] = {
val occupiedDBSession =
maybeOccupiedDBSession.getOrElse(occupyNewDBSession())
val statementExecutor = new DBSessionWrapper(
occupiedDBSession,
sql.createDBSessionAttributesSwitcher
).toStatementExecutor(sql.statement, sql.rawParameters)
val resultSet = statementExecutor.executeQuery()
val resultSetProxy = new DBConnectionAttributesWiredResultSet(
resultSet,
occupiedDBSession.connectionAttributes
)
new StreamResultSetIterator[A](resultSetProxy, sql.extractor) {
private[this] var closed = false
override def close(): Unit = {
if (!closed) {
statementExecutor.close()
closed = true
}
}
}
}
/**
* Borrows a new database session and returns it.
*/
private def occupyNewDBSession(): DBSession = {
if (log.isDebugEnabled) {
log.debug(
s"Acquiring a new database session for subscriber: ${subscriber}"
)
}
_maybeOccupiedDBSession match {
case Some(_) => releaseOccupiedDBSession(true)
case _ =>
}
val session: DBSession = {
implicit val cpContext = publisher.settings.connectionPoolContext
val sessionProvider: NamedDB =
NamedDB(publisher.settings.dbName, publisher.settings.settingsProvider)
sessionProvider.autoClose(false).readOnlySession()
}
_maybeOccupiedDBSession = Some(session)
session
}
/**
* Releases the occupied database session.
*/
private def releaseOccupiedDBSession(discardErrors: Boolean): Unit = {
if (log.isDebugEnabled) {
log.debug(
s"Releasing the occupied database session for subscriber: ${subscriber}"
)
}
try {
_maybeOccupiedDBSession match {
case Some(session) => session.close()
case _ =>
}
} catch {
case NonFatal(e) if discardErrors =>
if (log.isDebugEnabled) {
log.debug(
s"Failed to close the occupied database session because ${e.getMessage}",
e
)
} else {
log.info(
s"Failed to close the occupied database session because ${e.getMessage}, exception: ${ClassNameUtil
.getClassName(e.getClass)}"
)
}
} finally {
_maybeOccupiedDBSession = None
}
}
// -----------------------------------------------
// Completely internal methods
// -----------------------------------------------
/**
* Schedules a synchronous streaming which holds the given iterator.
*/
private[this] def scheduleSynchronousStreaming(
maybeIterator: Option[StreamResultSetIterator[A]]
): Unit = {
val currentSubscription = this
try {
val task: Runnable = new Runnable() {
def run(): Unit = {
try {
// must start with remaining iterator every time invoking this Runnable
var maybeRemainingIterator: Option[StreamResultSetIterator[A]] =
maybeIterator
val _ = currentSubscription.sync
maybeRemainingIterator match {
case None => currentSubscription.occupyNewDBSession()
case _ =>
}
var demand: Long = currentSubscription.demandBatch
var realDemand: Long =
if (demand < 0) demand - Long.MinValue else demand
def loop(): Unit = {
try {
if (currentSubscription.isCancellationAlreadyRequested) {
// ------------------------
// cancelling the current subscription
log.info(
s"Cancellation from subscriber: ${currentSubscription.subscriber} detected"
)
try {
currentSubscription.maybeDeferredError match {
case Some(error) =>
log.info(
s"Responding the deferred error : ${currentSubscription.maybeDeferredError} to the cancellation"
)
throw error
case _ =>
}
} finally {
cleanUpResources()
}
} else if (realDemand > 0 || maybeRemainingIterator.isEmpty) {
// ------------------------
// proceed with the remaining iterator
// create a new iterator if it absent.
val iterator: StreamResultSetIterator[A] = {
maybeRemainingIterator match {
case Some(iterator) => iterator
case _ =>
currentSubscription.issueQueryAndCreateNewIterator()
}
}
maybeRemainingIterator =
emitElementsAndReturnRemainingIterator(realDemand, iterator)
}
if (maybeRemainingIterator.isEmpty) {
log.info(
s"All data for subscriber: ${currentSubscription.subscriber} has been sent"
)
finishAsCompletionWithoutException()
}
} catch {
case NonFatal(e) =>
if (log.isDebugEnabled) {
log.debug(
s"Unexpectedly failed to deal with remaining iterator because ${e.getMessage}",
e
)
} else {
log.info(
s"Unexpectedly failed to deal with remaining iterator because ${e.getMessage}, exception: ${ClassNameUtil
.getClassName(e.getClass)}"
)
}
cleanUpResources()
throw e
} finally {
currentSubscription.maybeRemainingIterator =
maybeRemainingIterator
currentSubscription.sync = 0
}
demand = currentSubscription
.saveNumberOfDeliveredElementsAndReturnRemainingDemand(demand)
realDemand = if (demand < 0) demand - Long.MinValue else demand
}
loop()
while (maybeRemainingIterator.isDefined && realDemand > 0) {
loop()
}
} catch {
case NonFatal(ex) =>
currentSubscription.endOfStream.tryFailure(ex)
}
}
}
publisher.asyncExecutor.execute(task)
} catch {
case NonFatal(e) =>
log.warn(
s"Failed to schedule a synchronous processing because ${e.getMessage}",
e
)
throw e
}
}
/**
* Restarts a suspended streaming action.
* Must only be called from the Subscriber context.
*/
private[this] def reScheduleSynchronousStreaming(): Unit = {
val _ = sync
maybeRemainingIterator match {
case Some(remainingIterator) =>
maybeRemainingIterator = None
scheduleSynchronousStreaming(Some(remainingIterator))
case _ =>
}
}
/**
* Emits a bunch of elements.
*/
private[this] def emitElementsAndReturnRemainingIterator(
realDemand: Long,
iterator: StreamResultSetIterator[A]
): Option[StreamResultSetIterator[A]] = {
val bufferNext = publisher.settings.bufferNext
var count = 0L
try {
while ({
if (bufferNext) iterator.hasNext && count < realDemand
else count < realDemand && iterator.hasNext
}) {
count += 1
subscriber.onNext(iterator.next())
}
} catch {
case NonFatal(e) =>
try {
iterator.close()
} catch { case NonFatal(_) => }
throw e
}
if (log.isDebugEnabled) {
log.debug(s"Emitted $count element${if (count > 1) "s"
else ""} to subscriber: ${subscriber}, realDemand: ${realDemand}")
}
if (
(bufferNext && iterator.hasNext)
|| (bufferNext == false && count == realDemand)
) {
Some(iterator)
} else {
None
}
}
/**
* Cleans up the occupied resources.
*/
private[this] def cleanUpResources(): Unit = {
try {
releaseOccupiedDBSession(true)
log.info(
s"Finished cleaning up database resources occupied for subscriber: ${subscriber}"
)
} catch {
case NonFatal(e) =>
log.warn(
"Caught an exception while releasing the occupied database session",
e
)
} finally {
try {
maybeRemainingIterator match {
case Some(iterator) =>
if (iterator != null) {
iterator.close()
}
maybeRemainingIterator = None
case _ =>
}
} catch {
case NonFatal(e) =>
log.warn(
"Caught an exception while closing the remaining iterator",
e
)
}
}
}
/**
* Finishes the current subscription as completed.
*/
private[this] def finishAsCompletionWithoutException(): Unit = {
try {
cleanUpResources()
} catch {
case e: Throwable => throw e
} finally {
try {
endOfStream.trySuccess(())
} catch {
case NonFatal(e) =>
log.warn("Caught an exception while finishing the subscription", e)
}
}
}
/**
* Finishes the stream with `onComplete` if it is not finished yet.
* May only be called from a synchronous action context.
*/
private[this] def onComplete(): Unit = {
if (
isCurrentSubscriptionFinished == false && isCancellationAlreadyRequested == false
) {
if (log.isDebugEnabled) {
log.debug(
s"Invoking ${subscriber}#onComplete() from Subscription#onComplete()"
)
}
_isCurrentSubscriptionFinished.set(true)
try {
subscriber.onComplete()
} catch {
case NonFatal(e) =>
log.warn(
s"Subscriber#onComplete() for subscriber: ${subscriber} unexpectedly failed because ${e.getMessage}",
e
)
}
}
}
}
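// Illustrative sketch, not part of the original source: a minimal Subscriber
// that exercises the request/cancel protocol handled by the subscription
// above. It asks for rows in batches of 10 and cancels after 100 elements so
// the occupied database session gets released.
class CountingSubscriber[A] extends Subscriber[A] {
  private[this] var subscription: Subscription = _
  private[this] var received = 0L

  override def onSubscribe(s: Subscription): Unit = {
    subscription = s
    s.request(10L) // signal initial demand; nothing is sent before this
  }
  override def onNext(element: A): Unit = {
    received += 1
    if (received >= 100L) subscription.cancel()
    else if (received % 10L == 0L) subscription.request(10L)
  }
  override def onError(t: Throwable): Unit = t.printStackTrace()
  override def onComplete(): Unit = println(s"completed after $received elements")
}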
|
keyno63/scalikejdbc | scalikejdbc-core/src/main/scala/scalikejdbc/OneToManies2SQL.scala | package scalikejdbc
import scala.collection.mutable.LinkedHashMap
import scala.collection.compat._
private[scalikejdbc] trait OneToManies2Extractor[
A,
B1,
B2,
E <: WithExtractor,
Z
] extends SQL[Z, E]
with RelationalSQLResultSetOperations[Z] {
private[scalikejdbc] def extractOne: WrappedResultSet => A
private[scalikejdbc] def extractTo1: WrappedResultSet => Option[B1]
private[scalikejdbc] def extractTo2: WrappedResultSet => Option[B2]
private[scalikejdbc] def transform
: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
private[scalikejdbc] def processResultSet(
result: LinkedHashMap[A, (Seq[B1], scala.collection.Seq[B2])],
rs: WrappedResultSet
): LinkedHashMap[A, (Seq[B1], scala.collection.Seq[B2])] = {
val o = extractOne(rs)
val (to1, to2) = (extractTo1(rs), extractTo2(rs))
if (result.contains(o)) {
to1
.orElse(to2)
.map { _ =>
val (ts1, ts2) = result.apply(o)
result += ((o -> (
(
to1
.map(t => if (ts1.contains(t)) ts1 else ts1 :+ t)
.getOrElse(ts1),
to2
.map(t => if (ts2.contains(t)) ts2 else ts2 :+ t)
.getOrElse(ts2)
)
)))
}
.getOrElse(result)
} else {
result += ((o -> (
(
to1.map(t => Vector(t)).getOrElse(Vector()),
to2.map(t => Vector(t)).getOrElse(Vector())
)
)))
}
}
private[scalikejdbc] def toIterable(
session: DBSession,
sql: String,
params: scala.collection.Seq[_],
zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
): Iterable[Z] = {
val attributesSwitcher = createDBSessionAttributesSwitcher
DBSessionWrapper(session, attributesSwitcher)
.foldLeft(statement, rawParameters.toSeq: _*)(
LinkedHashMap[A, (Seq[B1], scala.collection.Seq[B2])]()
)(processResultSet)
.map { case (one, (t1, t2)) =>
zExtractor(one, t1, t2)
}
}
}
class OneToManies2SQL[A, B1, B2, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(
val to1: WrappedResultSet => Option[B1],
val to2: WrappedResultSet => Option[B2]
)(val zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z)
extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-many extractor(one(RS => A).toMany(RS => Option[B])) is specified, use #map((A,B) =>Z) instead."
)
)
with AllOutputDecisionsUnsupported[Z, E] {
def map(
zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
): OneToManies2SQL[A, B1, B2, HasExtractor, Z] = {
new OneToManies2SQL(statement, rawParameters)(one)(to1, to2)(zExtractor)
}
override def toIterable: OneToManies2SQLToIterable[A, B1, B2, E, Z] = {
new OneToManies2SQLToIterable[A, B1, B2, E, Z](statement, rawParameters)(
one
)(to1, to2)(zExtractor)
}
override def toList: OneToManies2SQLToList[A, B1, B2, E, Z] = {
new OneToManies2SQLToList[A, B1, B2, E, Z](statement, rawParameters)(one)(
to1,
to2
)(zExtractor)
}
override def toOption: OneToManies2SQLToOption[A, B1, B2, E, Z] = {
new OneToManies2SQLToOption[A, B1, B2, E, Z](statement, rawParameters)(one)(
to1,
to2
)(zExtractor)(true)
}
override def toCollection: OneToManies2SQLToCollection[A, B1, B2, E, Z] = {
new OneToManies2SQLToCollection[A, B1, B2, E, Z](statement, rawParameters)(
one
)(to1, to2)(zExtractor)
}
override def headOption: OneToManies2SQLToOption[A, B1, B2, E, Z] = {
new OneToManies2SQLToOption[A, B1, B2, E, Z](statement, rawParameters)(one)(
to1,
to2
)(zExtractor)(false)
}
override def single: OneToManies2SQLToOption[A, B1, B2, E, Z] = toOption
override def first: OneToManies2SQLToOption[A, B1, B2, E, Z] = headOption
override def list: OneToManies2SQLToList[A, B1, B2, E, Z] = toList
override def iterable: OneToManies2SQLToIterable[A, B1, B2, E, Z] = toIterable
override def collection: OneToManies2SQLToCollection[A, B1, B2, E, Z] =
toCollection
}
object OneToManies2SQL {
def unapply[A, B1, B2, E <: WithExtractor, Z](
sqlObject: OneToManies2SQL[A, B1, B2, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B1],
WrappedResultSet => Option[B2],
(A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.to1,
sqlObject.to2,
sqlObject.zExtractor
)
)
}
}
class OneToManies2SQLToList[A, B1, B2, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(
val to1: WrappedResultSet => Option[B1],
val to2: WrappedResultSet => Option[B2]
)(val zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z)
extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToList[Z, E]
with OneToManies2Extractor[A, B1, B2, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): List[Z] = {
executeQuery[List](
session,
(session: DBSession) =>
toIterable(session, statement, rawParameters, zExtractor).toList
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo1: WrappedResultSet => Option[B1] = to1
private[scalikejdbc] def extractTo2: WrappedResultSet => Option[B2] = to2
private[scalikejdbc] def transform
: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z = zExtractor
}
object OneToManies2SQLToList {
def unapply[A, B1, B2, E <: WithExtractor, Z](
sqlObject: OneToManies2SQLToList[A, B1, B2, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B1],
WrappedResultSet => Option[B2],
(A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.to1,
sqlObject.to2,
sqlObject.zExtractor
)
)
}
}
final class OneToManies2SQLToCollection[
A,
B1,
B2,
E <: WithExtractor,
Z
] private[scalikejdbc] (
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(
val to1: WrappedResultSet => Option[B1],
val to2: WrappedResultSet => Option[B2]
)(val zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z)
extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToCollection[Z, E]
with OneToManies2Extractor[A, B1, B2, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply[C[_]]()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor,
f: Factory[Z, C[Z]]
): C[Z] = {
executeQuery(
session,
(session: DBSession) =>
f.fromSpecific(
toIterable(session, statement, rawParameters, zExtractor)
)
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo1: WrappedResultSet => Option[B1] = to1
private[scalikejdbc] def extractTo2: WrappedResultSet => Option[B2] = to2
private[scalikejdbc] def transform
: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z = zExtractor
}
object OneToManies2SQLToCollection {
def unapply[A, B1, B2, E <: WithExtractor, Z](
sqlObject: OneToManies2SQLToCollection[A, B1, B2, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B1],
WrappedResultSet => Option[B2],
(A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.to1,
sqlObject.to2,
sqlObject.zExtractor
)
)
}
}
class OneToManies2SQLToIterable[A, B1, B2, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(
val to1: WrappedResultSet => Option[B1],
val to2: WrappedResultSet => Option[B2]
)(val zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z)
extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToIterable[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies2Extractor[A, B1, B2, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): Iterable[Z] = {
executeQuery[Iterable](
session,
(session: DBSession) =>
toIterable(session, statement, rawParameters, zExtractor)
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo1: WrappedResultSet => Option[B1] = to1
private[scalikejdbc] def extractTo2: WrappedResultSet => Option[B2] = to2
private[scalikejdbc] def transform
: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z = zExtractor
}
object OneToManies2SQLToIterable {
def unapply[A, B1, B2, E <: WithExtractor, Z](
sqlObject: OneToManies2SQLToIterable[A, B1, B2, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B1],
WrappedResultSet => Option[B2],
(A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.to1,
sqlObject.to2,
sqlObject.zExtractor
)
)
}
}
class OneToManies2SQLToOption[A, B1, B2, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any]
)(val one: WrappedResultSet => A)(
val to1: WrappedResultSet => Option[B1],
val to2: WrappedResultSet => Option[B2]
)(val zExtractor: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z)(
val isSingle: Boolean = true
) extends SQL[Z, E](statement, rawParameters)(
SQL.noExtractor[Z](
"one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."
)
)
with SQLToOption[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies2Extractor[A, B1, B2, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit
session: DBSession,
context: ConnectionPoolContext = NoConnectionPoolContext,
hasExtractor: ThisSQL =:= SQLWithExtractor
): Option[Z] = {
executeQuery[Option](
session,
(session: DBSession) =>
toSingle(toIterable(session, statement, rawParameters, zExtractor))
)
}
private[scalikejdbc] def extractOne: WrappedResultSet => A = one
private[scalikejdbc] def extractTo1: WrappedResultSet => Option[B1] = to1
private[scalikejdbc] def extractTo2: WrappedResultSet => Option[B2] = to2
private[scalikejdbc] def transform
: (A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z = zExtractor
}
object OneToManies2SQLToOption {
def unapply[A, B1, B2, E <: WithExtractor, Z](
sqlObject: OneToManies2SQLToOption[A, B1, B2, E, Z]
): Some[
(
String,
scala.collection.Seq[Any],
WrappedResultSet => A,
WrappedResultSet => Option[B1],
WrappedResultSet => Option[B2],
(A, scala.collection.Seq[B1], scala.collection.Seq[B2]) => Z,
Boolean
)
] = {
Some(
(
sqlObject.statement,
sqlObject.rawParameters,
sqlObject.one,
sqlObject.to1,
sqlObject.to2,
sqlObject.zExtractor,
sqlObject.isSingle
)
)
}
}
|
keyno63/scalikejdbc | scalikejdbc-syntax-support-macro/src/main/scala-3/scalikejdbc/autoConstruct.scala | package scalikejdbc
import scala.quoted._
import java.sql.ResultSet
import language.`3.0`
object autoConstruct {
def applyResultName_impl[A](
rs: Expr[WrappedResultSet],
rn: Expr[ResultName[A]],
excludes: Expr[Seq[String]]
)(using quotes: Quotes)(using t: Type[A]): Expr[A] = {
import quotes.reflect._
val params = EntityUtil.constructorParams[A](excludes).collect {
case (name, typeTree, false, _) =>
val d = Implicits
.search(TypeRepr.of[TypeBinder].appliedTo(typeTree.tpe)) match {
case result: ImplicitSearchSuccess =>
val resultSet = '{ ${ rs }.underlying }.asTerm
val fieldName = '{ ${ rn }.field(${ Expr(name) }).value }.asTerm
Select.overloaded(
result.tree,
"apply",
Nil,
resultSet :: fieldName :: Nil,
typeTree.tpe
)
case _ =>
report.errorAndAbort(
s"could not find implicit of TypeBinder[${typeTree.show}]"
)
}
NamedArg(name, d)
case (name, _, true, Some(ref)) =>
NamedArg(name, ref)
}
val typeTree = TypeTree.of[A]
Select.overloaded(New(typeTree), "<init>", Nil, params).asExprOf[A]
}
def applySyntaxProvider_impl[A](
rs: Expr[WrappedResultSet],
sp: Expr[SQLSyntaxProvider[A]],
excludes: Expr[Seq[String]]
)(using quotes: Quotes)(using t: Type[A]): Expr[A] = {
import quotes.reflect._
applyResultName_impl(
rs,
Select.unique(sp.asTerm, "resultName").asExprOf[ResultName[A]],
excludes
)
}
inline def apply[A](
rs: WrappedResultSet,
sp: SyntaxProvider[A],
inline excludes: String*
): A = ${ applySyntaxProvider_impl('rs, 'sp, 'excludes) }
inline def apply[A](
rs: WrappedResultSet,
rn: ResultName[A],
inline excludes: String*
): A = ${ applyResultName_impl('rs, 'rn, 'excludes) }
}
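// Illustrative usage sketch (assumption; Member and its SQLSyntaxSupport are not part of this file):
//
//   case class Member(id: Long, name: String, description: Option[String] = None)
//   object Member extends SQLSyntaxSupport[Member] {
//     def apply(m: ResultName[Member])(rs: WrappedResultSet): Member =
//       autoConstruct(rs, m)                 // builds `new Member(...)` from the ResultName columns via TypeBinder
//     def apply(m: SyntaxProvider[Member])(rs: WrappedResultSet): Member =
//       autoConstruct(rs, m, "description")  // excluded fields fall back to their default values
//   }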
|
keyno63/scalikejdbc | scalikejdbc-interpolation/src/main/scala-3/scalikejdbc/SelectDynamicMacro.scala | package scalikejdbc
import scalikejdbc.interpolation.SQLSyntax
import scala.quoted.*
trait SelectDynamicMacro[A] {
self: SQLSyntaxSupportFeature#SQLSyntaxProvider[A] =>
inline def selectDynamic(inline name: String): SQLSyntax =
select[A](self, name)
inline def select[E](
ref: SQLSyntaxSupportFeature#SQLSyntaxProvider[A],
inline name: String
): SQLSyntax =
${ SelectDynamicMacroImpl.selectImpl[E]('ref, 'name) }
}
object SelectDynamicMacroImpl {
def selectImpl[E: Type](
ref: Expr[SQLSyntaxSupportFeature#SQLSyntaxProvider[_]],
name: Expr[String]
)(using quotes: Quotes): Expr[SQLSyntax] = {
import quotes.reflect.*
val typeSymbol = TypeRepr.of[E].typeSymbol
val expectedNames = typeSymbol.caseFields.map(_.name)
name.value.foreach { _name =>
if (expectedNames.nonEmpty && !expectedNames.contains(_name)) {
report.error(
s"${typeSymbol.fullName}#${_name} not found. Expected fields are ${expectedNames
.mkString("#", ", #", "")}",
name.asTerm.pos
)
}
}
'{ $ref.field($name) }
}
}
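// Illustrative compile-time behaviour (sketch, names assumed): with
//   case class Member(id: Long, name: String)
// and a syntax provider `m`, `m.name` is rewritten by selectDynamic into `m.field("name")`,
// while a typo such as `m.nmae` is rejected at compile time with a message along the lines of
//   "example.Member#nmae not found. Expected fields are #id, #name"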
|
keyno63/scalikejdbc | project/GenerateOneToManies.scala | object GenerateOneToManies {
def apply(n: Int): String = {
val A = "A"
val B = "B"
val tparams = (1 to n).map(B + _)
val bs = tparams.mkString(", ")
val seq = tparams.map("scala.collection.Seq[" + _ + "]").mkString(", ")
val extractTo = "extractTo"
val extractToN = (1 to n)
.map { i =>
s" private[scalikejdbc] def $extractTo$i: WrappedResultSet => Option[B$i] = to$i"
}
.mkString("\n")
val extractOne =
" private[scalikejdbc] def extractOne: WrappedResultSet => A = one"
val transform =
s" private[scalikejdbc] def transform: (A, $seq) => Z = zExtractor"
val resultSetToOptions = (1 to n)
.map { i => s"val to$i: WrappedResultSet => Option[B$i]" }
.mkString(", ")
val to = (1 to n).map("to" + _).mkString(", ")
val resultSetToOptionsType =
tparams.map("WrappedResultSet => Option[" + _ + "]").mkString(", ")
val sqlTo = (1 to n).map("sqlObject.to" + _).mkString(", ")
s"""/*
* Copyright 2013 - 2015 scalikejdbc.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package scalikejdbc
import scala.collection.mutable.LinkedHashMap
import scala.collection.compat._
private[scalikejdbc] trait OneToManies${n}Extractor[$A, $bs, E <: WithExtractor, Z]
extends SQL[Z, E]
with RelationalSQLResultSetOperations[Z] {
private[scalikejdbc] def extractOne: WrappedResultSet => $A
${(1 to n)
.map { i =>
s" private[scalikejdbc] def $extractTo$i: WrappedResultSet => Option[B$i]"
}
.mkString("\n")}
private[scalikejdbc] def transform: ($A, $seq) => Z
private[scalikejdbc] def processResultSet(result: (LinkedHashMap[$A, ($seq)]),
rs: WrappedResultSet): LinkedHashMap[A, ($seq)] = {
val o = extractOne(rs)
val (${(1 to n).map("to" + _).mkString(", ")}) = (${(1 to n)
.map(extractTo + _ + "(rs)")
.mkString(", ")})
if (result.contains(o)) {
${(1 to n).map("to" + _).mkString("(", " orElse ", ")")}.map { _ =>
val (${(1 to n).map("ts" + _).mkString(", ")}) = result.apply(o)
result += (o -> ((
${(1 to n)
.map { i =>
s" to$i.map(t => if (ts$i.contains(t)) ts$i else ts$i :+ t).getOrElse(ts$i)"
}
.mkString(",\n")}
)))
}.getOrElse(result)
} else {
result += (
o -> ((
${(1 to n)
.map { i =>
s" to$i.map(t => Vector(t)).getOrElse(Vector.empty)"
}
.mkString(",\n")}
))
)
}
}
private[scalikejdbc] def toIterable(session: DBSession, sql: String, params: scala.collection.Seq[_], zExtractor: (A, $seq) => Z): Iterable[Z] = {
val attributesSwitcher = createDBSessionAttributesSwitcher
DBSessionWrapper(session, attributesSwitcher).foldLeft(statement, rawParameters.toSeq: _*)(LinkedHashMap[A, ($seq)]())(processResultSet _).map {
case (one, (${(1 to n)
.map("t" + _)
.mkString(", ")})) => zExtractor(one, ${(1 to n)
.map("t" + _)
.mkString(", ")})
}
}
}
class OneToManies${n}SQL[A, $bs, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any])(val one: WrappedResultSet => A)($resultSetToOptions)(val zExtractor: (A, $seq) => Z)
extends SQL[Z, E](statement, rawParameters)(SQL.noExtractor[Z]("one-to-many extractor(one(RS => A).toManies(RS => Option[B1]...)) is specified, use #map((A,B) =>Z) instead."))
with AllOutputDecisionsUnsupported[Z, E] {
def map(zExtractor: (A, $seq) => Z): OneToManies${n}SQL[A, $bs, HasExtractor, Z] = {
new OneToManies${n}SQL(statement, rawParameters)(one)($to)(zExtractor)
}
override def toIterable: OneToManies${n}SQLToIterable[A, $bs, E, Z] = {
new OneToManies${n}SQLToIterable[A, $bs, E, Z](statement, rawParameters)(one)($to)(zExtractor)
}
override def toList: OneToManies${n}SQLToList[A, $bs, E, Z] = {
new OneToManies${n}SQLToList[A, $bs, E, Z](statement, rawParameters)(one)($to)(zExtractor)
}
override def toOption: OneToManies${n}SQLToOption[A, $bs, E, Z] = {
new OneToManies${n}SQLToOption[A, $bs, E, Z](statement, rawParameters)(one)($to)(zExtractor)(true)
}
override def headOption: OneToManies${n}SQLToOption[A, $bs, E, Z] = {
new OneToManies${n}SQLToOption[A, $bs, E, Z](statement, rawParameters)(one)($to)(zExtractor)(false)
}
override def toCollection: OneToManies${n}SQLToCollection[A, $bs, E, Z] = {
new OneToManies${n}SQLToCollection[A, ${bs}, E, Z](statement, rawParameters)(one)($to)(zExtractor)
}
override def single: OneToManies${n}SQLToOption[A, $bs, E, Z] = toOption
override def first: OneToManies${n}SQLToOption[A, $bs, E, Z] = headOption
override def list: OneToManies${n}SQLToList[A, $bs, E, Z] = toList
override def iterable: OneToManies${n}SQLToIterable[A, $bs, E, Z] = toIterable
override def collection: OneToManies${n}SQLToCollection[A, $bs, E, Z] = toCollection
}
object OneToManies${n}SQL {
def unapply[A, $bs, E <: WithExtractor, Z](sqlObject: OneToManies${n}SQL[A, $bs, E, Z]): Some[(String, scala.collection.Seq[Any], WrappedResultSet => A, ($resultSetToOptionsType), (A, $seq) => Z)] = {
Some((sqlObject.statement, sqlObject.rawParameters, sqlObject.one, ($sqlTo), sqlObject.zExtractor))
}
}
class OneToManies${n}SQLToList[A, $bs, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any])(val one: WrappedResultSet => A)($resultSetToOptions)(val zExtractor: (A, $seq) => Z)
extends SQL[Z, E](statement, rawParameters)(SQL.noExtractor[Z]("one-to-many extractor(one(RS => A).toManies(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."))
with SQLToList[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies${n}Extractor[A, $bs, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit session: DBSession, context: ConnectionPoolContext = NoConnectionPoolContext, hasExtractor: ThisSQL =:= SQLWithExtractor): List[Z] = {
executeQuery[List](session, (session: DBSession) => toIterable(session, statement, rawParameters, zExtractor).toList)
}
$extractOne
$extractToN
$transform
}
object OneToManies${n}SQLToList {
def unapply[A, $bs, E <: WithExtractor, Z](sqlObject: OneToManies${n}SQLToList[A, $bs, E, Z]): Some[(String, scala.collection.Seq[Any], WrappedResultSet => A, ($resultSetToOptionsType), (A, $seq) => Z)] = {
Some((sqlObject.statement, sqlObject.rawParameters, sqlObject.one, ($sqlTo), sqlObject.zExtractor))
}
}
final class OneToManies${n}SQLToCollection[A, $bs, E <: WithExtractor, Z] private[scalikejdbc](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any])(val one: WrappedResultSet => A)($resultSetToOptions)(val zExtractor: (A, $seq) => Z)
extends SQL[Z, E](statement, rawParameters)(SQL.noExtractor[Z]("one-to-many extractor(one(RS => A).toManies(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."))
with SQLToCollection[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies${n}Extractor[A, $bs, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply[C[_]]()(implicit session: DBSession, context: ConnectionPoolContext = NoConnectionPoolContext, hasExtractor: ThisSQL =:= SQLWithExtractor, f: Factory[Z, C[Z]]): C[Z] = {
executeQuery(session, (session: DBSession) => f.fromSpecific(toIterable(session, statement, rawParameters, zExtractor)))
}
$extractOne
$extractToN
$transform
}
object OneToManies${n}SQLToCollection {
def unapply[A, $bs, E <: WithExtractor, Z](sqlObject: OneToManies${n}SQLToCollection[A, $bs, E, Z]): Some[(String, scala.collection.Seq[Any], WrappedResultSet => A, ($resultSetToOptionsType), (A, $seq) => Z)] = {
Some((sqlObject.statement, sqlObject.rawParameters, sqlObject.one, ($sqlTo), sqlObject.zExtractor))
}
}
class OneToManies${n}SQLToIterable[A, $bs, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any])(val one: WrappedResultSet => A)($resultSetToOptions)(val zExtractor: (A, $seq) => Z)
extends SQL[Z, E](statement, rawParameters)(SQL.noExtractor[Z]("one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."))
with SQLToIterable[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies${n}Extractor[A, $bs, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit session: DBSession, context: ConnectionPoolContext = NoConnectionPoolContext, hasExtractor: ThisSQL =:= SQLWithExtractor): Iterable[Z] = {
executeQuery[Iterable](session, (session: DBSession) => toIterable(session, statement, rawParameters, zExtractor))
}
$extractOne
$extractToN
$transform
}
object OneToManies${n}SQLToIterable {
def unapply[A, $bs, E <: WithExtractor, Z](sqlObject: OneToManies${n}SQLToIterable[A, $bs, E, Z]): Some[(String, scala.collection.Seq[Any], WrappedResultSet => A, ($resultSetToOptionsType), (A, $seq) => Z)] = {
Some((sqlObject.statement, sqlObject.rawParameters, sqlObject.one, ($sqlTo), sqlObject.zExtractor))
}
}
class OneToManies${n}SQLToOption[A, $bs, E <: WithExtractor, Z](
override val statement: String,
override val rawParameters: scala.collection.Seq[Any])(val one: WrappedResultSet => A)($resultSetToOptions)(val zExtractor: (A, $seq) => Z)(val isSingle: Boolean = true)
extends SQL[Z, E](statement, rawParameters)(SQL.noExtractor[Z]("one-to-many extractor(one(RS => A).toMany(RS => Option[B1])) is specified, use #map((A,B) =>Z) instead."))
with SQLToOption[Z, E]
with AllOutputDecisionsUnsupported[Z, E]
with OneToManies${n}Extractor[A, $bs, E, Z] {
import GeneralizedTypeConstraintsForWithExtractor._
override def apply()(implicit session: DBSession, context: ConnectionPoolContext = NoConnectionPoolContext, hasExtractor: ThisSQL =:= SQLWithExtractor): Option[Z] = {
executeQuery[Option](session, (session: DBSession) => toSingle(toIterable(session, statement, rawParameters, zExtractor)))
}
$extractOne
$extractToN
$transform
}
object OneToManies${n}SQLToOption {
def unapply[A, $bs, E <: WithExtractor, Z](sqlObject: OneToManies${n}SQLToOption[A, $bs, E, Z]): Some[(String, scala.collection.Seq[Any], WrappedResultSet => A, ($resultSetToOptionsType), (A, $seq) => Z, Boolean)] = {
Some((sqlObject.statement, sqlObject.rawParameters, sqlObject.one, ($sqlTo), sqlObject.zExtractor, sqlObject.isSingle))
}
}
"""
}
}
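// Sketch of how this generator is typically driven (assumption; the project's actual sbt wiring
// may differ): one managed source file is written per arity, e.g.
//
//   val dir = (sourceManaged in Compile).value
//   (2 to 9).foreach { n =>
//     IO.write(dir / "scalikejdbc" / s"OneToManies${n}SQL.scala", GenerateOneToManies(n))
//   }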
|
kipsigman/async-web-service-clients | build.sbt | import Dependencies._
name := "async-web-service-clients"
lazy val commonSettings = Seq(
scalaVersion := "2.11.8",
organization := "kipsigman",
licenses += ("Apache-2.0", url("https://github.com/kipsigman/async-web-service-clients/blob/master/LICENSE")),
homepage := Some(url("https://github.com/kipsigman/async-web-service-clients")),
scmInfo := Some(ScmInfo(url("https://github.com/kipsigman/async-web-service-clients"), "scm:git:git://github.com:kipsigman/async-web-service-clients.git"))
)
lazy val webServiceClient = (project in file("web-service-client")).
configs(IntegrationTest).
settings(commonSettings: _*).
settings(Defaults.itSettings: _*).
settings(
name := "web-service-client",
libraryDependencies ++= di ++ dispatch ++ play ++ scalaTest
)
lazy val googleApiClients = (project in file("google-api-clients")).
dependsOn(webServiceClient).
aggregate(webServiceClient).
configs(IntegrationTest).
settings(commonSettings: _*).
settings(Defaults.itSettings: _*).
settings(
name := "google-api-clients",
libraryDependencies ++= Seq(
"com.google.apis" % "google-api-services-analytics" % "v3-rev116-1.20.0",
"com.google.api-client" % "google-api-client-gson" % "1.20.0"
) ++ scalaTest
)
lazy val salesforceApiClient = (project in file("salesforce-api-client")).
dependsOn(webServiceClient).
aggregate(webServiceClient).
configs(IntegrationTest).
settings(commonSettings: _*).
settings(Defaults.itSettings: _*).
settings(
name := "salesforce-api-client",
libraryDependencies ++= akka ++ scalaTest
) |
kipsigman/async-web-service-clients | web-service-client/src/main/scala/kipsigman/ws/client/RestWebServiceClient.scala | package kipsigman.ws.client
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import org.slf4j.LoggerFactory
import com.ning.http.client.Response
import dispatch._
import play.api.http._
import play.api.libs.json._
/**
 * Base trait for an asynchronous Scala RESTful web service client.
* Contains error handling and helpful debug logging.
*/
trait RestWebServiceClient {
protected val logger = LoggerFactory.getLogger(this.getClass)
protected implicit val ec: ExecutionContext
/**
* Prepares request with content headers, method, etc
*/
protected[ws] def prepareReq(
baseReq: Req,
method: String,
mimeType: String = MimeTypes.JSON,
postProcess: Req => Req = x => x): Req = {
// Add Content Type headers
val contentType = ContentTypes.withCharset(mimeType) // Adds charset
val req1 = baseReq.addHeader(HeaderNames.ACCEPT, contentType).addHeader(HeaderNames.CONTENT_TYPE, contentType)
// Add Method
val req2 = method match {
case HttpVerbs.DELETE => req1.DELETE
case HttpVerbs.GET => req1.GET
case HttpVerbs.PATCH => req1.PATCH
case HttpVerbs.POST => req1.POST
case HttpVerbs.PUT => req1.PUT
}
// Post process and return
postProcess(req2)
}
/**
* Makes basic call
*/
protected def execute(req: Req): Future[Response] = {
Http(req > as.Response(response => {
logger.debug(s"""execute:{"request":"${req.toRequest}", "responseStatusCode":${response.getStatusCode}, "responseBody":${response.getResponseBody}}""")
response
}))
}
/**
* Makes call and validates Response status returning:
* - Some(Response): Resource found, call successful
* - None: Resource not found
 * @throws RestException if an unexpected status is returned
*/
protected def execute(req: Req, method: String): Future[Option[Response]] = {
val validPStatuses = Seq(Status.OK, Status.CREATED, Status.ACCEPTED, Status.NO_CONTENT)
execute(req).map(response =>
(method, response.getStatusCode) match {
case (HttpVerbs.DELETE, Status.OK) => Option(response)
case (HttpVerbs.DELETE, Status.NO_CONTENT) => Option(response)
case (HttpVerbs.GET, Status.OK) => Option(response)
case (HttpVerbs.PATCH, s) if (validPStatuses.contains(s)) => Option(response)
case (HttpVerbs.POST, s) if (validPStatuses.contains(s)) => Option(response)
case (HttpVerbs.PUT, s) if (validPStatuses.contains(s)) => Option(response)
case (m, Status.NOT_FOUND) if (m != HttpVerbs.POST) => None
case _ => throw RestException(response)
}
)
}
protected def executeJson(req: Req, method: String): Future[Option[JsValue]] = {
val responseOptionFuture = execute(req, method).map(_.map(response => {
if (response.getResponseBody.isEmpty()) {
Json.obj()
} else {
val responseBody = response.getResponseBody
Json.parse(responseBody)
}
}))
responseOptionFuture onFailure {
case t: Throwable => logger.error("executeJson exception", t)
}
responseOptionFuture
}
}
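// Minimal sketch (assumption, not part of the library) of a concrete client built on the trait
// above; the endpoint URL and resource name are placeholders.
//
//   class WidgetClient(protected implicit val ec: ExecutionContext) extends RestWebServiceClient {
//     def findWidget(id: Int): Future[Option[JsValue]] = {
//       val req = prepareReq(url(s"https://api.example.com/widgets/$id"), HttpVerbs.GET)
//       executeJson(req, HttpVerbs.GET)
//     }
//   }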
case class RestException(response: Response) extends Throwable({
val jsonStr = Json.stringify(Json.obj("statusCode" -> response.getStatusCode, "statusText" -> response.getStatusText, "responseBody" -> response.getResponseBody))
s"Web Service Exception: $jsonStr"
}) |
kipsigman/async-web-service-clients | google-api-clients/src/main/scala/kipsigman/ws/google/youtube/YouTubeVideo.scala | package kipsigman.ws.google.youtube
import java.util.Date
import java.util.TimeZone
import java.text.SimpleDateFormat
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.libs.json.Reads._
case class YouTubeVideo(
id: String,
publishedAt: Date,
title: String,
description: String) {
val url: String = s"//youtu.be/$id"
val embedUrl: String = s"//www.youtube.com/embed/$id"
}
object YouTubeVideo {
private val datePattern = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
private val timeZone = TimeZone.getTimeZone("UTC")
def bindDate(dateStr: String): Date = {
val df = new SimpleDateFormat(datePattern);
df.setTimeZone(timeZone)
df.parse(dateStr)
}
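  // Example: bindDate("2015-09-02T17:30:00.000Z") yields the corresponding java.util.Date
  // interpreted in UTC, matching the timestamp format returned by the YouTube API.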
private val dateReads = Reads.dateReads(datePattern)
val searchReads: Reads[YouTubeVideo] = (
(JsPath \ "id" \ "videoId").read[String] and
(JsPath \ "snippet" \ "publishedAt").read[Date](dateReads) and
(JsPath \ "snippet" \ "title").read[String] and
(JsPath \ "snippet" \ "description").read[String]
)(YouTubeVideo.apply _)
val videosReads: Reads[YouTubeVideo] = (
(JsPath \ "id").read[String] and
(JsPath \ "snippet" \ "publishedAt").read[Date](dateReads) and
(JsPath \ "snippet" \ "title").read[String] and
(JsPath \ "snippet" \ "description").read[String]
)(YouTubeVideo.apply _)
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/ContractSpec.scala | package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
class ContractSpec extends WordSpec with Matchers with SampleData {
"selectFields" should {
"return the correct representations" in {
Contract.selectFields shouldBe Seq("Id", "Opportunity__c", "StartDate", "EndDate")
Contract.selectFieldsStr shouldBe "Id, Opportunity__c, StartDate, EndDate"
Contract.objectPrefixedSelectFields shouldBe Seq("Contract.Id", "Contract.Opportunity__c", "Contract.StartDate", "Contract.EndDate")
}
}
"JSON deserialization" should {
"read valid JSON" in {
sampleContractJson.validate[Contract] match {
case s: JsSuccess[Contract] => {
val contract = s.get
contract shouldBe sampleContract
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/OpportunityContactRoleSpec.scala | package kipsigman.ws.salesforce
import OpportunityContactRole.Role;
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
class OpportunityContactRoleSpec extends WordSpec with Matchers with SampleData {
"JSON deserialization" should {
"read valid JSON" in {
sampleOpportunityContactRoleJson.validate[OpportunityContactRole] match {
case s: JsSuccess[OpportunityContactRole] => {
val ocr = s.get
ocr shouldBe sampleOpportunityContactRole
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
"Role.name" should {
"return proper name for Role objects" in {
Role.BusinessUser.name shouldBe "Business User"
Role.TechnicalBuyer.name shouldBe "Technical Buyer"
}
}
"Role.apply" should {
"select existing Role object" in {
Role.apply(Role.BusinessUser.name) shouldBe Role.BusinessUser
Role.apply(Role.TechnicalBuyer.name) shouldBe Role.TechnicalBuyer
}
"create new Role when existing object not found" in {
val roleName = "Bogus"
val role = Role.apply(roleName)
role shouldBe Role.Unknown(roleName)
role.name shouldBe roleName
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/SampleData.scala | package kipsigman.ws.salesforce
import scala.io.Source
import play.api.libs.json._
trait SampleData {
protected val sampleAccountJson = Json.parse(Source.fromURL(getClass.getResource("/account.json")).getLines().mkString)
protected val sampleAccount: Account = {
sampleAccountJson.as[Account]
}
protected val sampleContactJson = Json.parse(Source.fromURL(getClass.getResource("/contact.json")).getLines().mkString)
protected val sampleContact: Contact = {
sampleContactJson.as[Contact]
}
protected val sampleContractJson = Json.parse(Source.fromURL(getClass.getResource("/contract.json")).getLines().mkString)
protected val sampleContract: Contract = {
sampleContractJson.as[Contract]
}
protected val sampleProductJson = Json.parse(Source.fromURL(getClass.getResource("/product2.json")).getLines().mkString)
protected val sampleProduct: Product = {
sampleProductJson.as[Product]
}
protected val sampleOpportunityJson = Json.parse(Source.fromURL(getClass.getResource("/opportunity.json")).getLines().mkString)
protected val sampleOpportunity: Opportunity = {
sampleOpportunityJson.as[Opportunity]
}
protected val sampleOpportunityContactRoleJson = Json.parse(Source.fromURL(getClass.getResource("/opportunityContactRole.json")).getLines().mkString)
protected val sampleOpportunityContactRole: OpportunityContactRole = {
sampleOpportunityContactRoleJson.as[OpportunityContactRole]
}
protected val sampleOpportunityLineItemJson = Json.parse(Source.fromURL(getClass.getResource("/opportunityLineItem.json")).getLines().mkString)
protected val sampleOpportunityLineItem: OpportunityLineItem = {
sampleOpportunityLineItemJson.as[OpportunityLineItem]
}
private[salesforce] def parseDate(input: String): java.util.Date = {
val df = new java.text.SimpleDateFormat("yyyy-MM-dd")
df.setLenient(false)
df.parse(input)
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/it/scala/kipsigman/ws/salesforce/SalesforceApiClientReadsIntegrationSpec.scala | package kipsigman.ws.salesforce
import java.time._
import org.scalatest.time._
import org.scalatest.WordSpec
import OpportunityContactRole.Role
import Product._
class SalesforceApiClientReadsIntegrationSpec extends SalesforceApiClientIntegrationSpec {
// "bad accessInfo" should {
// "return Exception" in {
// val badAccessInfo = accessInfo.copy(accessToken = "invalid")
// val accountFuture = client.findAccountById(sampleAccount.id.get)(badAccessInfo)
// whenReady(accountFuture) { accountOption =>
// fail
// }
// }
// }
"findAccountByEmail" should {
"return Account for valid email" in {
val accountFuture = client.findAccountByEmail(sampleContactEmail)
whenReady(accountFuture) { accountOption =>
accountOption shouldBe defined
accountOption.get shouldBe sampleAccount
}
}
"return None invalid email" in {
val email = "<EMAIL>"
val accountFuture = client.findAccountByEmail(email)
whenReady(accountFuture) { accountOption =>
accountOption shouldBe None
}
}
}
"findAccountById" should {
"return Account for valid id" in {
val accountFuture = client.findAccountById(sampleAccount.id.get)
whenReady(accountFuture) { accountOption =>
accountOption shouldBe defined
accountOption.get shouldBe sampleAccount
}
}
"return None for invalid id" in {
val accountFuture = client.findAccountById("invalid")
whenReady(accountFuture) { accountOption =>
accountOption shouldBe None
}
}
}
"findContactByEmail" should {
"return Contact for valid email" in {
val contactFuture = client.findContactByEmail(sampleContactEmail)
whenReady(contactFuture) { contactOption =>
contactOption shouldBe defined
val contact = contactOption.get
contact.name shouldBe sampleContactName
contact.email shouldBe sampleContactEmail
}
}
"return None invalid email" in {
val email = "<EMAIL>"
val contactFuture = client.findContactByEmail(email)
whenReady(contactFuture) { contactOption =>
contactOption shouldBe None
}
}
}
"findContractById" should {
"return results for valid id" in {
val contractOptionFuture = client.findContractById(sampleContractId)
whenReady(contractOptionFuture) { contractOption =>
val contract = contractOption.get
contract.id.get should startWith(sampleContractId)
contract.opportunityId shouldBe "0068A000001sxanQAA"
formatDateTime(contract.startDate) shouldBe "2015-09-02 00:00:00 UTC"
formatDateTime(contract.endDate) shouldBe "2016-09-01 00:00:00 UTC"
contract.isCurrent shouldBe true
contract.isCurrentOrFuture shouldBe true
contract.isExpired shouldBe false
}
}
}
"findExpiredContracts" should {
"get some Contracts" in {
val startDate = LocalDate.of(2015, 11, 5)
val endDate = LocalDate.of(2015, 11, 8)
val x = client.findExpiredContracts(startDate, endDate)
whenReady(x) { contracts =>
contracts.foreach(contract => logger.debug(s"Expired Contract: ${contract.id.get}: ${contract.startDate} - ${contract.endDate}"))
contracts.size should be > 0
}
}
}
"findOpportunityById" should {
"return results for valid id" in {
val opportunityOptionFuture = client.findOpportunityById(sampleOpportunityId)
whenReady(opportunityOptionFuture) { opportunityOption =>
val opportunity = opportunityOption.get
opportunity.id.get should startWith(sampleOpportunityId)
opportunity.account.name shouldBe sampleAccount.name
opportunity.name shouldBe "New PSS"
}
}
}
"findOpportunityByContractId" should {
"return results for valid id" in {
val opportunityOptionFuture = client.findOpportunityByContractId(sampleContractId)
whenReady(opportunityOptionFuture) { opportunityOption =>
val opportunity = opportunityOption.get
opportunity.id.get should startWith(sampleOpportunityId)
opportunity.account.name shouldBe sampleAccount.name
opportunity.name shouldBe "New PSS"
}
}
}
// "findRecentlyUpdatedClosedWonOpportunities" should {
// "return results if updated Opps" in {
// val sinceDateTime = java.time.ZonedDateTime.now().minusDays(7)
// val opportunitiesFuture = client.findRecentlyUpdatedClosedWonOpportunities(sinceDateTime)
// whenReady(opportunitiesFuture) { opportunities =>
// logger.debug(s"opportunities: $opportunities")
// opportunities.isEmpty shouldBe true
// }
// }
// }
"findRecentlyExpiredOpportunities" should {
"get some Opps" in {
val x = client.findRecentlyExpiredOpportunities(90)
whenReady(x) { opps =>
opps.foreach(opp => logger.info(s"Expired subscription Opp: ${opp.account.name} - ${opp.name}"))
opps.size should be > 0
}
}
}
"findCurrentOrFutureOpportunitiesByEmail" should {
"return results for valid email attached to role" in {
val opportunitiesFuture = client.findCurrentOrFutureOpportunitiesByEmail(sampleContactEmail)
whenReady(opportunitiesFuture) { opportunities =>
logger.debug(s"opportunities: $opportunities")
val opportunity = opportunities.find(_.name == "New PSS").get
opportunity.id.get should startWith(sampleOpportunityId)
opportunity.contracts.size shouldBe 1
opportunity.products.size shouldBe 2 // PSS + ConductR
opportunity.products.head.family shouldBe ProductFamily.A
}
}
}
"findOpportunityContactRoles" should {
"return results for valid Opportunity" in {
val ocrsFuture = client.findOpportunityContactRoles(sampleOpportunityId, Role.BusinessUser)
whenReady(ocrsFuture) { ocrs =>
logger.debug(s"ocrs: $ocrs")
ocrs.size should be >= 1
}
}
}
} |
kipsigman/async-web-service-clients | google-api-clients/src/main/scala/kipsigman/ws/google/GoogleApiClient.scala | package kipsigman.ws.google
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import com.typesafe.config.Config
import dispatch.Req
import dispatch.url
import play.api.http.HttpVerbs
import play.api.libs.json._
import play.api.libs.json.Reads._
import play.api.libs.functional.syntax._
import kipsigman.ws.client.RestWebServiceClient
abstract class GoogleApiClient(config: Config)(protected implicit val ec: ExecutionContext) extends RestWebServiceClient {
protected def apiKey = config.getString("google.apiKey")
protected def applicationName = config.getString("google.applicationName")
protected def serviceAccountEmail = config.getString("google.serviceAccountEmail")
protected def apiKeyAuthorization(req: Req): Req = {
req <<? Map("key" -> apiKey)
}
protected def executeGoogleApiReq(baseReq: Req, method: String, authorization: Req => Req): Future[Option[JsValue]] = {
val req = prepareReq(baseReq, method, postProcess = authorization)
executeJson(req, method)
}
protected def executeGetPage[T](baseReq: Req, authorization: Req => Req)(implicit ec: ExecutionContext, rds: Reads[T]): Future[Page[T]] = {
executeGoogleApiReq(baseReq, HttpVerbs.GET, authorization) map {
case Some(jsValue) => jsValue.as[Page[T]]
case None => Page()
}
}
protected def executeGetPage[T](
serviceUrl: String,
params: Map[String, String],
authorization: Req => Req = apiKeyAuthorization
)(implicit ec: ExecutionContext, rds: Reads[T]): Future[Page[T]] = {
val req = url(serviceUrl) <<? params
executeGetPage(req, authorization)
}
}
case class Page[T](
items: Seq[T] = Seq[T](),
resultsPerPage: Int = Page.defaultResultsPerPage,
totalResults: Int = 0,
prevPageToken: Option[String] = None,
nextPageToken: Option[String] = None)
object Page {
val defaultResultsPerPage = 10
implicit def pageReads[T](implicit itemRds: Reads[T]): Reads[Page[T]] = (
(JsPath \ "items").read[Seq[T]] and
(JsPath \ "pageInfo" \ "resultsPerPage").read[Int] and
(JsPath \ "pageInfo" \ "totalResults").read[Int] and
(JsPath \ "prevPageToken").readNullable[String] and
(JsPath \ "nextPageToken").readNullable[String]
)(Page.apply[T] _)
def offset(pageIndex: Int, resultsPerPage: Int): Int = pageIndex * resultsPerPage
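  // Example of the paged JSON shape pageReads expects (a trimmed-down, YouTube-style response;
  // field values are placeholders and item contents depend on the itemRds in scope):
  //   {"items": [...], "pageInfo": {"resultsPerPage": 10, "totalResults": 42}, "nextPageToken": "CAoQAA"}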
} |
kipsigman/async-web-service-clients | google-api-clients/src/main/scala/kipsigman/ws/google/analytics/AnalyticsApiClient.scala | package kipsigman.ws.google.analytics
import java.io.File
import java.time.LocalDate
import java.time.format.DateTimeFormatter
import javax.inject.Inject
import javax.inject.Singleton
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.http.HttpTransport
import com.google.api.client.json.JsonFactory
import com.google.api.client.json.gson.GsonFactory
import com.google.api.services.analytics.Analytics
import com.google.api.services.analytics.AnalyticsScopes
import com.google.api.services.analytics.model.AccountSummaries;
import com.google.api.services.analytics.model.AccountSummary;
import com.google.api.services.analytics.model.Accounts;
import com.google.api.services.analytics.model.GaData;
import com.google.api.services.analytics.model.ProfileSummary;
import com.google.api.services.analytics.model.Profiles;
import com.google.api.services.analytics.model.WebPropertySummary;
import com.google.api.services.analytics.model.Webproperties;
import com.typesafe.config.Config
import kipsigman.ws.google.GoogleApiClient
/**
* Google Analytics API client.
* This is a work in progress, only minimal functionality available.
*/
@Singleton
class AnalyticsApiClient @Inject() (config: Config, p12File: File)(implicit ec: ExecutionContext) extends GoogleApiClient(config) {
private val analytics: Analytics = initializeAnalytics
/////////////////
// Builder methods
/////////////////
private[analytics] def initializeAnalytics: Analytics = {
// Initializes an authorized analytics service object.
val jsonFactory: JsonFactory = GsonFactory.getDefaultInstance();
val httpTransport: HttpTransport = GoogleNetHttpTransport.newTrustedTransport();
val credential: GoogleCredential = new GoogleCredential.Builder()
.setTransport(httpTransport)
.setJsonFactory(jsonFactory)
.setServiceAccountId(serviceAccountEmail)
.setServiceAccountPrivateKeyFromP12File(p12File)
.setServiceAccountScopes(AnalyticsScopes.all())
.build();
// Construct the Analytics service object.
new Analytics.Builder(httpTransport, jsonFactory, credential).setApplicationName(applicationName).build();
}
private[analytics] def filter(dimension: Dimension, operator: FilterOperator, expression: String): Filters = {
new Filters(s"${dimension.toString}${operator.underlying}$expression")
}
private[analytics] def filterMetric(metric: Metric, operator: FilterOperator, expression: String): Filters = {
new Filters(s"${metric.toString}${operator.underlying}$expression")
}
private[analytics] def eventFilters(eventCategory: String, eventAction: String): Filters = {
val filtersStr = filter(Dimension.EventCategory, FilterOperator.equals, eventCategory) +
FilterOperator.and.underlying +
filter(Dimension.EventAction, FilterOperator.equals, eventAction)
new Filters(filtersStr)
}
private[analytics] def eventFilters(eventCategory: String, eventAction: String, eventLabels: Seq[String]): Filters = {
val filtersStr = eventLabels.map(label => filter(Dimension.EventLabel, FilterOperator.equals, label).underlying).mkString(FilterOperator.or.underlying) +
FilterOperator.and.underlying +
eventFilters(eventCategory, eventAction).underlying
new Filters(filtersStr)
}
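  // Example of the filter expressions these helpers produce (derived from the operators below;
  // the category/action/label values are placeholders):
  //   filter(Dimension.EventCategory, FilterOperator.equals, "Video") -> "ga:eventCategory==Video"
  //   eventFilters("Video", "Play") -> "ga:eventCategory==Video;ga:eventAction==Play"
  //   eventFilters("Video", "Play", Seq("intro", "outro")) ->
  //     "ga:eventLabel==intro,ga:eventLabel==outro;ga:eventCategory==Video;ga:eventAction==Play"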
private[analytics] def getEvents(profileId: String, startDate: Date, endDate: Date, filters: Filters): Future[Seq[DataRow]] = Future {
val data: GaData = analytics.data().ga()
.get("ga:" + profileId, startDate.underlying, endDate.underlying, Metric.TotalEvents.toString)
.setDimensions(Dimension.EventLabel.toString)
.setFilters(filters.underlying)
.execute()
if (data.getTotalResults > 0) {
data.getRows.map(row => DataRow(row.get(0), row.get(1)))
} else {
Seq.empty[DataRow]
}
}
/////////////////
// Public methods
/////////////////
def getEvents(
profileId: String,
startDate: Date,
endDate: Date,
eventCategory: String,
eventAction: String,
eventLabels: Seq[String]): Future[Seq[DataRow]] = {
val filters = eventFilters(eventCategory, eventAction, eventLabels)
getEvents(profileId, startDate, endDate, filters)
}
def getPageViewsByPath(profileId: String, startDate: Date, endDate: Date, path: String): Future[Seq[DataRow]] = Future {
val filters = filter(Dimension.PagePath, FilterOperator.equals, path)
val data: GaData = analytics.data().ga()
.get("ga:" + profileId, startDate.underlying, endDate.underlying, Metric.Pageviews.toString)
.setDimensions(Dimension.PagePath.toString)
.setFilters(filters.underlying)
.execute();
data.getRows.map(row => DataRow(row.get(0), row.get(1)))
}
/////////////////
// Debug methods
/////////////////
/**
* Used to print out profile ids for gathering config info.
*/
private[analytics] def accountSummaries = {
val accountSummaries: AccountSummaries = analytics.management().accountSummaries().list().execute()
accountSummaries.getItems.foreach(account => {
logger.info(account.getName() + " (" + account.getId() + ")")
printPropertySummaries(account)
})
def printPropertySummaries(accountSummary: AccountSummary) {
accountSummary.getWebProperties.foreach(property => {
logger.info(" " + property.getName() + " (" + property.getId() + ")")
logger.info(" [" + property.getWebsiteUrl() + " | " + property.getLevel() + "]")
printProfileSummary(property)
})
}
def printProfileSummary(webPropertySummary: WebPropertySummary) {
webPropertySummary.getProfiles.foreach(profile => {
logger.info(" " + profile.getName() + " (" + profile.getId() + ") | " + profile.getType())
})
}
}
}
case class DataRow(dimension: String, metric: String)
class Date(val underlying: String) extends AnyVal {
override def toString: String = underlying
}
object Date {
val today = new Date("today")
val yesterday = new Date("yesterday")
def daysAgo(num: Int) = new Date(s"${num}daysAgo")
def fromLocalDate(localDate: LocalDate): Date = {
new Date(localDate.format(DateTimeFormatter.ISO_LOCAL_DATE))
}
}
sealed abstract class Dimension(val name: String) {
override def toString: String = name
}
object Dimension {
case object EventAction extends Dimension("ga:eventAction")
case object EventCategory extends Dimension("ga:eventCategory")
case object EventLabel extends Dimension("ga:eventLabel")
case object PagePath extends Dimension("ga:pagePath")
}
class Filters(val underlying: String) extends AnyVal {
override def toString: String = underlying
}
sealed abstract class Metric(val name: String) {
override def toString: String = name
}
object Metric {
case object Pageviews extends Metric("ga:pageviews")
case object TotalEvents extends Metric("ga:totalEvents")
}
sealed abstract class FilterOperator(val underlying: String) {
override def toString: String = underlying
}
object FilterOperator {
case object equals extends FilterOperator("==")
case object notEqual extends FilterOperator("!=")
case object greaterThan extends FilterOperator(">")
case object lessThan extends FilterOperator("<")
case object greaterThanEqual extends FilterOperator(">=")
case object lessThanEqual extends FilterOperator("<=")
case object containsSubstring extends FilterOperator("=@")
case object notContainsSubstring extends FilterOperator("!@")
case object matchRegex extends FilterOperator("=~")
case object notMatchRegex extends FilterOperator("!~")
case object and extends FilterOperator(";")
case object or extends FilterOperator(",")
}
|
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/OpportunitySpec.scala | package kipsigman.ws.salesforce
import java.time._
import java.time.format.DateTimeFormatter
import java.util.Calendar
import OpportunityContactRole.Role
import Product._
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
class OpportunitySpec extends WordSpec with Matchers with SampleData {
private val futureContract = {
val startCal = Calendar.getInstance()
startCal.add(Calendar.DAY_OF_YEAR, 2)
val endCal = Calendar.getInstance()
endCal.add(Calendar.DAY_OF_YEAR, 367)
sampleContract.copy(startDate = startCal.getTime, endDate = endCal.getTime)
}
private val expiredContract = {
val startCal = Calendar.getInstance()
startCal.add(Calendar.DAY_OF_YEAR, -367)
val endCal = Calendar.getInstance()
endCal.add(Calendar.DAY_OF_YEAR, -2)
sampleContract.copy(startDate = startCal.getTime, endDate = endCal.getTime)
}
private def replaceProduct(oli: OpportunityLineItem, product: Product): OpportunityLineItem = {
val newPricebookEntry = oli.pricebookEntry.copy(product = product)
oli.copy(pricebookEntry = newPricebookEntry)
}
private def replaceProduct(opp: Opportunity, product: Product): Opportunity = {
val newOli = replaceProduct(opp.opportunityLineItems.head, product)
opp.copy(opportunityLineItems = Seq(newOli))
}
"JSON deserialization" should {
"read valid JSON" in {
sampleOpportunityJson.validate[Opportunity] match {
case s: JsSuccess[Opportunity] => {
val opportunity = s.get
opportunity shouldBe sampleOpportunity
opportunity.account shouldBe sampleAccount
opportunity.closeDate.format(DateTimeFormatter.ISO_DATE) shouldBe "2015-09-02"
opportunity.name shouldBe "New PSS"
opportunity.stage shouldBe Opportunity.Stage.ClosedWon
opportunity.contracts.get.head shouldBe sampleContract
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
"isClosedWon" should {
"return true for stage = ClosedWon" in {
sampleOpportunity.isClosedWon shouldBe true
}
"return false for other stages" in {
sampleOpportunity.copy(stage = Opportunity.Stage.ClosedLost).isClosedWon shouldBe false
sampleOpportunity.copy(stage = Opportunity.Stage.NeedsAnalysis).isClosedWon shouldBe false
sampleOpportunity.copy(stage = Opportunity.Stage.ValueProposition).isClosedWon shouldBe false
}
}
"isCurrent" should {
"return true for a contract that is current" in {
sampleOpportunity.isCurrent shouldBe true
sampleOpportunity.isCurrentOrFuture shouldBe true
}
"return false for Contract outside of now" in {
sampleOpportunity.copy(contracts = Option(Seq(futureContract))).isCurrent shouldBe false
sampleOpportunity.copy(contracts = Option(Seq(expiredContract))).isCurrent shouldBe false
}
}
"isCurrentOrFuture" should {
"return true for a contract that is current" in {
sampleOpportunity.isCurrentOrFuture shouldBe true
}
"return true for a contract that is future" in {
sampleOpportunity.copy(contracts = Option(Seq(futureContract))).isCurrentOrFuture shouldBe true
}
"return false for an expired Contract" in {
sampleOpportunity.copy(contracts = Option(Seq(expiredContract))).isCurrentOrFuture shouldBe false
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/ProductSpec.scala | package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
import OpportunityContactRole.Role
import Product._
class ProductSpec extends WordSpec with Matchers with SampleData {
"JSON deserialization" should {
"read valid JSON" in {
sampleProductJson.validate[Product] match {
case s: JsSuccess[Product] => {
val product2 = s.get
product2 shouldBe sampleProduct
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
} |
kipsigman/async-web-service-clients | google-api-clients/src/main/scala/kipsigman/ws/google/youtube/YouTubeApiClient.scala | package kipsigman.ws.google.youtube
import javax.inject.Inject
import javax.inject.Singleton
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import com.typesafe.config.Config
import kipsigman.ws.google.GoogleApiClient
import kipsigman.ws.google.Page
/**
* Google YouTube API client.
* Documentation:
* - https://developers.google.com/youtube/v3/getting-started
* - http://developers.google.com/apis-explorer/#p/youtube/v3/
*/
@Singleton
class YouTubeApiClient @Inject() (config: Config)(implicit ec: ExecutionContext) extends GoogleApiClient(config) {
private def defaultChannelId = config.getString("google.youTube.defaultChannelId")
private val searchUrl = "https://www.googleapis.com/youtube/v3/search"
private val videosUrl = "https://www.googleapis.com/youtube/v3/videos"
def findVideos(ids: Seq[String]): Future[Page[YouTubeVideo]] = {
val params =
Map("part" -> "id,snippet",
"id" -> ids.mkString(","))
executeGetPage[YouTubeVideo](videosUrl, params)(ec, YouTubeVideo.videosReads)
}
/**
* Find videos with paging functionality.
* Page tokens are 0 based index page numbers.
*/
def findVideos(ids: Seq[String], resultsPerPage: Int = Page.defaultResultsPerPage, pageToken: Option[String] = None): Future[Page[YouTubeVideo]] = {
// Determine page (0 index)
val pageIndex = pageToken match {
case Some(pageNumStr) => pageNumStr.toInt
case None => 0
}
    // Validate page: a negative index means the pageToken was malformed
    if (pageIndex < 0) {
      return Future.failed(new IllegalArgumentException(s"Invalid pageToken $pageToken"))
    }
// Choose videoIds based on paging
    val intendedVideoIds = ids.drop(Page.offset(pageIndex, resultsPerPage))
    val selectedVideoIds = intendedVideoIds.take(resultsPerPage)
    logger.debug(s"ids=$ids")
    logger.debug(s"intendedVideoIds=$intendedVideoIds")
    logger.debug(s"selectedVideoIds=$selectedVideoIds")
    // Choose page tokens
    val prevPageToken = pageIndex match {
      case 0 => None
      case _ => Option((pageIndex - 1).toString)
    }
    val nextPageToken = intendedVideoIds.size match {
      case intendedVideosSize if (intendedVideosSize > resultsPerPage) => Option((pageIndex + 1).toString)
case _ => None
}
logger.debug(s"prevPageToken=$prevPageToken, nextPageToken=$nextPageToken")
// Returns videos from ids but with no paging
val rawPageFuture = findVideos(selectedVideoIds)
// Add paging
rawPageFuture.map(rawPage => rawPage.copy(prevPageToken = prevPageToken, nextPageToken = nextPageToken))
}
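  // Worked example of the paging arithmetic above (assuming 25 ids and the default 10 results
  // per page): pageToken Some("1") selects ids 10 to 19, sets prevPageToken to Some("0") and,
  // because 15 ids remain after the offset, nextPageToken to Some("2"); pageToken None selects
  // ids 0 to 9 with no prevPageToken.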
def findPublicVideosByChannel(channelId: String = defaultChannelId, q: Option[String] = None, resultsPerPage: Int = Page.defaultResultsPerPage, pageToken: Option[String] = None): Future[Page[YouTubeVideo]] = {
val pageParams = pageToken match {
case Some(token) => Map("pageToken" -> token)
case None => Map[String, String]()
}
val searchParams = q match {
case Some(qValue) => Map("q" -> qValue)
case None => Map[String, String]()
}
val params =
Map("part" -> "id,snippet",
"channelId" -> channelId,
"type" -> "video",
"maxResults" -> resultsPerPage.toString,
"order" -> "date") ++ searchParams ++ pageParams
executeGetPage[YouTubeVideo](searchUrl, params)(ec, YouTubeVideo.searchReads)
}
} |
kipsigman/async-web-service-clients | google-api-clients/src/test/scala/kipsigman/ws/google/youtube/YouTubeVideoSpec.scala | package kipsigman.ws.google.youtube
import java.util.Date
import org.scalatest.Matchers
import org.scalatest.WordSpec
class YouTubeVideoSpec extends WordSpec with Matchers {
val video = YouTubeVideo(
"27IaHM1u6bg",
new Date(),
"Introduction to Case Classes",
"A walkthrough on how to use Scala Case Classes to reduce boilerplate code, avoid bugs, construct/modify objects, and use pattern matching."
)
"url" should {
"return URL with id" in {
video.url shouldBe "//youtu.be/27IaHM1u6bg"
}
}
"embedUrl" should {
"return URL with id" in {
video.embedUrl shouldBe "//www.youtube.com/embed/27IaHM1u6bg"
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/it/scala/kipsigman/ws/salesforce/SalesforceApiClientIntegrationSpec.scala | package kipsigman.ws.salesforce
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import com.typesafe.config._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.Matchers
import org.scalatest.time._
import org.scalatest.WordSpec
import org.slf4j.LoggerFactory
trait SalesforceApiClientIntegrationSpec extends WordSpec with Matchers with ScalaFutures {
protected val logger = LoggerFactory.getLogger(this.getClass)
protected implicit val ec: ExecutionContext = ExecutionContext.Implicits.global
implicit override def patienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))
import java.util.Date
import java.text.SimpleDateFormat
@deprecated("Replace with java.time")
protected def formatDateTime(date: Date): String = {
val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z")
df.format(date)
}
// client for test
private val config = ConfigFactory.load()
protected val client: SalesforceApiClient = new SalesforceApiClient(config)
// Temp AccessInfo for client requests
logger.info("Requesting AccessInfo")
protected implicit val accessInfo = Await.result(client.refreshAccessInfo, 5 seconds)
logger.info(s"accessInfo=$accessInfo")
// Sample Data
protected val sampleAccount = Account(Option("0018A000002KQ7bQAG"), "005C00000079DQSIA2", "Kip's Karma Kiosks", Option(Account.AccountType.Customer))
protected val sampleContactEmail = "<EMAIL>"
protected val sampleContactName = "<NAME>"
protected val sampleContractId = "8008A0000008znAQAQ"
protected val sampleOpportunityId = "0068A000001sxan"
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/main/scala/kipsigman/ws/salesforce/SalesforceService.scala | package kipsigman.ws.salesforce
import java.time.LocalDate
import scala.concurrent.Future
import OpportunityContactRole.Role
/**
* Interface for Salesforce access.
*/
trait SalesforceService {
def findAccountById(id: String): Future[Option[Account]]
def findAccountByEmail(email: String): Future[Option[Account]]
def findContactByEmail(email: String): Future[Option[Contact]]
def saveContact(contact: Contact): Future[Contact]
def updateContactCurrentSubscriber(contact: Contact, currentSubscriber: Boolean): Future[Contact]
def findContractById(id: String): Future[Option[Contract]]
def findOpportunityById(id: String): Future[Option[Opportunity]]
def findOpportunities(ids: Seq[String]): Future[Seq[Opportunity]]
def findOpportunityByContractId(contractId: String): Future[Option[Opportunity]]
def findExpiredOpportunities(startDate: LocalDate, endDate: LocalDate): Future[Seq[Opportunity]]
def findRecentlyExpiredOpportunities(lastNDays: Int): Future[Seq[Opportunity]]
def findCurrentOpportunitiesByEmail(email: String): Future[Seq[Opportunity]]
def findCurrentOrFutureOpportunitiesByAccount(accountId: String): Future[Seq[Opportunity]]
def findCurrentOrFutureOpportunitiesByEmail(email: String): Future[Seq[Opportunity]]
def findOpportunityContactRoleById(id: String): Future[Option[OpportunityContactRole]]
def findOpportunityContactRole(opportunityId: String, role: Role, email: String): Future[Option[OpportunityContactRole]]
def findOpportunityContactRoles(opportunityId: String, role: Role): Future[Seq[OpportunityContactRole]]
def deleteOpportunityContactRole(id: String): Future[Boolean]
def findProductById(id: String): Future[Option[Product]]
}
|
kipsigman/async-web-service-clients | salesforce-api-client/src/main/scala/kipsigman/ws/salesforce/SalesforceApiClient.scala | <reponame>kipsigman/async-web-service-clients
package kipsigman.ws.salesforce
import java.time._
import java.time.format.DateTimeFormatter
import java.util.concurrent.ExecutionException
import javax.inject.{ Inject, Singleton }
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import org.slf4j.LoggerFactory
import dispatch._
import play.api.http.HttpVerbs
import play.api.libs.json._
import play.api.libs.json.Reads._
import play.api.libs.functional.syntax._
import com.typesafe.config.Config
import OpportunityContactRole.Role
import kipsigman.ws.client._
case class SalesforceConfig(
apiVersion: String,
appId: String,
appSecret: String,
oauthHost: String,
passtoken: String,
user: String) {
def this(config: Config) = this(
config.getString("sfdc.apiVersion"),
config.getString("sfdc.appId"),
config.getString("sfdc.appSecret"),
config.getString("sfdc.oauthHost"),
config.getString("sfdc.passtoken"),
config.getString("sfdc.user")
)
lazy val oauthTokenUrl: String = s"$oauthHost/services/oauth2/token"
lazy val servicePath: String = s"/services/data/$apiVersion"
def serviceUrl(instanceUrl: String): String = instanceUrl + servicePath
}
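// SalesforceConfig expects configuration of roughly the following shape (a sketch;
// the keys come from the getString calls above, the values are placeholders):
//
//   sfdc {
//     apiVersion = "v37.0"
//     appId      = "<connected app consumer key>"
//     appSecret  = "<connected app consumer secret>"
//     oauthHost  = "https://test.salesforce.com"
//     passtoken  = "<password concatenated with security token>"
//     user       = "<integration user login>"
//   }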
case class AccessInfo(accessToken: String, instanceUrl: String)
object AccessInfo {
implicit val reads: Reads[AccessInfo] = (
(JsPath \ "access_token").read[String] and
(JsPath \ "instance_url").read[String]
)(AccessInfo.apply _)
}
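// For reference, AccessInfo.reads consumes the standard Salesforce OAuth token response,
// which looks roughly like the following (other fields such as token_type are ignored;
// the values shown are placeholders):
//   { "access_token": "00Dxx0000000000!AQ...", "instance_url": "https://na1.salesforce.com" }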
/**
* Thrown when attempting to create a Contact with an email that belongs to an existing Contact.
*/
class ContactConflictException(msg: String) extends RuntimeException(msg)
/**
* Client for interfacing with the Salesforce API.
* Uses Salesforce Oauth 2 Username-Password flow:
* https://help.salesforce.com/apex/HTViewHelpDoc?id=remoteaccess_oauth_username_password_flow.htm&language=en_US
*/
@Singleton
class SalesforceApiClient @Inject() (config: Config)(protected implicit val ec: ExecutionContext) extends RestWebServiceClient {
private lazy val salesforceConfig = new SalesforceConfig(config)
private def serviceUrl(implicit accessInfo: AccessInfo) = salesforceConfig.serviceUrl(accessInfo.instanceUrl)
def refreshAccessInfo: Future[AccessInfo] = {
val postData = Map(
"grant_type" -> "password",
"client_id" -> salesforceConfig.appId,
"client_secret" -> salesforceConfig.appSecret,
"username" -> salesforceConfig.user,
"password" -> <PASSWORD>forceConfig.<PASSWORD>)
val req = url(salesforceConfig.oauthTokenUrl).secure.POST << postData
logger.debug(s"refreshAccessInfo: ${salesforceConfig.oauthTokenUrl} $postData")
executeJson(req, HttpVerbs.POST).map(jsValue => jsValue.get.as[AccessInfo])
}
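  // Illustrative usage sketch (not part of the original client API): obtain AccessInfo once
  // via the username-password flow above, then supply it implicitly to subsequent calls.
  // The account id below is a made-up placeholder.
  private[salesforce] def exampleAuthorizedLookup(): Future[Option[Account]] =
    refreshAccessInfo.flatMap { implicit accessInfo =>
      findAccountById("001xx000003DGb2AAG")
    }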
private[salesforce] def authorizeReq(req: Req)(implicit accessInfo: AccessInfo): Req = {
req.addHeader("Authorization", s"Bearer ${accessInfo.accessToken}")
}
private[salesforce] def executeReq(baseReq: Req, method: String)(implicit accessInfo: AccessInfo): Future[Option[JsValue]] = {
logger.debug(s"executeReq: accessInfo=$accessInfo")
val req = prepareReq(baseReq, method, postProcess = authorizeReq)
executeJson(req, method)
}
private[salesforce] def executeGet(baseReq: Req)(implicit accessInfo: AccessInfo): Future[Option[JsValue]] = {
executeReq(baseReq, HttpVerbs.GET)
}
private[salesforce] def executeQuery[T](query: String)(implicit accessInfo: AccessInfo, rds: Reads[T]): Future[Seq[T]] = {
logger.debug(s"executeQuery: $query")
val qparams = Map("q" -> query)
val baseReq = url(s"$serviceUrl/query") <<? qparams
executeGet(baseReq) map {
case Some(jsValue) => (jsValue \ "records").as[Seq[T]]
case None => Seq()
}
}
private[salesforce] def executeWrite(baseReq: Req, jsValue: JsValue, method: String)(implicit accessInfo: AccessInfo): Future[Option[JsValue]] = {
val jsonString = Json.stringify(jsValue)
val reqWithData = baseReq << jsonString
executeReq(reqWithData, method)
}
private[salesforce] def executePatch(baseReq: Req, jsValue: JsValue)(implicit accessInfo: AccessInfo): Future[Option[JsValue]] = {
executeWrite(baseReq, jsValue, HttpVerbs.PATCH)
}
private[salesforce] def executePost(baseReq: Req, jsValue: JsValue)(implicit accessInfo: AccessInfo): Future[Option[JsValue]] = {
executeWrite(baseReq, jsValue, HttpVerbs.POST)
}
private[salesforce] def delete[T <: SalesforceEntity](id: String)(implicit accessInfo: AccessInfo, objectName: ObjectName[T]): Future[Boolean] = {
val urlString = objectURL[T](Option(id))
val baseReq = url(urlString)
val method = HttpVerbs.DELETE
executeReq(baseReq, method).map(jsValue => true) recover {
case t: Throwable => false
}
}
/**
* Creates or updates entity with Salesforce, returns Id.
*/
private[salesforce] def save[T <: SalesforceEntity](entity: T)(implicit accessInfo: AccessInfo, objectName: ObjectName[T], wrts: Writes[T]): Future[String] = {
val entityUrl = objectURL[T](entity.id)
val baseReq = url(entityUrl)
val jsValue = Json.toJson(entity)
logger.debug(s"save: url=$entityUrl, jsValue=$jsValue")
entity.id match {
case Some(id) => {
// Perform update
executePatch(baseReq, jsValue).map(jsValueOption => id)
}
case None => {
// Perform create
executePost(baseReq, jsValue).map(jsValueOption => (jsValueOption.get \ "id").as[String])
}
}
}
private[salesforce] def saveObjectFields[T <: SalesforceEntity](id: String, jsValue: JsValue)(implicit accessInfo: AccessInfo, objectName: ObjectName[T]): Future[String] = {
val entityUrl = objectURL[T](Option(id))
val baseReq = url(entityUrl)
executePatch(baseReq, jsValue).map(jsValueOption => id)
}
private[salesforce] def saveObjectStringField[T <: SalesforceEntity](id: String, field: String, value: String)(implicit accessInfo: AccessInfo, objectName: ObjectName[T]): Future[String] = {
saveObjectFields(id, Json.obj(field -> value))
}
private[salesforce] def saveObjectIntField[T <: SalesforceEntity](id: String, field: String, value: Int)(implicit accessInfo: AccessInfo, objectName: ObjectName[T]): Future[String] = {
saveObjectFields(id, Json.obj(field -> value))
}
private[salesforce] def findById[T <: SalesforceEntity](id: String)(implicit accessInfo: AccessInfo, objectName: ObjectName[T], objectReads: Reads[T]): Future[Option[T]] = {
val theUrl = objectURL[T](Option(id))
val req = url(theUrl)
executeGet(req).map(_.map(_.as[T]))
}
def findAccountById(id: String)(implicit accessInfo: AccessInfo): Future[Option[Account]] = {
findById[Account](id)
}
def findAccountByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Option[Account]] = {
findContactByEmail(email).flatMap {
case Some(contact) => {
if (contact.account.id.isDefined)
findAccountById(contact.account.id.get)
else
Future.successful(None)
}
case None => Future.successful(None)
}
}
def findContactByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Option[Contact]] = {
val query = s"""${Contact.selectClause} WHERE Email = '$email' ORDER BY CreatedDate"""
executeQuery[Contact](query).map(_.headOption)
}
def saveContact(contact: Contact)(implicit accessInfo: AccessInfo): Future[Contact] = {
val writes = contact.id match {
case Some(id) => Contact.updateWrites
case None => Contact.createWrites
}
save(contact)(accessInfo, Contact.objectName, writes).map(id => contact.copy(id = Option(id)))
}
def updateContactCurrentSubscriber(contact: Contact, currentSubscriber: Boolean)(implicit accessInfo: AccessInfo): Future[Contact] = {
val updatedContact = contact.copy(currentSubscriber = currentSubscriber)
    saveContact(updatedContact)
}
def findContractById(id: String)(implicit accessInfo: AccessInfo): Future[Option[Contract]] = {
// Doing a query instead of an object lookup to get the Account joined fields
val query = s"""${Contract.selectClause} WHERE Id = '$id'"""
executeQuery[Contract](query).map(_.headOption)
}
/**
* Finds Contracts which expired between startDate & endDate.
*/
private[salesforce] def findExpiredContracts(startDate: LocalDate, endDate: LocalDate)(implicit accessInfo: AccessInfo): Future[Seq[Contract]] = {
val startDateStr = startDate.format(DateTimeFormatter.ISO_DATE)
val endDateStr = endDate.format(DateTimeFormatter.ISO_DATE)
logger.debug(s"findExpiredContracts($startDateStr, $endDateStr)")
val query = s"""${Contract.selectClause}
WHERE EndDate >= ${startDateStr} AND EndDate < ${endDateStr}
ORDER BY EndDate
"""
executeQuery[Contract](query)
}
private[salesforce] def findRecentlyExpiredContracts(lastNDays: Int)(implicit accessInfo: AccessInfo): Future[Seq[Contract]] = {
val query = s"""${Contract.selectClause}
WHERE EndDate = LAST_N_DAYS:$lastNDays
ORDER BY EndDate
"""
executeQuery[Contract](query)
}
def findOpportunityById(id: String)(implicit accessInfo: AccessInfo): Future[Option[Opportunity]] = {
val query = s"""${Opportunity.selectClause} WHERE Id = '$id'"""
executeQuery[Opportunity](query).map(_.headOption)
}
def findOpportunities(ids: Seq[String])(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val opportunityIdsStr = ids.mkString("'", "','", "'")
val query = s"""${Opportunity.selectClause}
WHERE Id IN ($opportunityIdsStr)
ORDER BY CloseDate
"""
executeQuery[Opportunity](query)
}
def findOpportunityByContractId(contractId: String)(implicit accessInfo: AccessInfo): Future[Option[Opportunity]] = {
val contractOptionFuture = findContractById(contractId)
contractOptionFuture flatMap {
case Some(contract) => {
findOpportunityById(contract.opportunityId)
}
case None => Future.successful(None)
}
}
private[salesforce] def findRecentlyUpdatedClosedWonOpportunities(sinceDateTime: ZonedDateTime)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val sinceStr = sinceDateTime.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)
val query = s"""${Opportunity.selectClause}
WHERE IsClosed = true and IsWon = true AND LastModifiedDate > $sinceStr
ORDER BY LastModifiedDate
"""
executeQuery[Opportunity](query)
}
def findExpiredOpportunities(startDate: LocalDate, endDate: LocalDate)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val opportunitiesFuture = findExpiredContracts(startDate, endDate).flatMap(contracts => {
val opportunityIds = contracts.map(_.opportunityId)
findOpportunities(opportunityIds)
})
opportunitiesFuture.map(_.filter(opp => opp.isClosedWon))
}
def findRecentlyExpiredOpportunities(lastNDays: Int)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val opportunitiesFuture = findRecentlyExpiredContracts(lastNDays).flatMap(contracts => {
val opportunityIds = contracts.map(_.opportunityId)
findOpportunities(opportunityIds)
})
opportunitiesFuture.map(_.filter(opp => opp.isClosedWon))
}
private[salesforce] def findOpportunitiesForAccount(accountId: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val query = s"""${Opportunity.selectClause}
WHERE AccountId = '$accountId'
ORDER BY CloseDate
"""
executeQuery[Opportunity](query)
}
private[salesforce] def findClosedWonOpportunitiesByAccount(accountId: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val query = s"""${Opportunity.selectClause}
WHERE IsClosed = true and IsWon = true AND Account.Id = '$accountId'
ORDER BY CloseDate
"""
executeQuery[Opportunity](query)
}
/**
   * Finds Opportunities for a given contact email that are Closed Won.
*/
private[salesforce] def findOpportunitiesByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
val opportunitiesFuture = findOpportunityContactRolesByEmail(email).flatMap(opportunityContactRoles => {
val opportunityIds = opportunityContactRoles.map(_.opportunityId)
findOpportunities(opportunityIds)
})
opportunitiesFuture.map(_.filter(opp => opp.isClosedWon))
}
def findCurrentOpportunitiesByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
findOpportunitiesByEmail(email).map(_.filter(_.isCurrent))
}
def findCurrentOrFutureOpportunitiesByAccount(accountId: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
findClosedWonOpportunitiesByAccount(accountId).map(_.filter(_.isCurrentOrFuture))
}
def findCurrentOrFutureOpportunitiesByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Seq[Opportunity]] = {
findOpportunitiesByEmail(email).map(_.filter(_.isCurrentOrFuture))
}
/**
* TODO: Use another field than Sherpa timestamp
* Resets the Sherpa Timestamp on an Opportunity which will cause a save and
* trigger Sherpa to process the Opportunity & Named Contacts.
* Note: Sherpa Timestamp isn't read by anything, it's just a field to update.
*/
private[salesforce] def touchOpportunity(opportunityId: String)(implicit accessInfo: AccessInfo): Unit = {
val entityUrl = objectURL[Opportunity](Option(opportunityId))
val now = ZonedDateTime.now().format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)
val jsValue = Json.obj("Sherpa_Timestamp__c" -> now)
val baseReq = url(entityUrl)
executePatch(baseReq, jsValue)
}
def findOpportunityContactRoleById(id: String)(implicit accessInfo: AccessInfo): Future[Option[OpportunityContactRole]] = {
val query = s"${OpportunityContactRole.selectClause} WHERE Id = '$id'"
executeQuery[OpportunityContactRole](query).map(_.headOption)
}
def findOpportunityContactRole(opportunityId: String, role: Role, email: String)(implicit accessInfo: AccessInfo): Future[Option[OpportunityContactRole]] = {
val query = OpportunityContactRole.selectClause +
s" WHERE OpportunityId='$opportunityId' AND Role='${role.name}' AND Contact.Email='$email'"
executeQuery[OpportunityContactRole](query).map(_.headOption)
}
def findOpportunityContactRoles(opportunityId: String, role: Role)(implicit accessInfo: AccessInfo): Future[Seq[OpportunityContactRole]] = {
val query = s"""${OpportunityContactRole.selectClause} WHERE OpportunityId = '$opportunityId' AND Role='${role.name}' ORDER BY Contact.LastName, Contact.FirstName"""
executeQuery[OpportunityContactRole](query)
}
private[salesforce] def findOpportunityContactRolesByEmail(email: String)(implicit accessInfo: AccessInfo): Future[Seq[OpportunityContactRole]] = {
val query = s"""${OpportunityContactRole.selectClause} WHERE Contact.Email = '$email'"""
executeQuery[OpportunityContactRole](query)
}
private[salesforce] def saveOpportunityContactRole(ocr: OpportunityContactRole)(implicit accessInfo: AccessInfo): Future[OpportunityContactRole] = {
ocr.id match {
case Some(id) => {
save(ocr)(accessInfo, OpportunityContactRole.objectName, OpportunityContactRole.updateWrites).map(id => ocr)
}
case None => {
save(ocr)(accessInfo, OpportunityContactRole.objectName, OpportunityContactRole.createWrites).map(id => ocr.copy(id = Option(id)))
}
}
}
def deleteOpportunityContactRole(id: String)(implicit accessInfo: AccessInfo): Future[Boolean] = {
findOpportunityContactRoleById(id) flatMap {
case Some(ocr) => {
val successFuture = delete[OpportunityContactRole](id)
      successFuture.foreach { success =>
        if (success) {
          touchOpportunity(ocr.opportunityId)
          updateContactCurrentSubscriber(ocr.contact, false)
        }
      }
successFuture
}
case None => Future.successful(false)
}
}
def findProductById(id: String)(implicit accessInfo: AccessInfo): Future[Option[Product]] = {
findById[Product](id)
}
private[salesforce] def objectURL[T <: SalesforceEntity](id: Option[String])(implicit accessInfo: AccessInfo, objectName: ObjectName[T]) = {
s"${serviceUrl}/sobjects/${objectName.raw}" + id.map(idVal => s"/$idVal").getOrElse("")
}
} |
kipsigman/async-web-service-clients | google-api-clients/src/it/scala/kipsigman/ws/google/youtube/YouTubeApiClientIntegrationSpec.scala | <filename>google-api-clients/src/it/scala/kipsigman/ws/google/youtube/YouTubeApiClientIntegrationSpec.scala
package kipsigman.ws.google.youtube
import org.scalatest.Finders
import org.scalatest.Matchers
import org.scalatest.WordSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.Millis
import org.scalatest.time.Seconds
import org.scalatest.time.Span
import org.slf4j.LoggerFactory
import com.typesafe.config.ConfigFactory
class YouTubeApiClientIntegrationSpec extends WordSpec with Matchers with ScalaFutures {
private val logger = LoggerFactory.getLogger(this.getClass)
// implicit ExecutionContext
import scala.concurrent.ExecutionContext.Implicits.global
implicit override def patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(100, Millis))
val config = ConfigFactory.load()
val repository = new YouTubeApiClient(config)
// Test data (Kip Sigman channel)
val testChannelId = "UCHSFaued2BlLtVlTda3JyNw"
val testVideoIds = Seq("27IaHM1u6bg", "FdJiy42ExEM")
"findPublicVideos" should {
"return public videos for default channel" in {
val pageFuture = repository.findPublicVideosByChannel(channelId = testChannelId, resultsPerPage = 5)
whenReady(pageFuture) { page =>
page.resultsPerPage shouldBe 5
page.totalResults should be > 5
page.items.size shouldBe 5
page.prevPageToken shouldBe None
page.nextPageToken shouldBe Some("CAUQAA")
}
val page2Future = repository.findPublicVideosByChannel(channelId = testChannelId, pageToken = Some("CAUQAA"))
whenReady(page2Future) { page =>
page.items.size shouldBe 2
page.prevPageToken shouldBe Some("CAUQAQ")
page.nextPageToken shouldBe None
}
}
"return public videos for default channel with search term" in {
val pageFuture = repository.findPublicVideosByChannel(channelId = testChannelId, q = Some("case classes"))
whenReady(pageFuture) { page =>
page.items.size shouldBe 1
page.resultsPerPage shouldBe 10
page.totalResults should be > 0
}
}
}
"findVideos" should {
"return videos for ids" in {
val pageFuture = repository.findVideos(testVideoIds)
whenReady(pageFuture) { page =>
page.resultsPerPage shouldBe testVideoIds.size
page.totalResults shouldBe testVideoIds.size
page.items.size shouldBe testVideoIds.size
page.prevPageToken shouldBe None
page.nextPageToken shouldBe None
}
}
}
} |
kipsigman/async-web-service-clients | google-api-clients/src/test/scala/kipsigman/ws/google/analytics/AnalyticsApiClientSpec.scala | package kipsigman.ws.google.analytics
import java.time.LocalDate
import org.scalatest.Matchers
import org.scalatest.WordSpec
class AnalyticsApiClientSpec extends WordSpec with Matchers {
"Date.fromLocalDate" should {
"format a LocalDate using ISO" in {
val localDate = LocalDate.of(2016, 1, 28)
Date.fromLocalDate(localDate).toString shouldBe "2016-01-28"
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/main/scala/kipsigman/ws/salesforce/actor/SalesforceActorService.scala | <reponame>kipsigman/async-web-service-clients
package kipsigman.ws.salesforce.actor
import java.time.LocalDate
import javax.inject.{ Inject, Singleton }
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.Config
import kipsigman.ws.salesforce._
import OpportunityContactRole.Role
import SalesforceActor.ApiRequest
/**
* Service for interfacing indirectly with the Salesforce API via an Actor.
*/
@Singleton
class SalesforceActorService @Inject() (actorSystem: ActorSystem, config: Config) extends SalesforceService {
import actorSystem.dispatcher
implicit val timeout = Timeout(15 seconds)
val actorRef = SalesforceActor(actorSystem, config)
override def findAccountById(id: String): Future[Option[Account]] = {
val req = ApiRequest((repository, accessInfo) => repository.findAccountById(id)(accessInfo))
(actorRef ? req).mapTo[Option[Account]]
}
override def findAccountByEmail(email: String): Future[Option[Account]] = {
val req = ApiRequest((repository, accessInfo) => repository.findAccountByEmail(email)(accessInfo))
(actorRef ? req).mapTo[Option[Account]]
}
override def findContactByEmail(email: String): Future[Option[Contact]] = {
val req = ApiRequest((repository, accessInfo) => repository.findContactByEmail(email)(accessInfo))
(actorRef ? req).mapTo[Option[Contact]]
}
override def saveContact(contact: Contact): Future[Contact] = {
val req = ApiRequest((repository, accessInfo) => repository.saveContact(contact)(accessInfo))
(actorRef ? req).mapTo[Contact]
}
override def updateContactCurrentSubscriber(contact: Contact, currentSubscriber: Boolean): Future[Contact] = {
val req = ApiRequest((repository, accessInfo) => repository.updateContactCurrentSubscriber(contact, currentSubscriber)(accessInfo))
(actorRef ? req).mapTo[Contact]
}
override def findContractById(id: String): Future[Option[Contract]] = {
val req = ApiRequest((repository, accessInfo) => repository.findContractById(id)(accessInfo))
(actorRef ? req).mapTo[Option[Contract]]
}
override def findOpportunityById(id: String): Future[Option[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunityById(id)(accessInfo))
(actorRef ? req).mapTo[Option[Opportunity]]
}
override def findOpportunities(ids: Seq[String]): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunities(ids)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findOpportunityByContractId(contractId: String): Future[Option[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunityByContractId(contractId)(accessInfo))
(actorRef ? req).mapTo[Option[Opportunity]]
}
override def findExpiredOpportunities(startDate: LocalDate, endDate: LocalDate): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findExpiredOpportunities(startDate, endDate)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findRecentlyExpiredOpportunities(lastNDays: Int): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findRecentlyExpiredOpportunities(lastNDays)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findCurrentOpportunitiesByEmail(email: String): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findCurrentOpportunitiesByEmail(email)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findCurrentOrFutureOpportunitiesByAccount(accountId: String): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findCurrentOrFutureOpportunitiesByAccount(accountId)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findCurrentOrFutureOpportunitiesByEmail(email: String): Future[Seq[Opportunity]] = {
val req = ApiRequest((repository, accessInfo) => repository.findCurrentOrFutureOpportunitiesByEmail(email)(accessInfo))
(actorRef ? req).mapTo[Seq[Opportunity]]
}
override def findOpportunityContactRoleById(id: String): Future[Option[OpportunityContactRole]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunityContactRoleById(id)(accessInfo))
(actorRef ? req).mapTo[Option[OpportunityContactRole]]
}
override def findOpportunityContactRole(opportunityId: String, role: Role, email: String): Future[Option[OpportunityContactRole]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunityContactRole(opportunityId, role, email)(accessInfo))
(actorRef ? req).mapTo[Option[OpportunityContactRole]]
}
override def findOpportunityContactRoles(opportunityId: String, role: Role): Future[Seq[OpportunityContactRole]] = {
val req = ApiRequest((repository, accessInfo) => repository.findOpportunityContactRoles(opportunityId, role)(accessInfo))
(actorRef ? req).mapTo[Seq[OpportunityContactRole]]
}
override def deleteOpportunityContactRole(id: String): Future[Boolean] = {
val req = ApiRequest((repository, accessInfo) => repository.deleteOpportunityContactRole(id)(accessInfo))
(actorRef ? req).mapTo[Boolean]
}
override def findProductById(id: String): Future[Option[Product]] = {
val req = ApiRequest((repository, accessInfo) => repository.findProductById(id)(accessInfo))
(actorRef ? req).mapTo[Option[Product]]
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/SalesforceConfigSpec.scala | package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.typesafe.config._
class SalesforceConfigSpec extends WordSpec with Matchers with SampleData {
val config = ConfigFactory.load()
val sfc = new SalesforceConfig(config)
"properties" should {
"match app/library config" in {
sfc.apiVersion shouldBe config.getString("sfdc.apiVersion")
sfc.appId shouldBe config.getString("sfdc.appId")
sfc.appSecret shouldBe config.getString("sfdc.appSecret")
sfc.oauthHost shouldBe config.getString("sfdc.oauthHost")
sfc.passtoken shouldBe config.getString("sfdc.passtoken")
sfc.user shouldBe config.getString("sfdc.user")
sfc.servicePath shouldBe s"/services/data/${sfc.apiVersion}"
}
}
"oauthTokenUrl" should {
"return oauthHost + path" in {
sfc.oauthTokenUrl shouldBe "https://test.salesforce.com/services/oauth2/token"
}
}
"serviceUrl" should {
"return instance + path" in {
val instanceUrl = "https://cs45.salesforce.com"
sfc.serviceUrl(instanceUrl) shouldBe s"${instanceUrl}${sfc.servicePath}"
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/OpportunityLineItemSpec.scala | <filename>salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/OpportunityLineItemSpec.scala
package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
import Product._
class OpportunityLineItemSpec extends WordSpec with Matchers with SampleData {
"JSON deserialization" should {
"read valid JSON" in {
sampleOpportunityLineItemJson.validate[OpportunityLineItem] match {
case s: JsSuccess[OpportunityLineItem] => {
val oli = s.get
oli shouldBe sampleOpportunityLineItem
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
"productDisplayText" should {
"return Product.name and quantity if not 1" in {
val product = Product(Option("01tC0000003W5oFIAS"), "1 Year Subscription", ProductFamily.A)
val pricebookEntry = PricebookEntry(Option("01uC0000008zQcyIAE"), product)
val oli = OpportunityLineItem(Option("00kM0000007CAsDIAW"), 2, pricebookEntry)
oli.productDisplayText shouldBe "1 Year Subscription (2)"
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/main/scala/kipsigman/ws/salesforce/Models.scala | <reponame>kipsigman/async-web-service-clients
package kipsigman.ws.salesforce
import java.time._
import java.util.Date
import play.api.data.validation.ValidationError
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.libs.json.Reads._
trait SalesforceEntity {
def id: Option[String]
}
class ObjectName[T <: SalesforceEntity](val raw: String) {
override def toString = raw
}
trait ApiMetadata[T <: SalesforceEntity] {
implicit val objectName: ObjectName[T]
implicit val reads: Reads[T]
val selectFields: Seq[String]
final def selectFieldsStr: String = selectFields.mkString(", ")
final def objectPrefixedSelectFields: Seq[String] = selectFields.map(field => s"${objectName.raw}.$field")
/**
* Override for complex objects with sub-selects
*/
def selectClause: String = s"SELECT $selectFieldsStr FROM $objectName"
}
case class Account(id: Option[String], ownerId: String, name: String, accountType: Option[Account.AccountType]) extends SalesforceEntity
object Account extends ApiMetadata[Account] {
override implicit val objectName = new ObjectName[Account]("Account")
override implicit val reads: Reads[Account] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "OwnerId").read[String] and
(JsPath \ "Name").read[String] and
(JsPath \ "Type").readNullable[AccountType](AccountType.reads)
)(Account.apply _)
val updateWrites: Writes[Account] = (
(JsPath \ "Name").write[String] and
(JsPath \ "Type").writeNullable[String]
)((account: Account) => (account.name, account.accountType.map(_.name)))
override val selectFields = Seq("Id", "OwnerId", "Name", "Type")
sealed abstract class AccountType(val name: String) {
override def toString: String = name
}
object AccountType {
case object Analyst extends AccountType("Analyst")
case object Competitor extends AccountType("Competitor")
case object Customer extends AccountType("Customer")
case object Integrator extends AccountType("Integrator")
case object Investor extends AccountType("Investor")
case object Partner extends AccountType("Partner")
case object Press extends AccountType("Press")
case object Prospect extends AccountType("Prospect")
case object Reseller extends AccountType("Reseller")
case object Other extends AccountType("Other")
case class Unknown(override val name: String) extends AccountType(name)
val all: Set[AccountType] = Set(
Analyst, Competitor, Customer, Integrator,
Investor, Partner, Press, Prospect,
Reseller, Other
)
def apply(name: String): AccountType = {
val stage = all.find(s => s.name == name)
stage.getOrElse(Unknown(name))
}
implicit val reads = new Reads[AccountType] {
def reads(json: JsValue) = json match {
case JsString(s) => JsSuccess(AccountType(s))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
}
}
case class Contact(
id: Option[String],
account: Account,
firstName: String,
lastName: String,
email: String,
phone: Option[String],
currentSubscriber: Boolean = false) extends SalesforceEntity {
def this(account: Account, contactData: ContactData) = {
this(None, account, contactData.firstName, contactData.lastName, contactData.email, contactData.phone)
}
def update(contactData: ContactData): Contact =
this.copy(firstName = contactData.firstName, lastName = contactData.lastName, email = contactData.email, phone = contactData.phone)
val name = s"$firstName $lastName"
}
object Contact extends ApiMetadata[Contact] {
override implicit val objectName = new ObjectName[Contact]("Contact")
implicit val reads: Reads[Contact] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Account").read[Account] and
(JsPath \ "FirstName").read[String] and
(JsPath \ "LastName").read[String] and
(JsPath \ "Email").read[String] and
(JsPath \ "Phone").readNullable[String] and
(JsPath \ "Current_Subscriber__c").read[Boolean]
)(Contact.apply _)
val createWrites: Writes[Contact] = (
(JsPath \ "AccountId").write[String] and
(JsPath \ "OwnerId").write[String] and
(JsPath \ "FirstName").write[String] and
(JsPath \ "LastName").write[String] and
(JsPath \ "Email").write[String] and
(JsPath \ "Phone").writeNullable[String] and
(JsPath \ "Current_Subscriber__c").write[Boolean]
)((contact: Contact) => (contact.account.id.get, contact.account.ownerId, contact.firstName, contact.lastName, contact.email, contact.phone, contact.currentSubscriber))
val updateWrites: Writes[Contact] = (
(JsPath \ "FirstName").write[String] and
(JsPath \ "LastName").write[String] and
(JsPath \ "Email").write[String] and
(JsPath \ "Phone").writeNullable[String] and
(JsPath \ "Current_Subscriber__c").write[Boolean]
)((contact: Contact) => (contact.firstName, contact.lastName, contact.email, contact.phone, contact.currentSubscriber))
override val selectFields = Seq("Id") ++ Account.objectPrefixedSelectFields ++ Seq("FirstName", "LastName", "Email", "Phone", "Current_Subscriber__c")
}
case class ContactData(
firstName: String = "",
lastName: String = "",
email: String = "",
phone: Option[String] = None)
case class Contract(
id: Option[String],
opportunityId: String,
startDate: Date,
endDate: Date) extends SalesforceEntity {
def isCurrentOrFuture = endDate.after(new Date())
def isCurrent = {
val now = new Date()
startDate.before(now) && endDate.after(now)
}
def isExpired = !isCurrentOrFuture
}
object Contract extends ApiMetadata[Contract] {
override implicit val objectName = new ObjectName[Contract]("Contract")
// Note: Date strings are probably in PST from Salesforce, but system is UTC. So there may be a 7-8 hour discrepancy when using default Reads.
// This can be fixed by creating a custom DateReads that converts the time zone.
implicit val reads: Reads[Contract] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Opportunity__c").read[String] and
(JsPath \ "StartDate").read[Date] and
(JsPath \ "EndDate").read[Date]
)(Contract.apply _)
override val selectFields = Seq("Id", "Opportunity__c", "StartDate", "EndDate")
}
case class Opportunity(
id: Option[String],
account: Account,
closeDate: LocalDate,
name: String,
stage: Opportunity.Stage,
contracts: Option[Seq[Contract]],
opportunityLineItems: Seq[OpportunityLineItem]) extends SalesforceEntity {
lazy val contract: Option[Contract] = contracts.flatMap(_.headOption)
lazy val isInSalesPipeline: Boolean = {
import Opportunity.Stage._
stage match {
case ClosedLost => false
case Unknown(name) => false
case _ => true
}
}
lazy val isClosedWon: Boolean = stage == Opportunity.Stage.ClosedWon
def isCurrent: Boolean = {
isClosedWon && (contract match {
case Some(contract) => contract.isCurrent
case None => false
})
}
def isCurrentOrFuture: Boolean = {
isClosedWon && (contract match {
case Some(contract) => contract.isCurrentOrFuture
case None => false
})
}
def isExpired: Boolean = {
isClosedWon && (contract match {
case Some(contract) => contract.isExpired
case None => true
})
}
lazy val products: Set[Product] = opportunityLineItems.map(_.pricebookEntry.product).toSet
lazy val productsDisplayText = opportunityLineItems.map(oli => oli.productDisplayText).mkString(", ")
}
object Opportunity extends ApiMetadata[Opportunity] {
implicit val objectName = new ObjectName[Opportunity]("Opportunity")
implicit val reads: Reads[Opportunity] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Account").read[Account] and
(JsPath \ "CloseDate").read[LocalDate] and
(JsPath \ "Name").read[String] and
(JsPath \ "StageName").read[Stage](Stage.reads) and
(JsPath \ "Contracts__r" \ "records").readNullable[Seq[Contract]] and
(JsPath \ "OpportunityLineItems" \ "records").read[Seq[OpportunityLineItem]]
)(Opportunity.apply _)
// Only define primary select fields, child collections defined in SELECT statement
override val selectFields =
Seq("Id") ++
Account.objectPrefixedSelectFields ++
Seq("CloseDate", "Name", "StageName")
override def selectClause = s"SELECT ${selectFieldsStr}" +
s", (SELECT ${Contract.selectFieldsStr} FROM Contracts__r)" +
s", (SELECT ${OpportunityLineItem.selectFieldsStr} FROM OpportunityLineItems)" +
s" FROM ${objectName}"
sealed abstract class Stage(val name: String) {
override def toString: String = name
}
object Stage {
// Open
case object Prospecting extends Stage("Prospecting")
case object Qualification extends Stage("Qualification")
case object NeedsAnalysis extends Stage("Needs Analysis")
case object ValueProposition extends Stage("Value Proposition")
case object IdDecisionMakers extends Stage("ID Decision Makers")
case object PerceptionAnalysis extends Stage("Perception Analysis")
case object ProposalPriceQuote extends Stage("Proposal/Price Quote")
case object NegotiationReview extends Stage("Negotiation/Review")
// Closed
case object ClosedWon extends Stage("Closed Won")
case object ClosedLost extends Stage("Closed Lost")
final case class Unknown(override val name: String) extends Stage(name)
val stages: Set[Stage] = Set(
Prospecting, Qualification, NeedsAnalysis, ValueProposition,
IdDecisionMakers, PerceptionAnalysis, ProposalPriceQuote, NegotiationReview,
ClosedLost, ClosedWon
)
def apply(name: String): Stage = {
val stage = stages.find(s => s.name == name)
stage.getOrElse(Unknown(name))
}
implicit val reads = new Reads[Stage] {
def reads(json: JsValue) = json match {
case JsString(s) => JsSuccess(Stage(s))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
}
}
case class OpportunityContactRole(
id: Option[String],
opportunityId: String,
role: OpportunityContactRole.Role,
isPrimary: Boolean,
contact: Contact) extends SalesforceEntity
object OpportunityContactRole extends ApiMetadata[OpportunityContactRole] {
implicit val objectName = new ObjectName[OpportunityContactRole]("OpportunityContactRole")
implicit val reads: Reads[OpportunityContactRole] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "OpportunityId").read[String] and
(JsPath \ "Role").read[Role](Role.reads) and
(JsPath \ "IsPrimary").read[Boolean] and
(JsPath \ "Contact").read[Contact]
)(OpportunityContactRole.apply _)
val createWrites: Writes[OpportunityContactRole] = (
(JsPath \ "OpportunityId").write[String] and
(JsPath \ "Role").write[String] and
(JsPath \ "IsPrimary").write[Boolean] and
(JsPath \ "ContactId").write[String]
)((ocr: OpportunityContactRole) => (ocr.opportunityId, ocr.role.name, ocr.isPrimary, ocr.contact.id.get))
val updateWrites: Writes[OpportunityContactRole] = (
(JsPath \ "Role").write[String] and
(JsPath \ "IsPrimary").write[Boolean] and
(JsPath \ "ContactId").write[String]
)((ocr: OpportunityContactRole) => (ocr.role.name, ocr.isPrimary, ocr.contact.id.get))
override val selectFields = Seq("Id", "OpportunityId", "Role", "IsPrimary") ++ Contact.objectPrefixedSelectFields
sealed abstract class Role(val name: String) {
override def toString: String = name
}
object Role {
case object BusinessUser extends Role("Business User")
case object DecisionMaker extends Role("Decision Maker")
case object EconomicBuyer extends Role("Economic Buyer")
case object EconomicDecisionMaker extends Role("Economic Decision Maker")
case object Evaluator extends Role("Evaluator")
case object ExecutiveSponsor extends Role("Executive Sponsor")
case object Influencer extends Role("Influencer")
case object TechnicalBuyer extends Role("Technical Buyer")
case object Other extends Role("Other")
case class Unknown(override val name: String) extends Role(name)
val all: Set[Role] = Set(
BusinessUser,
DecisionMaker,
EconomicBuyer,
EconomicDecisionMaker,
Evaluator,
ExecutiveSponsor,
Influencer,
TechnicalBuyer,
Other)
def apply(name: String): Role = {
val role = all.find(role => role.name == name)
role.getOrElse(Unknown(name))
}
implicit val reads = new Reads[Role] {
def reads(json: JsValue) = json match {
case JsString(s) => JsSuccess(Role(s))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
}
}
case class OpportunityLineItem(id: Option[String], quantity: Int, pricebookEntry: PricebookEntry) extends SalesforceEntity {
val productDisplayText = quantity match {
case 1 => pricebookEntry.product.name
case q => s"${pricebookEntry.product.name} ($q)"
}
}
object OpportunityLineItem extends ApiMetadata[OpportunityLineItem] {
implicit val objectName = new ObjectName[OpportunityLineItem]("OpportunityLineItem")
implicit val reads: Reads[OpportunityLineItem] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Quantity").read[Int] and
(JsPath \ "PricebookEntry").read[PricebookEntry]
)(OpportunityLineItem.apply _)
override val selectFields = Seq("Id", "Quantity") ++ PricebookEntry.objectPrefixedSelectFields
}
case class PricebookEntry(id: Option[String], product: Product) extends SalesforceEntity
object PricebookEntry extends ApiMetadata[PricebookEntry] {
implicit val objectName = new ObjectName[PricebookEntry]("PricebookEntry")
implicit val reads: Reads[PricebookEntry] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Product2").read[Product]
)(PricebookEntry.apply _)
override val selectFields = Seq("Id") ++ Product.objectPrefixedSelectFields
}
case class Product(
id: Option[String],
name: String,
family: Product.ProductFamily) extends SalesforceEntity
object Product extends ApiMetadata[Product] {
implicit val objectName = new ObjectName[Product]("Product2")
implicit val reads: Reads[Product] = (
(JsPath \ "Id").readNullable[String] and
(JsPath \ "Name").read[String] and
(JsPath \ "Family").read[ProductFamily](ProductFamily.reads)
)(Product.apply _)
override val selectFields = Seq("Id", "Name", "Family")
sealed abstract class ProductFamily(val name: String) {
override def toString: String = name
}
object ProductFamily {
case object A extends ProductFamily("")
case object B extends ProductFamily("B")
final case class Other(override val name: String) extends ProductFamily(name)
val all: Set[ProductFamily] = Set(A, B)
def apply(name: String): ProductFamily = {
val productFamilyOption = all.find(productFamily => productFamily.name == name)
productFamilyOption.getOrElse(Other(name))
}
implicit val reads = new Reads[ProductFamily] {
def reads(json: JsValue) = json match {
case JsString(s) => JsSuccess(ProductFamily(s))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/ContactSpec.scala | package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
class ContactSpec extends WordSpec with Matchers with SampleData {
"selectFields" should {
"return the correct representations" in {
Contact.selectFields shouldBe Seq("Id", "Account.Id", "Account.OwnerId", "Account.Name", "Account.Type", "FirstName", "LastName", "Email", "Phone", "Current_Subscriber__c")
Contact.selectFieldsStr shouldBe "Id, Account.Id, Account.OwnerId, Account.Name, Account.Type, FirstName, LastName, Email, Phone, Current_Subscriber__c"
Contact.objectPrefixedSelectFields shouldBe Seq("Contact.Id", "Contact.Account.Id", "Contact.Account.OwnerId", "Contact.Account.Name", "Contact.Account.Type", "Contact.FirstName", "Contact.LastName", "Contact.Email", "Contact.Phone", "Contact.Current_Subscriber__c")
}
}
"JSON deserialization" should {
"read valid JSON" in {
sampleContactJson.validate[Contact] match {
case s: JsSuccess[Contact] => {
val contact = s.get
contact shouldBe sampleContact
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
"name" should {
"concatenate firstName and lastName" in {
sampleContact.name shouldBe "<NAME>"
}
}
"update" should {
"replace non-reference fields" in {
val contactData = ContactData("Johnny", "Utah", "<EMAIL>", Option("(805) 867-5309"))
val updatedContact = sampleContact.update(contactData)
updatedContact.id shouldBe sampleContact.id
updatedContact.account shouldBe sampleContact.account
updatedContact.firstName shouldBe contactData.firstName
updatedContact.lastName shouldBe contactData.lastName
updatedContact.email shouldBe contactData.email
updatedContact.phone shouldBe contactData.phone
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/test/scala/kipsigman/ws/salesforce/AccountSpec.scala | <reponame>kipsigman/async-web-service-clients<gh_stars>1-10
package kipsigman.ws.salesforce
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.libs.json._
class AccountSpec extends WordSpec with Matchers with SampleData {
"JSON deserialization" should {
"read valid JSON" in {
sampleAccountJson.validate[Account] match {
case s: JsSuccess[Account] => {
val account = s.get
account shouldBe sampleAccount
}
case e: JsError => {
fail(s"error=$e")
}
}
}
}
} |
kipsigman/async-web-service-clients | salesforce-api-client/src/main/scala/kipsigman/ws/salesforce/actor/SalesforceActor.scala | <reponame>kipsigman/async-web-service-clients
package kipsigman.ws.salesforce.actor
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.existentials
import akka.pattern.{ ask, pipe }
import akka.actor._
import com.typesafe.config.Config
import kipsigman.ws.salesforce._
private[actor] object SalesforceActor {
case class ApiRequest[A](f: (SalesforceApiClient, AccessInfo) => Future[A])
/**
* Represents a failure in a request actor due to invalid AccessInfo. Includes the request/sender
* that was being processed during the failure, as it needs to be re-processed.
*/
case class AccessInfoInvalid(failedReq: ApiRequest[_], failedAskSender: ActorRef)
case object RetryRequestAccessInfo
def props(repository: SalesforceApiClient): Props =
Props(classOf[SalesforceActor], repository)
def apply(actorSystem: ActorSystem, config: Config, name: String = "salesforce"): ActorRef = {
val repository = new SalesforceApiClient(config)(actorSystem.dispatcher)
actorSystem.actorOf(props(repository), name = name)
}
}
/**
* Supervisor Actor to handle requests to the SalesforceRepository. Delegates each request
* to a new a SalesforceRequestActor.
*/
private[actor] class SalesforceActor(repository: SalesforceApiClient) extends Actor with Stash with ActorLogging {
import SalesforceActor._
import context.dispatcher
var accessInfo: AccessInfo = _
override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy
override def preStart(): Unit = {
log.debug("preStart")
requestAccessInfo
}
override def receive = initializing
def initializing: Receive = {
case AccessInfoInvalid(failedReq, failedAskSender) => {
// Already waiting for new AccessInfo, this must be from an additional child with invalid info
// Put request that failed back in mailbox to be re-processed
self.tell(failedReq, failedAskSender)
}
case newAccessInfo: AccessInfo => {
accessInfo = newAccessInfo
log.debug(s"New accessInfo=$accessInfo")
log.debug("become processing")
context.become(processing)
unstashAll()
}
case failure: Status.Failure => {
// Assume this was an error from requesting AccessInfo
log.warning(s"receive Status.Failure, retrying requestAccessInfo in 1 minute", failure.cause)
context.system.scheduler.scheduleOnce(60 seconds, self, RetryRequestAccessInfo)
}
case RetryRequestAccessInfo => {
requestAccessInfo
}
case req: ApiRequest[_] => stash()
case msg: Any => log.warning(s"Unexpected message: $msg")
}
def processing: Receive = {
case AccessInfoInvalid(failedReq, failedAskSender) => {
log.debug("AccessInfoInvalid, become initializing")
context.become(initializing)
// Request new AccessInfo
requestAccessInfo
// Put request that failed back in mailbox to be re-processed
self.tell(failedReq, failedAskSender)
}
case req: ApiRequest[_] => {
log.debug("processing ApiRequest")
val askSender = context.sender
// To test access info expiration
// val flakyInfo =
// if (scala.util.Random.nextInt(2) == 0) AccessInfo("bogus", accessInfo.instanceUrl)
// else accessInfo
// context.actorOf(Props(new SalesforceRequestActor(repository, flakyInfo, req, askSender)))
// Delegate to child.
context.actorOf(Props(new SalesforceRequestActor(repository, accessInfo, req, askSender)))
}
case msg: Any => log.warning(s"Unexpected message: $msg")
}
/**
* Request accessInfo from Salesforce.
*/
private def requestAccessInfo: Unit = {
val accessInfoFuture = repository.refreshAccessInfo
    // pipe result of request to self, will be handled in "initializing"
pipe(accessInfoFuture)(context.dispatcher) to self
}
}
/**
* Actor to handle a single request. Actor is stopped after processing its first message.
*/
private[actor] class SalesforceRequestActor(repository: SalesforceApiClient,
accessInfo: AccessInfo,
req: SalesforceActor.ApiRequest[_],
askSender: ActorRef) extends Actor with ActorLogging {
import context.dispatcher
import SalesforceActor._
override def preStart(): Unit = {
log.debug("preStart")
// make request to repository, receive will handle piped result
val resultFuture = req.f(repository, accessInfo)
pipe(resultFuture) to context.self
}
override def receive = {
case failure: Status.Failure => {
log.debug(s"receive Status.Failure cause=${failure.cause}")
if (failure.cause.getMessage().contains("401") || failure.cause.getMessage().contains("Session expired")) {
// Assume accessInfo has expired
log.debug(s"Expired/Invalid accessInfo=$accessInfo")
context.parent ! AccessInfoInvalid(req, askSender)
} else {
askSender ! failure
}
context.stop(self)
}
case msg: Any => {
// Assume this is the result piped back to self
log.debug(s"receive msg:$msg")
askSender ! msg
context.stop(self)
}
}
override def postStop(): Unit = {
log.debug("postStop")
}
}
|
kipsigman/async-web-service-clients | salesforce-api-client/src/it/scala/kipsigman/ws/salesforce/SalesforceApiClientWritesIntegrationSpec.scala | package kipsigman.ws.salesforce
import org.scalatest.time._
import org.scalatest.WordSpec
import OpportunityContactRole.Role
class SalesforceApiClientWritesIntegrationSpec extends SalesforceApiClientIntegrationSpec {
private def cleanupData = {
client.saveObjectStringField[Account](sampleAccount.id.get, "Type", Account.AccountType.Customer.name)
}
"cleanupDataPre" should {
"cleanupData" in {
cleanupData
1 + 1 shouldBe 2
}
}
// "saveObjectStringField" should {
// "update object field" in {
// client.saveObjectStringField[Account](sampleAccount.id.get, "Type", "IT value")
// }
// }
//
// "touchOpportunity" should {
// "update timestamp" in {
// client.touchOpportunity(sampleOpportunityId)
// 1 + 1 shouldBe 2
// }
// }
// "add new Contact & OpportunityContactRole" should {
// "create new entities" in {
// val contactData = ContactData("Ben", "Harp", "<EMAIL>", Option("555-555-5555"))
// val ocrFuture = client.findOpportunityById(sampleOpportunityId).flatMap(opp => {
// client.addOpportunityNamedContact(opp.get, contactData)
// })
// val ocrOptionFuture = ocrFuture.flatMap(ocr => {
// client.findOpportunityContactRoleById(ocr.id.get)
// })
// whenReady(ocrOptionFuture) { ocrOption =>
// ocrOption shouldBe defined
//
// // Delete to clean up
// //client.deleteOpportunityContactRole(ocrOption.get.id.get)
// }
// }
// }
"cleanupDataPost" should {
"cleanupData" in {
cleanupData
1 + 1 shouldBe 2
}
}
} |
kipsigman/async-web-service-clients | web-service-client/src/test/scala/kipsigman/ws/client/RestWebServiceClientSpec.scala | <gh_stars>1-10
package kipsigman.ws.client
import scala.concurrent.ExecutionContext
import dispatch._
import org.mockito.Mockito.when
import org.scalatest.Matchers
import org.scalatest.WordSpec
import org.scalatest.mock.MockitoSugar
import org.slf4j.LoggerFactory
import play.api.http._
import play.api.libs.json._
class RestWebServiceClientSpec extends WordSpec with Matchers with MockitoSugar {
private val logger = LoggerFactory.getLogger(this.getClass)
val client = new TestClient
"prepareReq" should {
"set Accept and Content-Type for JSON" in {
val sampleUrl = "https://www.googleapis.com/language/translate/v2"
val baseReq = url(sampleUrl)
val preparedReq = client.prepareReq(baseReq, HttpVerbs.GET)
val headers = preparedReq.toRequest.getHeaders
headers.get("Accept").get(0) shouldBe "application/json; charset=utf-8"
headers.get("Content-Type").get(0) shouldBe "application/json; charset=utf-8"
preparedReq.toRequest.getMethod shouldBe HttpVerbs.GET
preparedReq.toRequest.getUrl shouldBe sampleUrl
}
}
}
class TestClient extends RestWebServiceClient {
protected implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
} |
kipsigman/async-web-service-clients | project/Dependencies.scala | import sbt._
object Dependencies {
lazy val playVersion = "2.5.3"
lazy val akkaVersion = "2.4.4"
lazy val akka = Seq(
"com.typesafe.akka" %% "akka-actor" % akkaVersion
)
lazy val di = Seq(
"javax.inject" % "javax.inject" % "1",
"com.google.inject" % "guice" % "4.0"
)
lazy val play = Seq(
"com.typesafe.play" %% "play" % playVersion,
"com.typesafe.play" %% "play-json" % playVersion
)
lazy val dispatch = Seq(
"net.databinder.dispatch" %% "dispatch-core" % "0.11.3"
)
lazy val scalaTest = Seq(
"org.scalatest" %% "scalatest" % "2.2.6" % "test,it",
"org.mockito" % "mockito-core" % "1.10.19" % "test,it"
)
}
|
hmrc/currency-conversion | test/uk/gov/hmrc/currencyconversion/connectors/HODConnectorSpec.scala | <filename>test/uk/gov/hmrc/currencyconversion/connectors/HODConnectorSpec.scala
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.connectors
import com.codahale.metrics.SharedMetricRegistries
import com.github.tomakehurst.wiremock.client.MappingBuilder
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.http.Fault
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.test.Helpers._
import play.api.test.Injecting
import uk.gov.hmrc.http.HeaderCarrier
import org.scalatest.matchers.must.Matchers
import org.scalatest.freespec.AnyFreeSpec
import uk.gov.hmrc.currencyconversion.utils.WireMockHelper
class HODConnectorSpec extends AnyFreeSpec with Matchers with GuiceOneAppPerSuite with WireMockHelper
with ScalaFutures with IntegrationPatience with Injecting with BeforeAndAfterEach {
override def beforeEach(): Unit = {
SharedMetricRegistries.clear()
}
override lazy val app: Application = {
new GuiceApplicationBuilder()
.configure(
"microservice.services.des.port" -> server.port(),
"microservice.services.des.circuit-breaker.max-failures" -> 1,
"microservice.services.des.circuit-breaker.reset-timeout" -> "1 second"
)
.build()
}
private lazy val connector: HODConnector = inject[HODConnector]
private implicit val hc: HeaderCarrier = HeaderCarrier()
private def stubCall: MappingBuilder =
post(urlEqualTo("/passengers/exchangerequest/xrs/getexchangerate/v1"))
"hod connector" - {
"must call the HOD when xrs worker thread is started" in {
server.stubFor(
stubCall
.willReturn(aResponse().withStatus(OK))
)
connector.submit().futureValue.status mustBe OK
}
"must fall back to a 503 (SERVICE_UNAVAILABLE) when the downstream call errors" in {
server.stubFor(
stubCall
.willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE))
.willReturn(aResponse().withStatus(SERVICE_UNAVAILABLE))
)
connector.submit().futureValue.status mustBe SERVICE_UNAVAILABLE
}
"must fail fast while the circuit breaker is open when Xrs call is triggered" in {
server.stubFor(
stubCall
.willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE))
.willReturn(aResponse().withStatus(INSUFFICIENT_STORAGE))
)
connector.submit().futureValue.status mustBe SERVICE_UNAVAILABLE
}
}
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/models/ExchangeRateObject.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.models
import play.api.libs.json.{Format, JsObject, OFormat, OWrites, Reads, __}
import uk.gov.hmrc.mongo.play.json.formats.MongoJavatimeFormats
import java.time.Instant
final case class ExchangeRateObject(fileName: String, exchangeRateData: JsObject)
object ExchangeRateObject {
implicit val formatInstant: Format[Instant] = MongoJavatimeFormats.instantFormat
implicit lazy val reads: Reads[ExchangeRateObject] = {
import play.api.libs.functional.syntax._
(
(__ \ "_id").read[String] and
(__ \ "exchangeRateData").read[JsObject]
)(ExchangeRateObject.apply _)
}
implicit lazy val writes: OWrites[ExchangeRateObject] = {
import play.api.libs.functional.syntax._
(
(__ \ "_id").write[String] and
(__ \ "exchangeRateData").write[JsObject]
)(unlift(ExchangeRateObject.unapply))
}
implicit val format: OFormat[ExchangeRateObject] = OFormat(
reads,
writes
)
}
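// A minimal round-trip sketch of the format defined above: `fileName` is persisted under the
// Mongo `_id` key. The sample file name and payload below are illustrative assumptions only.
object ExchangeRateObjectFormatSketch {
  import play.api.libs.json.Json

  val sample = ExchangeRateObject("exrates-monthly-0919", Json.obj("exchangeRates" -> Json.arr()))

  // Serialises as: {"_id":"exrates-monthly-0919","exchangeRateData":{"exchangeRates":[]}}
  val asJson = Json.toJson(sample)

  // Reading it back yields an equal value.
  val roundTripped: ExchangeRateObject = asJson.as[ExchangeRateObject]
}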
|
hmrc/currency-conversion | test/uk/gov/hmrc/currencyconversion/controllers/ExchangeRateControllerSpec.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.controllers
import com.codahale.metrics.SharedMetricRegistries
import org.mockito.Mockito
import org.mockito.Mockito.doReturn
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers.convertToAnyShouldWrapper
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.http.Status
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{JsArray, JsObject, Json}
import play.api.test.FakeRequest
import play.api.test.Helpers._
import uk.gov.hmrc.currencyconversion.repositories.ExchangeRateRepository
import play.api.Application
import uk.gov.hmrc.currencyconversion.models.ExchangeRateObject
import java.time.LocalDate
import scala.concurrent.Future._
import scala.language.postfixOps
import org.scalatest.wordspec.AnyWordSpecLike
import play.api.mvc.Result
import scala.concurrent.Future
class ExchangeRateControllerSpec extends AnyWordSpecLike with GuiceOneAppPerSuite with MockitoSugar with BeforeAndAfterEach {
private lazy val exchangeRateRepository = mock[ExchangeRateRepository]
override def beforeEach(): Unit = {
Mockito.reset(exchangeRateRepository)
val exchangeRate : ExchangeRateObject = ExchangeRateObject("exrates-monthly-0919", Json.parse(data).as[JsObject])
doReturn(false) when exchangeRateRepository isDataPresent "exrates-monthly-0919"
doReturn(successful(Some(exchangeRate))) when exchangeRateRepository get "exrates-monthly-0919"
SharedMetricRegistries.clear()
}
override lazy val app: Application = {
import play.api.inject._
new GuiceApplicationBuilder()
.overrides(
bind[ExchangeRateRepository].toInstance(exchangeRateRepository)
)
.build()
}
val data: String =
"""{
| "timestamp" : "2019-06-28T13:17:21Z",
| "correlationid" : "c4a81105-9417-4080-9cd2-c4469efc965c",
| "exchangeRates" : [
| {
| "validFrom" : "2019-09-01",
| "validTo" : "2019-09-30",
| "currencyCode" : "USD",
| "exchangeRate" : 1.213,
| "currencyName" : "United State"
| },
| {
| "validFrom" : "2019-09-01",
| "validTo" : "2019-09-30",
| "currencyCode" : "INR",
| "exchangeRate" : 1,
| "currencyName" : "India"
| }
| ]
|}""".stripMargin
"Getting rates for a valid date and 1 valid currency" must {
"return 200 and the correct json" in {
val result = await(route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-10?cc=USD")).get)
result.header.status shouldBe OK
result.header.headers.get("Warning") shouldBe None
contentAsJson(Future.successful(result)) shouldBe Json.arr(
Json.obj("startDate" -> "2019-09-01", "endDate" -> "2019-09-30", "currencyCode" -> "USD", "rate" -> "1.213")
)
Thread.sleep(2000.toLong)
}
"return 200 and the correct json with scaling 2 decimal at least" in {
val result: Result = await(route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-10?cc=INR")).get)
result.header.status shouldBe OK
result.header.headers.get("Warning") shouldBe None
contentAsJson(Future.successful(result)) shouldBe Json.arr(
Json.obj("startDate" -> "2019-09-01", "endDate" -> "2019-09-30", "currencyCode" -> "INR", "rate" -> "1.00")
)
Thread.sleep(2000.toLong)
}
}
"Getting rates for a valid date and 1 invalid currency" must {
"return 200 and the correct json" in {
val result = await(route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-10?cc=INVALID")).get)
result.header.status shouldBe OK
result.header.headers.get("Warning") shouldBe None
contentAsJson(Future.successful(result)) shouldBe Json.arr(
Json.obj("startDate" -> "2019-09-01", "endDate" -> "2019-09-30", "currencyCode" -> "INVALID")
)
}
}
"Getting rates for a valid date and 1 valid currency and 1 invalid currency" must {
"return 200 and the correct json" in {
val result = await(route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-10?cc=USD&cc=INVALID")).get)
result.header.status shouldBe OK
result.header.headers.get("Warning") shouldBe None
contentAsJson(Future.successful(result)).as[JsArray].value(0).as[JsObject].keys shouldBe Set("startDate", "endDate", "currencyCode", "rate")
contentAsJson(Future.successful(result)).as[JsArray].value(1).as[JsObject].keys shouldBe Set("startDate", "endDate", "currencyCode")
}
}
"Getting rates for a date which has no rates Json file and a valid currency code" must {
"return 200 and the correct json" in {
doReturn(true) when exchangeRateRepository isDataPresent "exrates-monthly-1019"
val result = route(app, FakeRequest("GET", "/currency-conversion/rates/2019-10-10?cc=USD")).get
status(result) shouldBe Status.OK
contentAsJson(result).as[JsArray].value(0).as[JsObject].keys shouldBe Set("startDate", "endDate", "currencyCode", "rate")
}
}
"Getting rates for a date which has no rates Json file, 1 valid currency code and 1 invalid currency code" must {
"return response from previous month" in {
doReturn(true) when exchangeRateRepository isDataPresent "exrates-monthly-1019"
val result = route(app, FakeRequest("GET", "/currency-conversion/rates/2019-10-10?cc=USD&cc=INVALID")).get
status(result) shouldBe Status.OK
}
}
"Getting rates for last day of the month and 1 valid currency code" must {
"return 200 and the correct json which gets rate from file of the same month" in {
val result = route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-30?cc=USD")).get
status(result) shouldBe Status.OK
contentAsJson(result) shouldBe Json.arr(
Json.obj("startDate" -> "2019-09-01", "endDate" -> "2019-09-30", "currencyCode" -> "USD", "rate" -> "1.213")
)
}
}
"Getting rates for first day of the month and 1 valid currency code" must {
"return 200 and the correct json which gets rate from file of the same month" in {
val result = route(app, FakeRequest("GET", "/currency-conversion/rates/2019-09-01?cc=USD")).get
status(result) shouldBe Status.OK
contentAsJson(result) shouldBe Json.arr(
Json.obj("startDate" -> "2019-09-01", "endDate" -> "2019-09-30", "currencyCode" -> "USD", "rate" -> "1.213")
)
}
}
"Getting rates for an invalid date" must {
"return 400 and the correct json" in {
val result = route(app, FakeRequest("GET", "/currency-conversion/rates/INVALID-DATE?cc=USD")).get
status(result) shouldBe Status.BAD_REQUEST
contentAsJson(result) shouldBe Json.obj("statusCode" -> 400, "message" -> "bad request, cause: REDACTED")
}
}
"Getting currencies for a valid date" must {
"return 200 and the correct json" in {
val exchangeRate : ExchangeRateObject = ExchangeRateObject("exrates-monthly-0919", Json.parse(data).as[JsObject])
doReturn(false) when exchangeRateRepository isDataPresent "exrates-monthly-0919"
doReturn(successful(Some(exchangeRate))) when exchangeRateRepository get "exrates-monthly-0919"
val result = route(app, FakeRequest("GET", "/currency-conversion/currencies/2019-09-01")).get
status(result) shouldBe Status.OK
contentAsJson(result).as[JsObject].keys shouldBe Set("start", "end", "currencies")
}
}
"Getting currencies for an invalid date" must {
"return 400" in {
val result = route(app, FakeRequest("GET", "/currency-conversion/currencies/INVALID-DATE")).get
status(result) shouldBe Status.BAD_REQUEST
}
}
"Getting currencies for a date which does not exist" must {
"return 200 if fallback is available" in {
val exchangeRate : ExchangeRateObject = ExchangeRateObject("exrates-monthly-0919", Json.parse(data).as[JsObject])
doReturn(false) when exchangeRateRepository isDataPresent "exrates-monthly-0919"
doReturn(successful(Some(exchangeRate))) when exchangeRateRepository get "exrates-monthly-0919"
val date = LocalDate.of(2019.toInt, 9.toInt, 22.toInt)
val result = route(app, FakeRequest("GET", s"/currency-conversion/currencies/$date")).get
status(result) shouldBe Status.OK
contentAsJson(result).as[JsObject].keys shouldBe Set("start", "end", "currencies")
}
}
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/repositories/ConversionRatePeriodJson.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.repositories
import akka.stream.Materializer
import play.api.libs.json.JsSuccess
import uk.gov.hmrc.currencyconversion.models.{ConversionRatePeriod, Currency, CurrencyPeriod, ExchangeRateData}
import java.time.LocalDate
import play.api.i18n.Lang.logger.logger
import javax.inject.Inject
import play.api.libs.json.OFormat.oFormatFromReadsAndOWrites
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.postfixOps
class ConversionRatePeriodJson @Inject()(writeExchangeRateRepository: ExchangeRateRepository)
(implicit ec: ExecutionContext, m: Materializer) extends ConversionRatePeriodRepository {
def getExchangeRateFileName(date : LocalDate) : String = {
val targetFileName = "exrates-monthly-%02d".format(date.getMonthValue) +
date.getYear.toString.substring(2)
if (!writeExchangeRateRepository.isDataPresent(targetFileName)) {
targetFileName
} else {
logger.info(s"$targetFileName is not present")
"empty"
}
}
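  // Worked example (illustrative): for LocalDate.of(2019, 9, 10) the computed name is
  // "exrates-monthly-0919" -- the month zero-padded to two digits plus the last two digits
  // of the year -- matching the file names stubbed in ExchangeRateControllerSpec.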
def getExchangeRatesData(filePath: String) : Future[ExchangeRateData] = {
writeExchangeRateRepository.get(filePath)
.map {
case response if response.isEmpty =>
logger.error(s"XRS_FILE_CANNOT_BE_READ_ERROR [ConversionRatePeriodJson] Exchange rate file is not able to read")
throw new RuntimeException("Exchange rate data is not able to read.")
case response =>
response.get.exchangeRateData.validate[ExchangeRateData] match {
case JsSuccess(seq, _) =>
seq
case _ => {
logger.error(s"XRS_FILE_CANNOT_BE_READ_ERROR [ConversionRatePeriodJson] Exchange rate data mapping is failed")
throw new RuntimeException("Exchange rate data mapping is failed")
}
}
}
}
def getExchangeRates(filePath: String) : Future[Map[String, Option[BigDecimal]]] = {
def getMinimumDecimalScale(rate: BigDecimal): BigDecimal = {
if (rate.scale < 2) rate.setScale(2) else rate
}
getExchangeRatesData(filePath).map { exchangeRates =>
exchangeRates.exchangeData.flatMap { data =>
Map(data.currencyCode -> Some(getMinimumDecimalScale(data.exchangeRate)))
}.toMap
}
}
def getCurrencies(filePath: String): Future[Seq[Currency]] = {
getExchangeRatesData(filePath).map { exchangeRates =>
exchangeRates.exchangeData map { data =>
Currency("", data.currencyCode, data.currencyName)
}
}
}
def getConversionRatePeriod(date: LocalDate): Future[Option[ConversionRatePeriod]] = {
val fileName = getExchangeRateFileName(date)
if (fileName.equals("empty")) {
Future.successful(None)
} else {
getExchangeRates(fileName).map { rates =>
Some(ConversionRatePeriod(date.withDayOfMonth(1), date.withDayOfMonth(date.lengthOfMonth()), None, rates))
}
}
}
def getLatestConversionRatePeriod(date: LocalDate): Future[ConversionRatePeriod] = {
val fileName = getExchangeRateFileName(date)
if (fileName.equals("empty")) {
logger.error(s"XRS_FILE_NOT_AVAILABLE_ERROR [ConversionRatePeriodJson] Exchange rate file is not available.")
      Future.failed(new RuntimeException("Exchange rate file could not be read."))
} else {
getExchangeRates(fileName).map { rates =>
ConversionRatePeriod(date.withDayOfMonth(1), date.withDayOfMonth(date.lengthOfMonth()), None, rates)
}
}
}
def getCurrencyPeriod(date: LocalDate): Future[Option[CurrencyPeriod]] = {
getCurrencies(getExchangeRateFileName(date)).map { currencies =>
Some(CurrencyPeriod(date.withDayOfMonth(1), date.withDayOfMonth(date.lengthOfMonth()), currencies))
}
}
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/services/ExchangeRateService.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.services
import java.time.LocalDate
import java.time.temporal.TemporalAdjusters.lastDayOfMonth
import javax.inject.Inject
import play.api.libs.json._
import uk.gov.hmrc.currencyconversion.models._
import uk.gov.hmrc.currencyconversion.repositories.ConversionRatePeriodRepository
import scala.concurrent.{ExecutionContext, Future}
class ExchangeRateService @Inject()(exchangeRateRepository: ConversionRatePeriodRepository)(implicit ec: ExecutionContext) {
def getRates(date: LocalDate, currencyCodes: List[String]): Future[List[ExchangeRateResult]] = {
val conversionRatePeriod = exchangeRateRepository.getConversionRatePeriod(date)
Future.sequence {
currencyCodes.map { currencyCode =>
conversionRatePeriod.flatMap {
case Some(crp) =>
crp.rates.get(currencyCode) match {
case Some(rate) => Future.successful(ExchangeRateSuccessResult(Json.obj("startDate" -> crp.startDate,
"endDate" -> crp.endDate, "currencyCode" -> currencyCode, "rate" -> rate.map(_.toString()))))
case None => Future.successful(ExchangeRateSuccessResult(Json.obj("startDate" -> crp.startDate,
"endDate" -> crp.endDate, "currencyCode" -> currencyCode)))
}
case None => exchangeRateRepository.getLatestConversionRatePeriod(date.minusMonths(1)).map { fallbackCrp =>
fallbackCrp.rates.get(currencyCode) match {
case Some(rate) => ExchangeRateOldFileResult(Json.obj("startDate" -> fallbackCrp.startDate,
"endDate" -> fallbackCrp.endDate, "currencyCode" -> currencyCode, "rate" -> rate.map(_.toString())))
case None => ExchangeRateOldFileResult(Json.obj("startDate" -> fallbackCrp.startDate,
"endDate" -> fallbackCrp.endDate, "currencyCode" -> currencyCode))
}
}
}
}
}
}
def getCurrencies(date: LocalDate): Future[Option[CurrencyPeriod]] = {
exchangeRateRepository.getCurrencyPeriod(date).flatMap {
case Some(value) => Future.successful(Some(value))
case None =>
val fallbackDate = date.minusMonths(1).`with`(lastDayOfMonth()) //lastDay of previous month
exchangeRateRepository.getCurrencyPeriod(fallbackDate)
}
}
}
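// A minimal usage sketch, assuming an already-wired ConversionRatePeriodRepository (injected
// in production; the `repository` parameter below stands in for it). It shows that getRates
// answers one result per requested currency code and falls back to the previous month's file
// when the requested month has no data.
object ExchangeRateServiceUsageSketch {
  import java.time.LocalDate
  import scala.concurrent.ExecutionContext.Implicits.global
  import uk.gov.hmrc.currencyconversion.repositories.ConversionRatePeriodRepository

  def ratesFor(repository: ConversionRatePeriodRepository) = {
    val service = new ExchangeRateService(repository)
    // One ExchangeRateSuccessResult (or ExchangeRateOldFileResult after fallback) per code.
    service.getRates(LocalDate.of(2019, 9, 10), List("USD", "INR"))
  }
}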
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/connectors/HttpDate.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.connectors
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
import org.joda.time.{DateTime, DateTimeZone}
trait HttpDate {
protected val dateFormatter: DateTimeFormatter =
DateTimeFormat
.forPattern("EEE, dd MMM yyyy HH:mm:ss 'GMT'")
.withZone(DateTimeZone.UTC)
def now: String =
dateFormatter.print(DateTime.now.withZone(DateTimeZone.UTC))
}
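// A minimal sketch of what this trait produces: mixing it in and calling `now` yields an
// RFC 7231 style HTTP-date such as "Tue, 15 Jun 2021 15:41:38 GMT" (sample value only),
// which HODConnector sends as the Date header.
object HttpDateSketch extends HttpDate {
  def dateHeader: (String, String) = play.api.http.HeaderNames.DATE -> now
}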
|
hmrc/currency-conversion | build.sbt | import TestPhases.oneForkedJvmPerTest
import scoverage.ScoverageKeys
import uk.gov.hmrc.DefaultBuildSettings.addTestReportOption
import uk.gov.hmrc.sbtdistributables.SbtDistributablesPlugin.publishingSettings
import scala.util.matching.Regex
val appName = "currency-conversion"
lazy val microservice = Project(appName, file("."))
.enablePlugins(play.sbt.PlayScala, SbtAutoBuildPlugin, SbtDistributablesPlugin)
.settings(
libraryDependencies ++= AppDependencies.compile ++ AppDependencies.test(),
retrieveManaged := true,
evictionWarningOptions in update := EvictionWarningOptions.default.withWarnScalaVersionEviction(false)
)
.settings(
publishingSettings: _*
)
.settings(scalaVersion := "2.12.12")
.configs(IntegrationTest)
.settings(inConfig(IntegrationTest)(Defaults.itSettings): _*)
.settings(
Keys.fork in IntegrationTest := false,
unmanagedSourceDirectories in IntegrationTest := (baseDirectory in IntegrationTest) (base => Seq(base / "it")).value,
testGrouping in IntegrationTest := oneForkedJvmPerTest((definedTests in IntegrationTest).value),
parallelExecution in IntegrationTest := false,
addTestReportOption(IntegrationTest, "int-test-reports"),
routesImport ++= Seq("uk.gov.hmrc.currencyconversion.binders.DateBinder._", "java.time._")
)
.settings(majorVersion := 1)
.settings(resolvers += Resolver.jcenterRepo)
.settings(resolvers += Resolver.typesafeRepo("releases"))
.settings(
ScoverageKeys.coverageExcludedFiles := "<empty>;Reverse.*;.*Routes.*;",
ScoverageKeys.coverageMinimum := 80
)
.settings(silencerSettings)
.settings(PlayKeys.playDefaultPort := 9016)
lazy val silencerSettings: Seq[Setting[_]] = {
val paramValueNeverUsed: Regex = """^(parameter value)(.*)(is never used)$""".r
val unusedImports: Regex = """^(Unused import*)$""".r
val silencerVersion = "1.7.1"
Seq(
libraryDependencies ++= Seq(
compilerPlugin("com.github.ghik" % "silencer-plugin" % silencerVersion cross CrossVersion.full),
"com.github.ghik" % "silencer-lib" % silencerVersion % Provided cross CrossVersion.full
),
// silence warnings on autogenerated files
scalacOptions += "-P:silencer:pathFilters=target/.*",
// silence implicit parameter value is never used warnings
scalacOptions += s"-P:silencer:globalFilters=$paramValueNeverUsed",
scalacOptions += s"-P:silencer:globalFilters=$unusedImports",
// exclude warnings for the project directories, i.e. make builds reproducible
scalacOptions += s"-P:silencer:sourceRoots=${baseDirectory.value.getCanonicalPath}"
)
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/config/DesCircuitBreakerProvider.scala |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.config
import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker
import com.google.inject.{Inject, Provider, Singleton}
import play.api.Configuration
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
@Singleton
class DesCircuitBreakerProvider @Inject()(config: Configuration)
(implicit ec: ExecutionContext, sys: ActorSystem) extends Provider[CircuitBreaker] {
private val maxFailures = config.get[Int]("microservice.services.des.circuit-breaker.max-failures")
private val callTimeout = config.get[FiniteDuration]("microservice.services.des.circuit-breaker.call-timeout")
private val resetTimeout = config.get[FiniteDuration]("microservice.services.des.circuit-breaker.reset-timeout")
override def get(): CircuitBreaker =
new CircuitBreaker(
scheduler = sys.scheduler,
maxFailures = maxFailures,
callTimeout = callTimeout,
resetTimeout = resetTimeout
)
}
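// A minimal configuration sketch for the keys read above. The values are illustrative
// (HODConnectorSpec wires similar ones in tests); only the key names come from this provider.
object DesCircuitBreakerConfigSketch {
  import play.api.inject.guice.GuiceApplicationBuilder

  val builder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
    .configure(
      "microservice.services.des.circuit-breaker.max-failures"  -> 1,
      "microservice.services.des.circuit-breaker.call-timeout"  -> "5 seconds",
      "microservice.services.des.circuit-breaker.reset-timeout" -> "1 second"
    )
}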
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/connectors/HODConnector.scala | /*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.connectors
import akka.pattern.CircuitBreaker
import com.google.inject.name.Named
import com.google.inject.Inject
import play.api.http.{ContentTypes, HeaderNames}
import play.api.Configuration
import play.api.http.Status.SERVICE_UNAVAILABLE
import play.api.libs.json.{JsValue, Json}
import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse,HttpClient}
import uk.gov.hmrc.currencyconversion.models.Service
import java.util.UUID
import javax.inject.Singleton
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class HODConnector @Inject() (
http: HttpClient,
config: Configuration,
@Named("des") circuitBreaker: CircuitBreaker
)(implicit ec: ExecutionContext) extends HttpDate {
private val bearerToken = config.get[String]("microservice.services.des.bearer-token")
private val baseUrl = config.get[Service]("microservice.services.des")
private val xrsEndPoint = config.get[String]("microservice.services.des.endpoint")
private val environment = config.get[String]("microservice.services.des.environment")
private val CORRELATION_ID: String = "X-Correlation-ID"
private val ENVIRONMENT: String = "Environment"
def submit(): Future[HttpResponse] = {
implicit val hc: HeaderCarrier = {
HeaderCarrier()
.withExtraHeaders(
HeaderNames.ACCEPT -> ContentTypes.JSON,
HeaderNames.CONTENT_TYPE -> ContentTypes.JSON,
HeaderNames.DATE -> now,
HeaderNames.AUTHORIZATION -> s"Bearer $bearerToken",
CORRELATION_ID -> UUID.randomUUID.toString,
ENVIRONMENT -> environment
)
}
def call (implicit hc: HeaderCarrier): Future[HttpResponse] =
http.POST[JsValue, HttpResponse](s"$baseUrl$xrsEndPoint", Json.parse("""{}"""))
circuitBreaker.withCircuitBreaker(call)
.fallbackTo(Future.successful(HttpResponse(SERVICE_UNAVAILABLE, s"Fall back response from $baseUrl$xrsEndPoint")))
}
}
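// Usage note: XrsExchangeRateRequestWorker injects this connector and calls submit() on a
// schedule. Because the call goes through the "des" circuit breaker and carries a fallbackTo,
// callers always receive an HttpResponse -- a 503 with the fallback body rather than a failed
// Future when DES is unreachable or the breaker is open (exercised in HODConnectorSpec).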
|
hmrc/currency-conversion | test/uk/gov/hmrc/currencyconversion/workers/XrsExchangeRateRequestWorkerSpec.scala |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.workers
import com.codahale.metrics.SharedMetricRegistries
import com.github.tomakehurst.wiremock.client.WireMock._
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, OptionValues}
import org.scalatestplus.mockito.MockitoSugar
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.test.Helpers._
import uk.gov.hmrc.currencyconversion.utils.WireMockHelper
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class XrsExchangeRateRequestWorkerSpec extends AnyWordSpec with Matchers
with ScalaFutures with IntegrationPatience with OptionValues with MockitoSugar with WireMockHelper with Eventually {
lazy val builder: GuiceApplicationBuilder = new GuiceApplicationBuilder()
.configure(
"workers.xrs-exchange-rate.interval" -> "1 second",
"microservice.services.des.port" -> server.port()
)
private val mockedJsonResponse =
"""{"timestamp":"2021-06-15T15:41:38Z",
|"correlationid":"72a89d23-0fc6-4212-92fc-ea8b05139c76",
|"exchangeRates":[{"validFrom":"2021-06-15","validTo":"2021-06-15","currencyCode":"ARS","exchangeRate":133.25,"currencyName":"Peso"}]}"""
.stripMargin
"must call the xrs exchange rate service and receive the response" in {
server.stubFor(
post(urlEqualTo("/passengers/exchangerequest/xrs/getexchangerate/v1"))
.willReturn(aResponse().withStatus(OK).withBody(mockedJsonResponse))
)
val app = builder.build()
running(app) {
val worker = app.injector.instanceOf[XrsExchangeRateRequestWorker]
val workerResponse = worker.tap.pull.futureValue.value
workerResponse.status shouldBe OK
workerResponse.body shouldBe mockedJsonResponse
}
}
"Handle the service unavailable response from Xrs service" in {
server.stubFor(
post(urlEqualTo("/passengers/exchangerequest/xrs/getexchangerate/v1"))
.willReturn(aResponse().withStatus(SERVICE_UNAVAILABLE))
)
val app = builder.build()
running(app) {
val worker = app.injector.instanceOf[XrsExchangeRateRequestWorker]
val workerResponse = worker.tap.pull.futureValue.value
workerResponse.status shouldBe SERVICE_UNAVAILABLE
}
}
"Fail fast - Circuit breaker should return the fall back method" in {
server.stubFor(
post(urlEqualTo("/passengers/exchangerequest/xrs/getexchangerate/v1"))
.willReturn(aResponse().withStatus(BAD_REQUEST))
)
val app = builder.build()
running(app) {
val worker = app.injector.instanceOf[XrsExchangeRateRequestWorker]
val workerResponse = worker.tap.pull.futureValue.value
workerResponse.status shouldBe SERVICE_UNAVAILABLE
}
}
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/workers/XrsExchangeRateRequestWorker.scala |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.workers
import akka.stream.scaladsl.{Keep, Sink, SinkQueueWithCancel, Source}
import akka.stream.{ActorAttributes, Materializer, Supervision}
import com.google.inject.{Inject, Singleton}
import play.api.Configuration
import play.api.i18n.Lang.logger.logger
import uk.gov.hmrc.currencyconversion.connectors.HODConnector
import uk.gov.hmrc.currencyconversion.repositories.ExchangeRateRepository
import uk.gov.hmrc.http.HttpReads.{is2xx, is4xx}
import uk.gov.hmrc.http.HttpResponse
import play.api.http.Status.SERVICE_UNAVAILABLE
import play.api.libs.json.{JsObject, JsValue}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.control.Exception._
@Singleton
class XrsExchangeRateRequestWorker @Inject()(
config: Configuration,
hodConnector: HODConnector,
writeExchangeRateRepository: ExchangeRateRepository
)(implicit mat: Materializer, ec: ExecutionContext) {
private val initialDelayFromConfig = config.get[String]("workers.xrs-exchange-rate.initial-delay").replace('.',' ')
private val parallelism = config.get[String]("workers.xrs-exchange-rate.parallelism").replace('.',' ')
private val initialDelayFromConfigFiniteDuration = config.get[FiniteDuration]("workers.xrs-exchange-rate.initial-delay")
private val finiteInitialDelay = Duration(initialDelayFromConfig)
private val initialDelay = Some(finiteInitialDelay).collect { case d: FiniteDuration => d }.getOrElse(initialDelayFromConfigFiniteDuration)
private val intervalFromConfig = config.get[String]("workers.xrs-exchange-rate.interval").replace('.',' ')
private val intervalFromConfigFiniteDuration = config.get[FiniteDuration]("workers.xrs-exchange-rate.interval")
private val finiteInterval = Duration(intervalFromConfig)
private val interval = Some(finiteInterval).collect { case d: FiniteDuration => d }.getOrElse(intervalFromConfigFiniteDuration)
private val supervisionStrategy: Supervision.Decider = {
case NonFatal(_) => Supervision.resume
case _ => Supervision.stop
}
val tap: SinkQueueWithCancel[HttpResponse] = {
Source.tick(initialDelay, interval, Tick())
.mapAsync(allCatch.opt(parallelism.toInt).getOrElse(1))
{
_ => hodConnector.submit().flatMap {
case response: HttpResponse if is2xx(response.status) =>
val exchangeRatesJson = response.json.as[JsObject]
writeExchangeRateRepository.insertOrUpdate(exchangeRatesJson)
Future.successful(response)
case response: HttpResponse if is4xx(response.status) =>
logger.error(s"XRS_BAD_REQUEST_FROM_EIS_ERROR [XrsExchangeRateRequestWorker] call to DES (EIS) is failed. ${response.toString}")
Future.successful(response)
case _ => logger.error(s"XRS_BAD_REQUEST_FROM_EIS_ERROR [XrsExchangeRateRequestWorker] BAD Request is received from DES (EIS)")
Future.successful(HttpResponse(SERVICE_UNAVAILABLE, "Service Unavailable"))
}
}
.wireTapMat(Sink.queue())(Keep.right)
.toMat(Sink.ignore)(Keep.left)
.withAttributes(ActorAttributes.supervisionStrategy(supervisionStrategy))
.run()
}
}
case class Tick()
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/models/ExchangeRate.scala |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.models
import play.api.libs.functional.syntax.toFunctionalBuilderOps
import play.api.libs.json.{Json, OWrites, Reads, __}
import java.time.LocalDate
case class ExchangeRate(
validFrom: LocalDate,
validTo: LocalDate,
currencyCode: String,
exchangeRate: BigDecimal,
currencyName: String
)
object ExchangeRate {
implicit lazy val reads: Reads[ExchangeRate] = (
(__ \ "validFrom").read[LocalDate] and
(__ \"validTo").read[LocalDate] and
(__ \ "currencyCode").read[String] and
(__ \ "exchangeRate").read[BigDecimal] and
(__ \ "currencyName").read[String]
)(ExchangeRate.apply _)
implicit lazy val writes: OWrites[ExchangeRate] = Json.writes[ExchangeRate]
}
case class ExchangeRateData(
timestamp: String,
correlationId: String,
exchangeData: Seq[ExchangeRate]
)
object ExchangeRateData {
implicit lazy val reads: Reads[ExchangeRateData] = (
(__ \ "timestamp").read[String] and
(__ \"correlationid").read[String] and
(__ \ "exchangeRates").read[Seq[ExchangeRate]]
)(ExchangeRateData.apply _)
implicit lazy val writes: OWrites[ExchangeRateData] = Json.writes[ExchangeRateData]
}
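// A minimal parsing sketch: the Reads above expect the DES/XRS payload shape used in
// XrsExchangeRateRequestWorkerSpec -- "timestamp", "correlationid" and an "exchangeRates"
// array. The literal below is a trimmed sample, not production data.
object ExchangeRateDataParseSketch {
  import play.api.libs.json.Json

  val sample = Json.parse(
    """{"timestamp":"2021-06-15T15:41:38Z",
      |"correlationid":"72a89d23-0fc6-4212-92fc-ea8b05139c76",
      |"exchangeRates":[{"validFrom":"2021-06-15","validTo":"2021-06-15",
      |"currencyCode":"ARS","exchangeRate":133.25,"currencyName":"Peso"}]}""".stripMargin)

  val parsed: ExchangeRateData = sample.as[ExchangeRateData]
}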
|
hmrc/currency-conversion | project/AppDependencies.scala |
import play.core.PlayVersion
import play.sbt.PlayImport._
import sbt.Keys.libraryDependencies
import sbt._
object AppDependencies {
val compile = Seq(
ws,
"uk.gov.hmrc" %% "bootstrap-backend-play-28" % "5.11.0",
"uk.gov.hmrc.mongo" %% "hmrc-mongo-play-28" % "0.52.0",
"org.reactivemongo" %% "reactivemongo-akkastream" % "0.20.13"
)
def test(scope: String = "test,it") = Seq(
"org.scalatest" %% "scalatest" % "3.2.9" % scope,
"org.pegdown" % "pegdown" % "1.6.0" % scope,
"org.mockito" % "mockito-all" % "2.0.2-beta" % "test",
"org.scalatestplus" %% "mockito-3-4" % "3.2.9.0",
"org.scalatestplus.play" %% "scalatestplus-play" % "5.1.0" % "test,it",
"com.typesafe.play" %% "play-test" % PlayVersion.current % scope,
"com.github.tomakehurst" % "wiremock-standalone" % "2.27.2",
"com.github.netcrusherorg" % "netcrusher-core" % "0.10",
"com.vladsch.flexmark" % "flexmark-all" % "0.36.8"
)
}
|
hmrc/currency-conversion | app/uk/gov/hmrc/currencyconversion/repositories/ExchangeRateRepository.scala |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.currencyconversion.repositories
import java.time.LocalDate
import uk.gov.hmrc.currencyconversion.models.ExchangeRateObject
import akka.stream.Materializer
import com.google.inject.{Inject, Singleton}
import org.mongodb.scala.model.Filters.equal
import org.mongodb.scala.model.{FindOneAndUpdateOptions, ReturnDocument, Updates}
import play.api.i18n.Lang.logger.logger
import play.api.libs.json.JsObject
import uk.gov.hmrc.mongo.MongoComponent
import uk.gov.hmrc.mongo.play.json.{Codecs, PlayMongoRepository}
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.{implicitConversions, postfixOps}
import scala.util.Try
@Singleton
class DefaultExchangeRateRepository @Inject() (mongoComponent: MongoComponent
) (implicit ec: ExecutionContext, m: Materializer) extends PlayMongoRepository[ExchangeRateObject](
collectionName = "exchangeCurrencyData",
mongoComponent = mongoComponent,
domainFormat = ExchangeRateObject.format,
indexes = Seq())
with ExchangeRateRepository {
private def date = LocalDate.now()
private def currentFileName: String = "exrates-monthly-%02d".format(date.getMonthValue) +
date.getYear.toString.substring(2)
def get(fileName: String): Future[Option[ExchangeRateObject]] =
collection.find(equal("_id" , Codecs.toBson(fileName))).headOption()
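  // Note on semantics: despite its name, isDataPresent returns true when *no* document with
  // the given file name exists (it blocks on get and checks Option.isEmpty). Callers such as
  // ConversionRatePeriodJson.getExchangeRateFileName rely on this inverted meaning.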
def isDataPresent(fileName: String): Boolean = {
val existingData = get(fileName)
Await.ready(existingData, 3 second)
existingData.value.get.get.isEmpty
}
def insert(exchangeRateData: JsObject): Unit = {
Try(
{
val data = ExchangeRateObject(currentFileName, exchangeRateData)
collection.insertOne(data).toFuture()
logger.info(s"[ExchangeRateRepository] writing to mongo is successful $currentFileName")
}
).getOrElse(
{
logger.error(s"XRS_FILE_CANNOT_BE_WRITTEN_ERROR [ExchangeRateRepository] " + s"writing to mongo is failed ")
throw new Exception(s"unable to insert exchangeRateRepository")
}
)
}
def update(exchangeRateData: JsObject): Unit = {
Try({
collection.findOneAndUpdate(equal("_id", Codecs.toBson(currentFileName)),
Updates.set("exchangeRateData",Codecs.toBson(exchangeRateData)),
options = FindOneAndUpdateOptions().upsert(false).returnDocument(ReturnDocument.AFTER)).toFuture()
logger.info(s"[ExchangeRateRepository] writing to mongo is successful $currentFileName")
}
).getOrElse(
{
logger.error(s"XRS_FILE_CANNOT_BE_WRITTEN_ERROR [ExchangeRateRepository] " + s"writing to mongo is failed")
throw new Exception(s"unable to insert exchangeRateRepository ")}
)
}
private def deleteOlderExchangeData() = {
val sixMonthOldDate = LocalDate.now.minusMonths(6.toInt)
val oldFileName = "exrates-monthly-%02d".format(sixMonthOldDate.getMonthValue) +
sixMonthOldDate.getYear.toString.substring(2)
    collection.findOneAndDelete(equal("_id", oldFileName)).toFuture() map {
      // guard on null so the "no older data" branch is actually reachable when nothing matched
      case result if result != null => logger.info(s"[ExchangeRateRepository] deleting older data from mongo is successful $oldFileName")
      case _ => logger.info(s"[ExchangeRateRepository] no older data is available")
    }
}
def insertOrUpdate(exchangeRateData: JsObject): Future[Any] = {
get(currentFileName) map {
case response if response.isEmpty => insert(exchangeRateData)
Future.successful(response)
case _ => update(exchangeRateData)
Future.successful(None)
}
deleteOlderExchangeData()
Future.successful(None)
}
}
trait ExchangeRateRepository {
def insert(data: JsObject): Unit
def update(data: JsObject): Unit
def get(fileName: String): Future[Option[ExchangeRateObject]]
def insertOrUpdate(data: JsObject):Future[Any]
def isDataPresent(fileName: String): Boolean
}
|
lorandszakacs/pbkdf2-scala | src/test/scala/io/github/nremond/legacy/LegacySecureHashSpec.scala |
/**
* Copyright 2013 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.nremond.legacy
import org.scalatest._
class LegacySecureHashSpec extends FlatSpec with Matchers with Inspectors {
val secureHash = SecureHash()
val passwords = Vector("password", ":-( or :-)", "2¢", """H"qvVL5.y629_BA;1%:f/[OGo/B]x*UR2X:OUO3C/UKus$q.%$q@xmkJk&<_k+|""")
"SecureHash" should "be able to hash passwords and verify them" in {
val hashedPasswords = passwords.map(secureHash.createHash(_))
forAll(passwords.zip(hashedPasswords)) {
case (pwd: String, hashedPwd: String) =>
secureHash.validatePassword(pwd, hashedPwd) should be(true)
}
}
it should "only validate correct password" in {
val password = "<PASSWORD>"
val incorrectHash = "dead:beef"
secureHash.validatePassword(password, incorrectHash) should be(false)
}
}
|
lorandszakacs/pbkdf2-scala | src/test/scala/io/github/nremond/SecureHashSpecification.scala |
package io.github.nremond
import io.github.nremond.SecureHash._
import org.scalacheck.Prop.{ BooleanOperators, forAll }
import org.scalacheck.{ Gen, Properties }
object SecureHashSpecification extends Properties("SecureHash") {
val iterationsGen = Gen.chooseNum(10000, 30000)
val dkLengthGen = Gen.oneOf(16, 32, 64)
val algoGen = Gen.oneOf(SecureHash.internals.javaAlgoToPassLibAlgo.keys.toSeq)
property("createHash and validatePassword should always round trip") =
forAll(Gen.alphaStr, iterationsGen, dkLengthGen, algoGen) {
(a: String, iterations: Int, dkl: Int, algo: String) =>
!a.isEmpty ==> validatePassword(a, createHash(a, iterations, dkl, algo))
}
}
|
lorandszakacs/pbkdf2-scala | src/test/scala/io/github/nremond/SecureHashSpec.scala |
/**
* Copyright 2012-2014 <NAME> (@nremond)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.nremond
import java.nio.ByteBuffer
import java.security.SecureRandom
import org.scalatest._
class SecureHashSpec extends FlatSpec with Matchers with Inspectors {
"SecureHash.internals" should "encode the output properly" in {
import SecureHash.internals._
val salt = ByteBuffer.allocate(3).put(0.toByte).array()
val res = List(
encode(salt, salt, 22000, "Alg1") -> "$pbkdf2-Alg1$22000$AAAA$AAAA",
encode(salt, salt, 22000, "HmacSHA1") -> "$pbkdf2-sha1$22000$AAAA$AAAA",
encode(salt, salt, 22000, "HmacSHA256") -> "$pbkdf2-sha256$22000$AAAA$AAAA",
encode(salt, salt, 22000, "HmacSHA512") -> "$pbkdf2-sha512$22000$AAAA$AAAA")
forAll(res) {
x => x._1 should be(x._2)
}
}
it should "decode the input properly" in {
import SecureHash.internals._
val Some(Decoded(version, iterations, algo, salt, hash)) = decode("$pbkdf2-sha512$2222$AAAA$AAAAAAAA")
version should be("pbkdf2")
algo should be("HmacSHA512")
iterations should be(2222)
val zero = 0.toByte
salt should be(Array[Byte](zero, zero, zero))
hash should be(Array[Byte](zero, zero, zero, zero, zero, zero))
}
val passwords = Vector("password", ":-( or :-)", "2¢", """H"qvVL5.y629_BA;1%:f/[OGo/B]x*UR2X:OUO3C/UKus$q.%$q@xmkJk&<_k+|""")
it should "roundtrip " in {
import SecureHash.internals._
def getBytes(i: Int) = {
val b = new Array[Byte](i)
new SecureRandom().nextBytes(b)
b
}
val salt = getBytes(32)
val hash = getBytes(64)
val output = encode(salt, hash, 100, "test")
val Some(decoded) = decode(output)
decoded.salt should be(salt)
decoded.key should be(hash)
decoded.iterations should be(100)
decoded.algo should be("test")
}
"SecureHash" should "be able to hash passwords and verify them" in {
val hashedPasswords = passwords.map(SecureHash.createHash(_))
forAll(passwords.zip(hashedPasswords)) {
case (pwd: String, hashedPwd: String) =>
SecureHash.validatePassword(pwd, hashedPwd) should be(true)
}
}
it should "only validate correct password" in {
val password = "<PASSWORD>"
val incorrectHash = "dead:beef"
SecureHash.validatePassword(password, incorrectHash) should be(false)
}
it should "only validate correct password using legacy validate method" in {
val password = "<PASSWORD>"
val incorrectHash = "$pbkdf2-sha256$6400$.6UI/S.nXIk8jcbdHx3Fhg$98jZicV16ODfEsEZeYPGHU3kbrUrvUEXOPimVSQDD44"
legacy.SecureHash().validatePassword(password, incorrectHash) should be(false)
}
it should "be compatible with Passlib" in {
    // Examples from: https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
val hashs = Vector("$pbkdf2-sha256$6400$0ZrzXitFSGltTQnBWOsdAw$Y11AchqV4b0sUisdZd0Xr97KWoymNE0LNNrnEgY4H9M",
"$pbkdf2-sha256$6400$.6UI/S.nXIk8jcbdHx3Fhg$98jZicV16ODfEsEZeYPGHU3kbrUrvUEXOPimVSQDD44")
hashs.foreach { hash =>
SecureHash.validatePassword("password", hash) should be(true)
SecureHash.validatePassword("wrong", hash) should be(false)
}
}
}
|
lorandszakacs/pbkdf2-scala | src/test/scala/io/github/nremond/PBKDF2Spec.scala | /**
* Copyright 2013 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.nremond
import org.scalatest.{ FlatSpec, Matchers }
import scala.io.Codec.UTF8
/**
* This spec contains test vectors for the Public-Key Cryptography
* Standards (PKCS) #5 Password-Based Key Derivation Function 2 (PBKDF2)
* with the Hash-based Message Authentication Code (HMAC) Secure Hash
* Algorithm (SHA-1) pseudorandom function.
*
* See RFC6070 (http://tools.ietf.org/html/rfc6070)
*
*/
class PBKDF2Spec extends FlatSpec with Matchers {
def pbkdf2(password: String, salt: String, iterations: Int, dkLength: Int, cryptoAlgo: String) =
toHex(PBKDF2(password.getBytes(UTF8.charSet), salt.getBytes(UTF8.charSet), iterations, dkLength, cryptoAlgo))
it should "work with the 1st test vector" in {
pbkdf2("password", "salt", 2, 20, "HmacSHA1") should equal("<PASSWORD>")
}
it should "work with the 2nd test vector" in {
pbkdf2("password", "salt", 4096, 20, "HmacSHA1") should equal("<PASSWORD>")
}
// It takes too long, I'm ignoring it.
ignore should "work with the 3rd test vector" in {
pbkdf2("password", "<PASSWORD>", 16777216, 20, "HmacSHA1") should equal("eefe3d61cd4da4e4e9945b3d6ba2158c2634e984")
}
it should "work with the 4th test vector" in {
pbkdf2("password<PASSWORD>", "<PASSWORD>Ts<PASSWORD>SALTs<PASSWORD>", 4096, 25, "HmacSHA1") should equal("3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038")
}
it should "work with the 5th test vector" in {
pbkdf2("<PASSWORD>", "sa\u0000lt", 4096, 16, "HmacSHA1") should equal("56fa6aa75548099dcc37d7f03425e0c3")
}
} |
lorandszakacs/pbkdf2-scala | src/main/scala/io/github/nremond/SecureHash.scala |
/**
* Copyright 2012-2014 <NAME> (@nremond)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.nremond
import java.nio.charset.StandardCharsets.UTF_8
import java.security.SecureRandom
import java.util.Base64
import scala.util.Try
/**
* Implements functionality to create and validate password hashes using [[PBKDF2]]
*/
object SecureHash {
import internals._
/**
* Creates a hashed password using [[PBKDF2]]
*
   * This function outputs a string in a PassLib-compatible modular crypt format :
   *
   * \$pbkdf2-ALGO\$ITERATIONS\$SALT\$KEY
   *
   * - ALGO : the PRF name (sha1, sha256 or sha512, mapped from the JCE HMAC algorithm name)
   *
   * - ITERATIONS : the iteration count in decimal
   *
   * - SALT : Base64 encoded salt, with '+' replaced by '.' and padding stripped
   *
   * - KEY : Base64 encoded derived key, encoded the same way
   *
   * Example :
   *
   * \$pbkdf2-sha256\$6400\$0ZrzXitFSGltTQnBWOsdAw\$Y11AchqV4b0sUisdZd0Xr97KWoymNE0LNNrnEgY4H9M
*
* @param password the password to hash
* @param iterations the number of encryption iterations, default to 20000
* @param dkLength derived-key length, default to 32
* @param cryptoAlgo HMAC+SHA512 is the default as HMAC+SHA1 is now considered weak
* @param saltLength length of the salt, default to 24
*/
def createHash(password: String, iterations: Int = 20000,
dkLength: Int = 32, cryptoAlgo: String = "HmacSHA512", saltLength: Int = 24): String = {
val salt = {
val b = new Array[Byte](saltLength)
(new SecureRandom).nextBytes(b)
b
}
val key = PBKDF2(password.getBytes(UTF_8), salt, iterations, dkLength, cryptoAlgo)
encode(salt, key, iterations, cryptoAlgo)
}
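  // Usage sketch (illustrative values): hashing and validating round-trip with the defaults above.
  //
  //   val stored = SecureHash.createHash("correct horse battery staple")
  //   // stored looks like "$pbkdf2-sha512$20000$<salt>$<key>" (salt and key elided here)
  //   SecureHash.validatePassword("correct horse battery staple", stored) // true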
/**
* Tests two byte arrays for value equality in constant time.
*
* @note This function leaks information about the length of each byte array as well as
* whether the two byte arrays have the same length.
* @see [[http://codahale.com/a-lesson-in-timing-attacks]]
*/
private[this] def secure_==(a1: Array[Byte], a2: Array[Byte]): Boolean =
a1.length == a2.length && a1.zip(a2).foldLeft(0) { case (r, (x1, x2)) => r | x1 ^ x2 } == 0
/**
* Validate a password against a password hash
*
* @param password the password to validate
* @param hashedPassword the password hash. This should be in the same format as generated by [[SecureHash.createHash]]
   * @return true if the password is valid
*/
def validatePassword(password: String, hashedPassword: String): Boolean = decode(hashedPassword) match {
case Some(Decoded(_, iterations, algo, salt, key)) =>
val hash = PBKDF2(password.getBytes(UTF_8), salt, iterations, key.length, algo)
secure_==(key, hash)
case _ => false
}
private[nremond] object internals {
def encode(salt: Array[Byte], key: Array[Byte], iterations: Int, algo: String): String = {
val iters = iterations.toString
// use hash name compatible with PassLib (https://pythonhosted.org/passlib/index.html)
val compAlgo = javaAlgoToPassLibAlgo.getOrElse(algo, algo)
s"$$pbkdf2-$compAlgo$$$iters$$${b64Encoder(salt)}$$${b64Encoder(key)}"
}
case class Decoded(version: String, iterations: Int, algo: String, salt: Array[Byte], key: Array[Byte])
def decode(s: String): Option[Decoded] = Try {
s match {
case rx(a, i, s, h) => Some(Decoded("pbkdf2", i.toInt, passLibAlgoToJava.getOrElse(a, a), b64Decoder(s), b64Decoder(h)))
case _ => None
}
}.toOption.flatten
private[nremond] val javaAlgoToPassLibAlgo = Map("HmacSHA1" -> "sha1", "HmacSHA256" -> "sha256", "HmacSHA512" -> "sha512")
private[nremond] val passLibAlgoToJava = javaAlgoToPassLibAlgo.map(_.swap)
private[this] val rx = "\\$pbkdf2-([^\\$]+)\\$(\\d+)\\$([^\\$]*)\\$([^\\$]*)".r
private[this] def b64Decoder(s: String) =
Base64.getDecoder.decode(s.replace(".", "+"))
private[this] def b64Encoder(ba: Array[Byte]) =
Base64.getEncoder.withoutPadding.encodeToString(ba).replace("+", ".")
}
}
|
lorandszakacs/pbkdf2-scala | src/main/scala/io/github/nremond/legacy/SecureHash.scala |
/**
* Copyright 2012-2014 <NAME> (@nremond)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.nremond.legacy
import java.nio.charset.StandardCharsets.UTF_8
import java.security.SecureRandom
import java.util.Arrays
import io.github.nremond._
import scala.annotation.tailrec
/**
* This is the legacy API.
*
* @param iterations the number of encryption iterations. Default to 20000
* @param dkLength derived-key length, default to 32
* @param cryptoAlgo HMAC+SHA512 is the default as HMAC+SHA1 is now considered weak
*/
case class SecureHash(iterations: Int = 20000, dkLength: Int = 32, cryptoAlgo: String = "HmacSHA512") {
/**
* Creates a hashed password using [[PBKDF2]]
*
   * This function outputs a string in the following format:
*
* salt:key
*
* - salt : hex encoded salt
*
* - key : hex encoded derived key
*
* Example :
*
* a9c654289407047fd197516196e14b97bdabfa4bc934d0e9:f<PASSWORD>bd<PASSWORD>
*
* @param password the password to hash
*/
@deprecated("Only create password via io.github.nremond.SecureHash", "pbkdf2-scala 0.5")
def createHash(password: String): String = {
val random = new SecureRandom
val salt = new Array[Byte](24) //192 bits
random.nextBytes(salt)
val hash = PBKDF2(password.getBytes(UTF_8), salt, iterations, dkLength, cryptoAlgo)
raw"${toHex(salt)}:${toHex(hash)}"
}
/**
* Validate a password against a password hash
*
   * This function will first try to validate with the *new* format as generated
   * by [[io.github.nremond.SecureHash.createHash]]. If it fails, it will fall back to the *old* format,
   * making this function very useful when transitioning from the *old* to the *new* format.
*
* @param password the password to validate
* @param hashedPassword the password hash.
   * @return true if the password is valid
*/
def validatePassword(password: String, hashedPassword: String): Boolean =
//Try new format first and then fall back to legacy
if (io.github.nremond.SecureHash.validatePassword(password, hashedPassword))
true
else
legacyValidatePassword(password, hashedPassword)
private[this] def legacyValidatePassword(password: String, hashedPassword: String): Boolean = {
val params = hashedPassword.split(":")
if (params.size == 2) {
val salt = fromHex(params(0))
val hash = PBKDF2(password.getBytes(UTF_8), salt, iterations, dkLength, cryptoAlgo)
Arrays.equals(fromHex(params(1)), hash)
} else
false
}
}
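// A minimal migration sketch: validatePassword above accepts both the new "$pbkdf2-..." hashes
// and the legacy "salt:key" hex format, so stored legacy hashes keep validating while new ones
// are produced by io.github.nremond.SecureHash. The candidate and stored hash are placeholders.
object LegacySecureHashMigrationSketch {
  val hasher = SecureHash() // defaults: 20000 iterations, HmacSHA512, 32-byte derived keys

  def isValid(candidate: String, storedHash: String): Boolean =
    hasher.validatePassword(candidate, storedHash)
}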
|
lorandszakacs/pbkdf2-scala | src/main/scala/io/github/nremond/package.scala |
package io.github
package object nremond {
def toHex(buff: Array[Byte]): String = {
val bi = new java.math.BigInteger(1, buff)
val hex = bi.toString(16)
val paddingLength = (buff.length * 2) - hex.size
if (paddingLength > 0)
("0" * paddingLength) + hex
else
hex
}
def fromHex(hex: String): Array[Byte] = {
val binary = new Array[Byte](hex.length / 2)
for (i <- 0 until binary.length)
binary.update(i, Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16).asInstanceOf[Byte])
binary
}
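  // Worked example (illustrative): toHex(Array[Byte](0, -1)) == "00ff" and fromHex("00ff")
  // recovers the same two bytes; toHex left-pads so the output is always two hex characters
  // per input byte.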
} |
lorandszakacs/pbkdf2-scala | project/plugins.sbt |
resolvers += Classpaths.sbtPluginReleases
addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.2.2")
addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.1.1")
addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "2.4")
addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.11")
addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.8.2")
|
sadhen/cmark-scala | build.sbt | enablePlugins(ScalaNativePlugin)
name := "cmark-scala"
version := "0.1.0-SNAPSHOT"
organization := "tech.sparse"
scalaVersion := "2.11.12"
|
sadhen/cmark-scala | src/main/scala/cmark/cmark.scala | package cmark
import scala.scalanative.native._
@link("cmark")
@extern
object cmark {
/** Convert 'text' (assumed to be a UTF-8 encoded string with length
* 'len') from CommonMark Markdown to HTML, returning a null-terminated,
* UTF-8-encoded string. It is the caller's responsibility
* to free the returned buffer.
*/
@name("cmark_markdown_to_html")
def markdownToHtml(text: CString, len: CSize, options: CInt): CString = extern
/**
* ## Version information
*/
/** The library version as integer for runtime checks.
*
* * Bits 16-23 contain the major version.
* * Bits 8-15 contain the minor version.
* * Bits 0-7 contain the patchlevel.
*
* In hexadecimal format, the number 0x010203 represents version 1.2.3.
*/
@name("cmark_version")
def version(): CInt = extern
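  // Worked decoding example (illustrative): a returned value of 0x001D01 splits into
  // major = (v >> 16) & 0xFF = 0, minor = (v >> 8) & 0xFF = 29, patch = v & 0xFF = 1, i.e. 0.29.1.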
/** The library version string for runtime checks. */
@name("cmark_version_string")
def versionString(): CString = extern
}
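// A minimal usage sketch, assuming the cmark shared library is linked and that Zone, toCString,
// fromCString, string.strlen and stdlib.free from scala.scalanative.native are available (the
// Scala Native 0.3.x API this build targets). An options value of 0 means default behaviour;
// the returned buffer is freed here because, as the scaladoc above notes, it belongs to the caller.
object CmarkUsageSketch {
  import scala.scalanative.native._
  import _root_.cmark.{cmark => libcmark}

  def renderToHtml(markdown: String): String = Zone { implicit z =>
    val input  = toCString(markdown)
    val html   = libcmark.markdownToHtml(input, string.strlen(input), 0)
    val result = fromCString(html)
    stdlib.free(html) // ownership of the buffer transfers to us
    result
  }
}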
|
sadhen/cmark-scala | src/main/scala/cmark/Node.scala | package cmark
import scala.scalanative.native._
/**
* ## Node
*/
@link("cmark")
@extern
object Node {
/** Creates a new node of type 'type'. Note that the node may have
* other required properties, which it is the caller's responsibility
* to assign.
*/
@name("cmark_node_new")
def create(nodeType: NodeType): Ptr[Node] = extern
/** Frees the memory allocated for a node and any children. */
@name("cmark_node_free")
def free(doc: Ptr[Node]): Unit = extern
/**
* ## Tree Traversal
*/
/** @return The next node in the sequence after 'node', or null if
* there is none.
*/
@name("cmark_node_next")
def nodeNext(node: Ptr[Node]): Ptr[Node] = extern
/** @return The previous node in the sequence after 'node', or null if
* there is none.
*/
@name("cmark_node_previous")
def nodePrevious(node: Ptr[Node]): Ptr[Node] = extern
/** @return The parent of 'node', or null if there is none. */
@name("cmark_node_parent")
def parent(node: Ptr[Node]): Ptr[Node] = extern
/** @return The first child of 'node', or null if 'node' has no children.
*/
@name("cmark_node_first_child")
def firstChild(node: Ptr[Node]): Ptr[Node] = extern
/** @return The last child of 'node', or null if 'node' has no children.
*/
@name("cmark_node_last_child")
def lastChild(node: Ptr[Node]): Ptr[Node] = extern
/**
* ## Accessors
*/
/** @return The user data of 'node'. */
@name("cmark_node_get_user_data")
def getUserData(node: Ptr[Node]): Ptr[Byte] = extern
/** Sets arbitrary user data for 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_user_data")
def setUserData(node: Ptr[Node], userData: Ptr[Byte]): CInt = extern
/** @return The type of 'node', or [[NodeType.None]] on error. */
@name("cmark_node_get_type")
def getType(node: Ptr[Node]): NodeType = extern
/** Like 'cmark_node_get_type', but returns a string representation
of the type, or `"<unknown>"`.
*/
@name("cmark_node_get_type_string")
def getTypeString(node: Ptr[Node]): CString = extern
/** @return The string contents of 'node', or an empty string if none
* is set.
*/
@name("cmark_node_get_literal")
def getLiteral(node: Ptr[Node]): CString = extern
/** Sets the string contents of 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_literal")
def setLiteral(node: Ptr[Node], content: CString): CInt = extern
/** @return The heading level of 'node', or 0 if 'node' is not a heading.
*/
@name("cmark_node_get_heading_level")
def getHeadingLevel(node: Ptr[Node]): CInt = extern
/** Sets the heading level of 'node'
* @return 1 on success and 0 on error.
*/
@name("cmark_node_set_heading_level")
def setHeadingLevel(node: Ptr[Node], level: CInt): CInt = extern
/** @return The list type of 'node', or [[ListType.None]] if 'node'
* is not a list.
*/
@name("cmark_node_get_list_type")
def getListType(node: Ptr[Node]): ListType = extern
/** Sets the list type of 'node'
* @return 1 on success and 0 on error.
*/
@name("cmark_node_set_list_type")
def setListType(node: Ptr[Node], tpe: ListType): CInt = extern
/** @return The list delimiter type of 'node', or [[DelimType.None]] if 'node'
* is not a list.
*/
@name("cmark_node_get_list_delim")
def getListDelim(node: Ptr[Node]): DelimType = extern
/** Sets the list delimiter type of 'node'
* @return 1 on success and 0 on error.
*/
@name("cmark_node_set_list_delim")
def setListDelim(node: Ptr[Node], delim: DelimType): CInt = extern
/** @return starting number of 'node', if it is an ordered list, otherwise 0. */
@name("cmark_node_get_list_start")
def getListStart(node: Ptr[Node]): CInt = extern
/** Sets starting number of 'node', if it is an ordered list.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_list_start")
def setListStart(node: Ptr[Node], start: CInt): CInt = extern
/** @return 1 if 'node' is a tight list, 0 otherwise. */
@name("cmark_node_get_list_tight")
def getListTight(node: Ptr[Node]): CInt = extern
/** Sets the "tightness" of a list.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_list_tight")
def setListTight(node: Ptr[Node], tight: CInt): CInt = extern
/** @return The info string from a fenced code block. */
@name("cmark_node_get_fence_info")
def getFenceInfo(node: Ptr[Node]): CString = extern
/** Sets the info string in a fenced code block
* @return 1 on success and 0 on failure.
*/
@name("cmark_node_set_fence_info")
def setFenceInfo(node: Ptr[Node], info: CString): CInt = extern
/** @return The URL of a link or image 'node', or an empty string if no URL
* is set.
*/
@name("cmark_node_get_url")
def getUrl(node: Ptr[Node]): CString = extern
/** Sets the URL of a link or image 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_url")
def setUrl(node: Ptr[Node], url: CString): CInt = extern
/** @return The title of a link or image 'node', or an empty string if no
* title is set.
*/
@name("cmark_node_get_title")
def getTitle(node: Ptr[Node]): CString = extern
/** Sets the title of a link or image 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_title")
def setTitle(node: Ptr[Node], title: CString): CInt = extern
/** @return The literal "on enter" text for a custom 'node', or
an empty string if no onEnter is set.
*/
@name("cmark_node_get_on_enter")
def getOnEnter(node: Ptr[Node]): CString = extern
/** Sets the literal text to render "on enter" for a custom 'node'. Any
* children of the node will be rendered after this text.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_on_enter")
def setOnEnter(node: Ptr[Node], onEnter: CString): CInt = extern
/** @return The literal "on exit" text for a custom 'node', or an empty string
* if no on_exit is set.
*/
@name("cmark_node_get_on_exit")
def getOnExit(node: Ptr[Node]): CString = extern
/** Sets the literal text to render "on exit" for a custom 'node'.
* Any children of the node will be rendered before this text.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_set_on_exit")
def setOnExit(node: Ptr[Node], onExit: CString): CInt = extern
/** @return The line on which 'node' begins. */
@name("cmark_node_get_start_line")
def getStartLine(node: Ptr[Node]): CInt = extern
/** @return The column at which 'node' begins. */
@name("cmark_node_get_start_column")
def getStartColumn(node: Ptr[Node]): CInt = extern
/** @return The line on which 'node' ends. */
@name("cmark_node_get_end_line")
def getEndLine(node: Ptr[Node]): CInt = extern
/** @return The column at which 'node' ends. */
@name("cmark_node_get_end_column")
def getEndColumn(node: Ptr[Node]): CInt = extern
/**
* ## Tree Manipulation
*/
/** Unlinks a 'node', removing it from the tree, but not freeing its
* memory. (Use [[Node.free]] for that.)
*/
@name("cmark_node_unlink")
def unlink(node: Ptr[Node]): Unit = extern
/** Inserts 'sibling' before 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_insert_before")
def insertBefore(node: Ptr[Node], sibling: Ptr[Node]): CInt = extern
/** Inserts 'sibling' after 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_insert_after")
def insertAfter(node: Ptr[Node], sibling: Ptr[Node]): CInt = extern
/** Replaces 'oldNode' with 'newNode' and unlinks 'oldNode' (but does
* not free its memory).
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_replace")
def nodeReplace(oldNode: Ptr[Node], newNode: Ptr[Node]): CInt = extern
/** Adds 'child' to the beginning of the children of 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_prepend_child")
def prependChild(node: Ptr[Node], child: Ptr[Node]): CInt = extern
/** Adds 'child' to the end of the children of 'node'.
* @return 1 on success, 0 on failure.
*/
@name("cmark_node_append_child")
def appendChild(node: Ptr[Node], child: Ptr[Node]): CInt = extern
/** Consolidates adjacent text nodes. */
@name("cmark_consolidate_text_nodes")
def consolidateTextNodes(root: Ptr[Node]): Unit = extern
}
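// Illustrative sketch (not part of the bindings): walks the direct children of a
// node (e.g. a document produced by the parser) and prints their type and source
// positions, using only the accessors declared above.
object NodeExample {
def describeChildren(doc: Ptr[Node]): Unit = {
var child = Node.firstChild(doc)
while (child != null) {
val tpe = fromCString(Node.getTypeString(child))
println(s"$tpe: lines ${Node.getStartLine(child)}-${Node.getEndLine(child)}")
child = Node.nodeNext(child)
}
}
}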
|
sadhen/cmark-scala | src/main/scala/cmark/Parser.scala | package cmark
import scala.scalanative.native._
import scala.scalanative.native.stdio.FILE
/**
* ## Parsing
*
* Simple interface:
*
* val document = Parser.parseDocument(c"Hello *world*", 13, Options.Default)
*
* Streaming interface:
*
* val parser = Parser.create(Options.Default)
* Parser.feed(parser, buffer, bytes)
* val document = Parser.finish(parser)
* Parser.free(parser)
*/
@link("cmark")
@extern
object Parser {
/** Creates a new parser object. */
@name("cmark_parser_new")
def create(options: CInt): Ptr[Parser] = extern
/** Creates a new parser object with the given memory allocator */
@name("cmark_parser_new_with_mem")
def createWithMem(options: CInt, mem: Ptr[Memory]): Ptr[Parser] = extern
/** Frees memory allocated for a parser object. */
@name("cmark_parser_free")
def free(parser: Ptr[Parser]): Unit = extern
/** Feeds a string of length 'len' to 'parser'. */
@name("cmark_parser_feed")
def feed(parser: Ptr[Parser], buffer: CString, bytes: CSize): Unit = extern
/** Finish parsing and return a pointer to a tree of nodes. */
@name("cmark_parser_finish")
def finish(parser: Ptr[Parser]): Ptr[Node] = extern
/** Parse a CommonMark document in 'buffer' of length 'len'.
* Returns a pointer to a tree of nodes. The memory allocated for
* the node tree should be released using [[Node.free]]
* when it is no longer needed.
*/
@name("cmark_parse_document")
def parseDocument(buffer: CString, len: CSize, options: CInt): Ptr[Node] = extern
/** Parse a CommonMark document in file 'file', returning a pointer to
* a tree of nodes. The memory allocated for the node tree should be
* released using [[Node.free]] when it is no longer needed.
*/
@name("cmark_parse_file")
def parseFile(file: Ptr[FILE], options: CInt): Ptr[Node] = extern
}
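// Illustrative sketch (not part of the bindings) of the streaming interface described
// above; the resulting tree is released with Node.free as required. Option value 0
// corresponds to cmark's defaults.
object ParserExample {
def parse(markdown: String): Unit = Zone { implicit z =>
val parser = Parser.create(0)
val buffer = toCString(markdown)
Parser.feed(parser, buffer, string.strlen(buffer))
val doc = Parser.finish(parser)
Parser.free(parser)
println(fromCString(Node.getTypeString(doc))) // "document"
Node.free(doc)
}
}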
|
krutipandya/hello-world | src/main/scala/hello/HelloConfig.scala | package hello
import org.springframework.context.annotation.{Configuration, ComponentScan}
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.bind.annotation.ResponseBody
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.RequestMethod
/**
* This config class will trigger Spring @annotation scanning and auto configure Spring context.
*
* @author kruti
* @since 1.0
*/
@Controller
@Configuration
@EnableAutoConfiguration
@ComponentScan
class HelloConfig {
@RequestMapping(value=Array("/"), method=Array(RequestMethod.GET))
@ResponseBody
def home(): String = "Hello World!"
} |
bopopescu/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/OnlineAggregationUtils.scala | /*
* Incremental calculation for variance.
*/
package org.apache.spark.sql
object commonMath {
def CNDF(x: Double): Double = {
var mX = x
val neg = if (mX < 0d) 1
else 0
if (neg == 1) mX = mX * -1d
val k = 1d / (1d + 0.2316419 * mX)
var y = ((((1.330274429 * k - 1.821255978) * k + 1.781477937) * k - 0.356563782)
* k + 0.319381530) * k
y = 1.0 - 0.398942280401 * Math.exp(-0.5 * mX * mX) * y
val retP = (1d - neg) * y + neg * (1d - y)
if (retP > 1) 1 else retP
}
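// Sanity check (illustrative, not in the original source): CNDF(0.0) ≈ 0.5 and
// CNDF(1.96) ≈ 0.975, matching the standard normal CDF.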
/**
* Compute the 'p'-th quantile for the "standard normal distribution" function.
*
* @param p the p-th quantile, e.g., .95 (95%)
* @return the 'p'-th quantile for the "standard normal distribution" function.
* This function returns an approximation of the "inverse" cumulative
* standard normal distribution function, i.e., given 'p', it returns an
* approximation to the 'x' satisfying
* <p>
* p = F(x) = P(Z <= x)
* <p>
* where Z is a random variable from the standard normal distribution.
* The algorithm uses a minimax approximation by rational functions and the
* result has a relative error whose absolute value is less than 1.15e-9.
*/
def normalInv(p: Double = .95): Double = {
// Coefficients in rational approximations
val a = Array(-3.969683028665376e+01, 2.209460984245205e+02,
-2.759285104469687e+02, 1.383577518672690e+02,
-3.066479806614716e+01, 2.506628277459239e+00)
val b = Array(-5.447609879822406e+01, 1.615858368580409e+02,
-1.556989798598866e+02, 6.680131188771972e+01,
-1.328068155288572e+01)
val c = Array(-7.784894002430293e-03, -3.223964580411365e-01,
-2.400758277161838e+00, -2.549732539343734e+00,
4.374664141464968e+00, 2.938163982698783e+00)
val d = Array(7.784695709041462e-03, 3.224671290700398e-01,
2.445134137142996e+00, 3.754408661907416e+00)
// Define break-points
val plow = 0.02425
val phigh = 1 - plow
// Rational approximation for lower region:
if (p < plow) {
val q = math.sqrt(-2 * math.log(p))
return (((((c(0) * q + c(1)) * q + c(2)) * q + c(3)) * q + c(4)) * q + c(5)) /
((((d(0) * q + d(1)) * q + d(2)) * q + d(3)) * q + 1)
}
// Rational approximation for upper region:
if (phigh < p) {
val q = math.sqrt(-2 * math.log(1 - p))
return -(((((c(0) * q + c(1)) * q + c(2)) * q + c(3)) * q + c(4)) * q + c(5)) /
((((d(0) * q + d(1)) * q + d(2)) * q + d(3)) * q + 1)
}
// Rational approximation for central region:
val q = p - 0.5
val r = q * q
(((((a(0) * r + a(1)) * r + a(2)) * r + a(3)) * r + a(4)) * r + a(5)) * q /
(((((b(0) * r + b(1)) * r + b(2)) * r + b(3)) * r + b(4)) * r + 1)
}
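// Illustrative note (not in the original source): normalInv(0.975) ≈ 1.96, the familiar
// two-sided 95% quantile, so calcErrorBound below effectively returns
// z_p * sqrt(T_n_2 / samplesCount).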
def main(args: Array[String]): Unit = {
implicit val arrayToSamples = (values: Array[Double]) => NumberSamples(values)
val historicalSamples = Array(1.5d, 3.4d, 7.8d, 11.6d)
val deltaSamples = Array(9.4d, 4.2d, 35.6d, 77.9d)
val deltaVar =
historicalSamples.measures.appendDelta(deltaSamples.measures).variance
println(s"incrementally combined variance = $deltaVar")
}
def calcConfidence(errorBound: Double,
samplesCount: Long,
T_n_2: Double): Double = {
2 * CNDF((errorBound * math.sqrt(samplesCount)) / math.sqrt(T_n_2)) - 1
}
def calcErrorBound(confidence: Double,
samplesCount: Long,
T_n_2: Double): Double = {
val z_p = normalInv((1 + confidence) / 2)
math.sqrt(((z_p * z_p) * T_n_2) / samplesCount)
}
}
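// Illustrative sketch (not in the original source): derives the half-width of a
// confidence interval via calcErrorBound and recovers the confidence level from it
// via calcConfidence. The variance statistic T_n_2 = 2500.0 is an assumed value.
object OnlineAggregationExample {
def main(args: Array[String]): Unit = {
val samplesCount = 10000L
val tN2 = 2500.0
val bound = commonMath.calcErrorBound(0.95, samplesCount, tN2)
val confidence = commonMath.calcConfidence(bound, samplesCount, tN2)
println(s"error bound = $bound, recovered confidence = $confidence") // confidence ≈ 0.95
}
}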
case class DeltaVarianceMeasures(n: Int, sum: Double, variance: Double) {
def mAvg: Double = sum / n
def appendDelta(delta: DeltaVarianceMeasures): DeltaVarianceMeasures = {
val newN = this.n + delta.n
val newSum = this.sum + delta.sum
val newAvg = newSum / newN
def partial(m: DeltaVarianceMeasures): Double = {
val deltaAvg = newAvg - m.mAvg
m.n * (m.variance + deltaAvg * deltaAvg)
}
val newVariance = (partial(this) + partial(delta)) / newN
DeltaVarianceMeasures(newN, newSum, newVariance)
}
}
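// Illustrative sketch (not in the original source): combining two batches with
// appendDelta yields the same population variance as computing it directly over the
// concatenated samples.
object DeltaVarianceCheck {
def main(args: Array[String]): Unit = {
val a = Seq(1.5, 3.4, 7.8, 11.6)
val b = Seq(9.4, 4.2, 35.6, 77.9)
val incremental = NumberSamples(a).measures.appendDelta(NumberSamples(b).measures)
val direct = NumberSamples(a ++ b).measures
println(incremental.variance) // equals direct.variance up to floating-point error
println(direct.variance)
}
}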
case class NumberSamples(values: Seq[Double]) {
def measures: DeltaVarianceMeasures = {
if (values == null || values.isEmpty) {
DeltaVarianceMeasures(0, 0d, 0d)
}
else {
DeltaVarianceMeasures(values.length, values.sum, variance)
}
}
private def variance: Double = {
val n = values.length
val avg = values.sum / n
values.foldLeft(0d) { case (sum, sample) =>
sum + (sample - avg) * (sample - avg)
} / n
}
} |
streamline-eu/dynamic-flink | flink-libraries/flink-cep-scala/src/main/scala/org/apache/flink/cep/scala/pattern/Pattern.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.cep.scala.pattern
import org.apache.flink.cep
import org.apache.flink.cep.pattern.conditions.IterativeCondition
import org.apache.flink.cep.pattern.conditions.IterativeCondition.Context
import org.apache.flink.cep.pattern.{Quantifier, Pattern => JPattern}
import org.apache.flink.streaming.api.windowing.time.Time
/**
* Base class for a pattern definition.
*
* A pattern definition is used by [[org.apache.flink.cep.nfa.compiler.NFACompiler]] to create
* a [[org.apache.flink.cep.nfa.NFA]].
*
* {{{
* Pattern<T, F> pattern = Pattern.<T>begin("start")
* .next("middle").subtype(F.class)
* .followedBy("end").where(new MyFilterFunction());
* }}}
*
* @param jPattern Underlying Java API Pattern
* @tparam T Base type of the elements appearing in the pattern
* @tparam F Subtype of T to which the current pattern operator is constrained
*/
class Pattern[T, F <: T](jPattern: JPattern[T, F]) {
private[flink] def wrappedPattern = jPattern
/**
*
* @return Name of the pattern operator
*/
def getName(): String = jPattern.getName()
/**
*
* @return Window length in which the pattern match has to occur
*/
def getWindowTime(): Option[Time] = {
Option(jPattern.getWindowTime())
}
/**
*
* @return currently applied quantifier to this pattern
*/
def getQuantifier: Quantifier = jPattern.getQuantifier
/**
*
* @return Filter condition for an event to be matched
*/
def getCondition(): Option[IterativeCondition[F]] = {
Option(jPattern.getCondition())
}
/**
* Applies a subtype constraint on the current pattern operator. This means that an event has
* to be of the given subtype in order to be matched.
*
* @param clazz Class of the subtype
* @tparam S Type of the subtype
* @return The same pattern operator with the new subtype constraint
*/
def subtype[S <: F](clazz: Class[S]): Pattern[T, S] = {
jPattern.subtype(clazz)
this.asInstanceOf[Pattern[T, S]]
}
/**
* Defines the maximum time interval for a matching pattern. This means that the time gap
* between first and the last event must not be longer than the window time.
*
* @param windowTime Time of the matching window
* @return The same pattern operator with the new window length
*/
def within(windowTime: Time): Pattern[T, F] = {
jPattern.within(windowTime)
this
}
/**
* Appends a new pattern operator to the existing one. The new pattern operator enforces strict
* temporal contiguity. This means that the whole pattern only matches if an event which matches
* this operator directly follows the preceding matching event. Thus, there cannot be any
* events in between two matching events.
*
* @param name Name of the new pattern operator
* @return A new pattern operator which is appended to this pattern operator
*/
def next(name: String): Pattern[T, T] = {
Pattern[T, T](jPattern.next(name))
}
/**
* Appends a new pattern operator to the existing one. The new pattern operator enforces
* non-strict temporal contiguity. This means that a matching event of this operator and the
* preceding matching event might be interleaved with other events which are ignored.
*
* @param name Name of the new pattern operator
* @return A new pattern operator which is appended to this pattern operator
*/
def followedBy(name: String): FollowedByPattern[T, T] = {
FollowedByPattern(jPattern.followedBy(name))
}
/**
* Specifies a filter condition which has to be fulfilled by an event in order to be matched.
*
* @param filter Filter condition
* @return The same pattern operator where the new filter condition is set
*/
def where(filter: IterativeCondition[F]): Pattern[T, F] = {
jPattern.where(filter)
this
}
/**
* Specifies a filter condition which is ORed with an existing filter function.
*
* @param filter Or filter function
* @return The same pattern operator where the new filter condition is set
*/
def or(filter: IterativeCondition[F]): Pattern[T, F] = {
jPattern.or(filter)
this
}
/**
* Specifies a filter condition which is ORed with an existing filter function.
*
* @param filterFun Or filter function
* @return The same pattern operator where the new filter condition is set
*/
def or(filterFun: (F, Context[F]) => Boolean): Pattern[T, F] = {
val filter = new IterativeCondition[F] {
val cleanFilter = cep.scala.cleanClosure(filterFun)
override def filter(value: F, ctx: Context[F]): Boolean = cleanFilter(value, ctx)
}
or(filter)
}
/**
* Specifies a filter condition which has to be fulfilled by an event in order to be matched.
*
* @param filterFun Filter condition
* @return The same pattern operator where the new filter condition is set
*/
def where(filterFun: (F, Context[F]) => Boolean): Pattern[T, F] = {
val filter = new IterativeCondition[F] {
val cleanFilter = cep.scala.cleanClosure(filterFun)
override def filter(value: F, ctx: Context[F]): Boolean = cleanFilter(value, ctx)
}
where(filter)
}
/**
* Specifies a filter condition which has to be fulfilled by an event in order to be matched.
*
* @param filterFun Filter condition
* @return The same pattern operator where the new filter condition is set
*/
def where(filterFun: F => Boolean): Pattern[T, F] = {
val filter = new IterativeCondition[F] {
val cleanFilter = cep.scala.cleanClosure(filterFun)
override def filter(value: F, ctx: Context[F]): Boolean = cleanFilter(value)
}
where(filter)
}
/**
*
* @return The previous pattern operator
*/
def getPrevious(): Option[Pattern[T, _ <: T]] = {
wrapPattern(jPattern.getPrevious())
}
/**
* Specifies that this pattern can occur zero or more times (Kleene star).
* This means any number of events can be matched in this state.
*
* @return The same pattern with applied Kleene star operator
*/
def zeroOrMore: Pattern[T, F] = {
jPattern.zeroOrMore()
this
}
/**
* Specifies that this pattern can occur zero or more times (Kleene star).
* This means any number of events can be matched in this state.
*
* If eagerness is enabled, for a pattern A*B the sequence A1 A2 B will generate the matches:
* B, A1 B and A1 A2 B. If disabled: B, A1 B, A2 B and A1 A2 B.
*
* @param eager if true the pattern always consumes earlier events
* @return The same pattern with applied Kleene star operator
*/
def zeroOrMore(eager: Boolean): Pattern[T, F] = {
jPattern.zeroOrMore(eager)
this
}
/**
* Specifies that this pattern can occur one or more times (Kleene plus).
* This means at least one and at most an infinite number of events can be matched in this state.
*
* @return The same pattern with applied Kleene plus operator
*/
def oneOrMore: Pattern[T, F] = {
jPattern.oneOrMore()
this
}
/**
* Specifies that this pattern can occur one or more times (Kleene plus).
* This means at least one and at most an infinite number of events can be matched in this state.
*
* If eagerness is enabled, for a pattern A+B the sequence A1 A2 B will generate the matches:
* A1 B and A1 A2 B. If disabled: A1 B, A2 B and A1 A2 B.
*
* @param eager if true the pattern always consumes earlier events
* @return The same pattern with applied Kleene plus operator
*/
def oneOrMore(eager: Boolean): Pattern[T, F] = {
jPattern.oneOrMore(eager)
this
}
/**
* Specifies that this pattern can occur zero times or once.
*
* @return The same pattern with applied Kleene ? operator
*/
def optional: Pattern[T, F] = {
jPattern.optional()
this
}
/**
* Specifies exact number of times that this pattern should be matched.
*
* @param times number of times matching event must appear
* @return The same pattern with number of times applied
*/
def times(times: Int): Pattern[T, F] = {
jPattern.times(times)
this
}
/**
* Works in conjunction with [[org.apache.flink.cep.scala.pattern.Pattern#zeroOrMore()]],
* [[org.apache.flink.cep.scala.pattern.Pattern#oneOrMore()]] or
* [[org.apache.flink.cep.scala.pattern.Pattern#times(int)]].
* Specifies that any not matching element breaks the loop.
*
* <p>E.g. a pattern like:
* {{{
* Pattern.begin("start").where(_.getName().equals("c"))
* .followedBy("middle").where(_.getName().equals("a")).oneOrMore(true).consecutive()
* .followedBy("end1").where(_.getName().equals("b"));
* }}}
*
* <p>for a sequence: C D A1 A2 A3 D A4 B
*
* <p>will generate matches: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}
*
* <p><b>NOTICE:</b> This operator can be applied only when either zeroOrMore,
* oneOrMore or times was previously applied!
*
* <p>By default a relaxed continuity is applied.
* @return pattern with continuity changed to strict
*/
def consecutive(): Pattern[T, F] = {
jPattern.consecutive()
this
}
}
object Pattern {
/**
* Constructs a new Pattern by wrapping a given Java API Pattern
*
* @param jPattern Underlying Java API Pattern.
* @tparam T Base type of the elements appearing in the pattern
* @tparam F Subtype of T to which the current pattern operator is constrained
* @return New wrapping Pattern object
*/
def apply[T, F <: T](jPattern: JPattern[T, F]) = new Pattern[T, F](jPattern)
/**
* Starts a new pattern with the initial pattern operator whose name is provided. Furthermore,
* the base type of the event sequence is set.
*
* @param name Name of the new pattern operator
* @tparam X Base type of the event pattern
* @return The first pattern operator of a pattern
*/
def begin[X](name: String): Pattern[X, X] = Pattern(JPattern.begin(name))
}
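// Illustrative sketch (not part of the file), mirroring the Java example in the class
// comment with the Scala DSL defined above. Event is a hypothetical type with a
// `name` field; adjust it to the actual event type in use.
object PatternExample {
case class Event(name: String)
val pattern: Pattern[Event, Event] =
Pattern.begin[Event]("start").where(_.name == "c")
.followedBy("middle").where(_.name == "a").oneOrMore(true).consecutive()
.followedBy("end").where(_.name == "b")
}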
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/ProcTimeBoundedRangeOver.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
import org.apache.flink.api.common.state.ValueState
import org.apache.flink.api.common.state.ValueStateDescriptor
import org.apache.flink.api.common.state.MapState
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.ListTypeInfo
import java.util.{ArrayList, List => JList}
import org.apache.flink.api.common.typeinfo.BasicTypeInfo
import org.apache.flink.table.codegen.{GeneratedAggregationsFunction, Compiler}
import org.slf4j.LoggerFactory
/**
* Process Function used for the aggregate in bounded proc-time OVER window
* [[org.apache.flink.streaming.api.datastream.DataStream]]
*
* @param genAggregations Generated aggregate helper function
* @param precedingTimeBoundary Is used to indicate the processing time boundaries
* @param aggregatesTypeInfo row type info of aggregation
* @param inputType row type info of input row
*/
class ProcTimeBoundedRangeOver(
genAggregations: GeneratedAggregationsFunction,
precedingTimeBoundary: Long,
aggregatesTypeInfo: RowTypeInfo,
inputType: TypeInformation[Row])
extends ProcessFunction[Row, Row]
with Compiler[GeneratedAggregations] {
private var output: Row = _
private var accumulatorState: ValueState[Row] = _
private var rowMapState: MapState[Long, JList[Row]] = _
val LOG = LoggerFactory.getLogger(this.getClass)
private var function: GeneratedAggregations = _
override def open(config: Configuration) {
LOG.debug(s"Compiling AggregateHelper: $genAggregations.name \n\n " +
s"Code:\n$genAggregations.code")
val clazz = compile(
getRuntimeContext.getUserCodeClassLoader,
genAggregations.name,
genAggregations.code)
LOG.debug("Instantiating AggregateHelper.")
function = clazz.newInstance()
output = function.createOutputRow()
// We keep the elements received in a MapState indexed based on their ingestion time
val rowListTypeInfo: TypeInformation[JList[Row]] =
new ListTypeInfo[Row](inputType).asInstanceOf[TypeInformation[JList[Row]]]
val mapStateDescriptor: MapStateDescriptor[Long, JList[Row]] =
new MapStateDescriptor[Long, JList[Row]]("rowmapstate",
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]], rowListTypeInfo)
rowMapState = getRuntimeContext.getMapState(mapStateDescriptor)
val stateDescriptor: ValueStateDescriptor[Row] =
new ValueStateDescriptor[Row]("overState", aggregatesTypeInfo)
accumulatorState = getRuntimeContext.getState(stateDescriptor)
}
override def processElement(
input: Row,
ctx: ProcessFunction[Row, Row]#Context,
out: Collector[Row]): Unit = {
val currentTime = ctx.timerService.currentProcessingTime
// buffer the incoming event
// add current element to the window list of elements with corresponding timestamp
var rowList = rowMapState.get(currentTime)
// a null value means that this is the first event received for this timestamp
if (rowList == null) {
rowList = new ArrayList[Row]()
// register timer to process event once the current millisecond passed
ctx.timerService.registerProcessingTimeTimer(currentTime + 1)
}
rowList.add(input)
rowMapState.put(currentTime, rowList)
}
override def onTimer(
timestamp: Long,
ctx: ProcessFunction[Row, Row]#OnTimerContext,
out: Collector[Row]): Unit = {
// the events that registered this timer arrived 1 ms before it fired, so their original timestamp is timestamp - 1
val currentTime = timestamp - 1
var i = 0
// initialize the accumulators
var accumulators = accumulatorState.value()
if (null == accumulators) {
accumulators = function.createAccumulators()
}
// update the elements to be removed and retract them from aggregators
val limit = currentTime - precedingTimeBoundary
// we iterate through all elements in the window buffer based on timestamp keys
// when we find timestamps that are out of interest, we retrieve corresponding elements
// and eliminate them. Multiple elements could have been received at the same timestamp
// the removal of old elements happens only once per proctime as onTimer is called only once
val iter = rowMapState.keys.iterator
val markToRemove = new ArrayList[Long]()
while (iter.hasNext) {
val elementKey = iter.next
if (elementKey < limit) {
// element key outside of window. Retract values
val elementsRemove = rowMapState.get(elementKey)
var iRemove = 0
while (iRemove < elementsRemove.size()) {
val retractRow = elementsRemove.get(iRemove)
function.retract(accumulators, retractRow)
iRemove += 1
}
// mark the element for later removal so the iterator over the MapState is not modified while iterating
markToRemove.add(elementKey)
}
}
// remove in two steps to avoid concurrent-access errors through the MapState iterator
i = 0
while (i < markToRemove.size()) {
rowMapState.remove(markToRemove.get(i))
i += 1
}
// get the list of elements of current proctime
val currentElements = rowMapState.get(currentTime)
// add current elements to aggregator. Multiple elements might have arrived in the same proctime
// the same accumulator value will be computed for all elements
var iElements = 0
while (iElements < currentElements.size()) {
val input = currentElements.get(iElements)
function.accumulate(accumulators, input)
iElements += 1
}
// we need to build the output and emit for every event received at this proctime
iElements = 0
while (iElements < currentElements.size()) {
val input = currentElements.get(iElements)
// set the fields of the last event to carry on with the aggregates
function.setForwardedFields(input, output)
// add the accumulator values to the result
function.setAggregationResults(accumulators, output)
out.collect(output)
iElements += 1
}
// update the value of accumulators for future incremental computation
accumulatorState.update(accumulators)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeFactory.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.calcite.avatica.util.TimeUnit
import org.apache.calcite.jdbc.JavaTypeFactoryImpl
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeSystem}
import org.apache.calcite.sql.SqlIntervalQualifier
import org.apache.calcite.sql.`type`.SqlTypeName
import org.apache.calcite.sql.`type`.SqlTypeName._
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo.{NothingTypeInfo, PrimitiveArrayTypeInfo, SqlTimeTypeInfo, TypeInformation}
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.typeutils.{ObjectArrayTypeInfo, RowTypeInfo}
import org.apache.flink.api.java.typeutils.ValueTypeInfo._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.plan.schema.{CompositeRelDataType, GenericRelDataType}
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
import org.apache.flink.table.typeutils.TypeCheckUtils.isSimple
import org.apache.flink.table.plan.schema.ArrayRelDataType
import org.apache.flink.table.calcite.FlinkTypeFactory.typeInfoToSqlTypeName
import org.apache.flink.types.Row
import scala.collection.mutable
import scala.collection.JavaConverters._
/**
* Flink specific type factory that represents the interface between Flink's [[TypeInformation]]
* and Calcite's [[RelDataType]].
*/
class FlinkTypeFactory(typeSystem: RelDataTypeSystem) extends JavaTypeFactoryImpl(typeSystem) {
// NOTE: for future data types it might be necessary to
// override more methods of RelDataTypeFactoryImpl
private val seenTypes = mutable.HashMap[TypeInformation[_], RelDataType]()
def createTypeFromTypeInfo(typeInfo: TypeInformation[_]): RelDataType = {
// simple type can be converted to SQL types and vice versa
if (isSimple(typeInfo)) {
val sqlType = typeInfoToSqlTypeName(typeInfo)
sqlType match {
case INTERVAL_YEAR_MONTH =>
createSqlIntervalType(
new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, SqlParserPos.ZERO))
case INTERVAL_DAY_SECOND =>
createSqlIntervalType(
new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, SqlParserPos.ZERO))
case _ =>
createSqlType(sqlType)
}
}
// advanced types require specific RelDataType
// for storing the original TypeInformation
else {
seenTypes.getOrElseUpdate(typeInfo, canonize(createAdvancedType(typeInfo)))
}
}
/**
* Creates a struct type with the input fieldNames and input fieldTypes using FlinkTypeFactory
*
* @param fieldNames field names
* @param fieldTypes field types, every element is Flink's [[TypeInformation]]
* @return a struct type with the input fieldNames and input fieldTypes
*/
def buildRowDataType(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]])
: RelDataType = {
val rowDataTypeBuilder = builder
fieldNames
.zip(fieldTypes)
.foreach { f =>
rowDataTypeBuilder.add(f._1, createTypeFromTypeInfo(f._2)).nullable(true)
}
rowDataTypeBuilder.build
}
override def createSqlType(typeName: SqlTypeName, precision: Int): RelDataType = {
// it might happen that inferred VARCHAR types overflow as we set them to Int.MaxValue
// always set those to default value
if (typeName == VARCHAR && precision < 0) {
createSqlType(typeName, getTypeSystem.getDefaultPrecision(typeName))
} else {
super.createSqlType(typeName, precision)
}
}
override def createArrayType(elementType: RelDataType, maxCardinality: Long): RelDataType =
new ArrayRelDataType(
ObjectArrayTypeInfo.getInfoFor(FlinkTypeFactory.toTypeInfo(elementType)),
elementType,
true)
private def createAdvancedType(typeInfo: TypeInformation[_]): RelDataType = typeInfo match {
case ct: CompositeType[_] =>
new CompositeRelDataType(ct, this)
case pa: PrimitiveArrayTypeInfo[_] =>
new ArrayRelDataType(pa, createTypeFromTypeInfo(pa.getComponentType), false)
case oa: ObjectArrayTypeInfo[_, _] =>
new ArrayRelDataType(oa, createTypeFromTypeInfo(oa.getComponentInfo), true)
case ti: TypeInformation[_] =>
new GenericRelDataType(typeInfo, getTypeSystem.asInstanceOf[FlinkTypeSystem])
case ti@_ =>
throw TableException(s"Unsupported type information: $ti")
}
override def createTypeWithNullability(
relDataType: RelDataType,
nullable: Boolean)
: RelDataType = relDataType match {
case composite: CompositeRelDataType =>
// at the moment we do not care about nullability
canonize(composite)
case array: ArrayRelDataType =>
val elementType = createTypeWithNullability(array.getComponentType, nullable)
canonize(new ArrayRelDataType(array.typeInfo, elementType, nullable))
case _ =>
super.createTypeWithNullability(relDataType, nullable)
}
}
object FlinkTypeFactory {
private def typeInfoToSqlTypeName(typeInfo: TypeInformation[_]): SqlTypeName = typeInfo match {
case BOOLEAN_TYPE_INFO => BOOLEAN
case BYTE_TYPE_INFO => TINYINT
case SHORT_TYPE_INFO => SMALLINT
case INT_TYPE_INFO => INTEGER
case LONG_TYPE_INFO => BIGINT
case FLOAT_TYPE_INFO => FLOAT
case DOUBLE_TYPE_INFO => DOUBLE
case STRING_TYPE_INFO => VARCHAR
case BIG_DEC_TYPE_INFO => DECIMAL
// temporal types
case SqlTimeTypeInfo.DATE => DATE
case SqlTimeTypeInfo.TIME => TIME
case SqlTimeTypeInfo.TIMESTAMP => TIMESTAMP
case TimeIntervalTypeInfo.INTERVAL_MONTHS => INTERVAL_YEAR_MONTH
case TimeIntervalTypeInfo.INTERVAL_MILLIS => INTERVAL_DAY_SECOND
case CHAR_TYPE_INFO | CHAR_VALUE_TYPE_INFO =>
throw TableException("Character type is not supported.")
case _@t =>
throw TableException(s"Type is not supported: $t")
}
/**
* Converts a Calcite logical record into a Flink type information.
*/
def toInternalRowTypeInfo(logicalRowType: RelDataType): TypeInformation[Row] = {
// convert to type information
val logicalFieldTypes = logicalRowType.getFieldList.asScala map { relDataType =>
FlinkTypeFactory.toTypeInfo(relDataType.getType)
}
// field names
val logicalFieldNames = logicalRowType.getFieldNames.asScala
new RowTypeInfo(logicalFieldTypes.toArray, logicalFieldNames.toArray)
}
def toTypeInfo(relDataType: RelDataType): TypeInformation[_] = relDataType.getSqlTypeName match {
case BOOLEAN => BOOLEAN_TYPE_INFO
case TINYINT => BYTE_TYPE_INFO
case SMALLINT => SHORT_TYPE_INFO
case INTEGER => INT_TYPE_INFO
case BIGINT => LONG_TYPE_INFO
case FLOAT => FLOAT_TYPE_INFO
case DOUBLE => DOUBLE_TYPE_INFO
case VARCHAR | CHAR => STRING_TYPE_INFO
case DECIMAL => BIG_DEC_TYPE_INFO
// temporal types
case DATE => SqlTimeTypeInfo.DATE
case TIME => SqlTimeTypeInfo.TIME
case TIMESTAMP => SqlTimeTypeInfo.TIMESTAMP
case typeName if YEAR_INTERVAL_TYPES.contains(typeName) => TimeIntervalTypeInfo.INTERVAL_MONTHS
case typeName if DAY_INTERVAL_TYPES.contains(typeName) => TimeIntervalTypeInfo.INTERVAL_MILLIS
case NULL =>
throw TableException("Type NULL is not supported. Null values must have a supported type.")
// symbol for special flags e.g. TRIM's BOTH, LEADING, TRAILING
// are represented as integer
case SYMBOL => INT_TYPE_INFO
// extract encapsulated TypeInformation
case ANY if relDataType.isInstanceOf[GenericRelDataType] =>
val genericRelDataType = relDataType.asInstanceOf[GenericRelDataType]
genericRelDataType.typeInfo
case ROW if relDataType.isInstanceOf[CompositeRelDataType] =>
val compositeRelDataType = relDataType.asInstanceOf[CompositeRelDataType]
compositeRelDataType.compositeType
// ROW and CURSOR for UDTF case, whose type info will never be used, just a placeholder
case ROW | CURSOR => new NothingTypeInfo
case ARRAY if relDataType.isInstanceOf[ArrayRelDataType] =>
val arrayRelDataType = relDataType.asInstanceOf[ArrayRelDataType]
arrayRelDataType.typeInfo
case _@t =>
throw TableException(s"Type is not supported: $t")
}
}
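// Illustrative sketch (not part of the file): round-trips a basic type through the
// mappings defined above. FlinkTypeSystem lives in the same package.
object FlinkTypeFactoryExample {
def main(args: Array[String]): Unit = {
val factory = new FlinkTypeFactory(new FlinkTypeSystem)
val relType = factory.createTypeFromTypeInfo(STRING_TYPE_INFO)
println(relType.getSqlTypeName) // VARCHAR
println(FlinkTypeFactory.toTypeInfo(relType)) // STRING type information
}
}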
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/batch/table/stringexpr/AggregationsStringExpressionTest.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.batch.table.stringexpr
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala.batch.utils.LogicalPlanFormatUtils
import org.junit._
class AggregationsStringExpressionTest {
@Test
def testAggregationTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
val t1 = t.select('_1.sum, '_1.min, '_1.max, '_1.count, '_1.avg)
val t2 = t.select("_1.sum, _1.min, _1.max, _1.count, _1.avg")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testWorkingAggregationDataTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
val t1 = t.select('_1.avg, '_2.avg, '_3.avg, '_4.avg, '_5.avg, '_6.avg, '_7.count)
val t2 = t.select("_1.avg, _2.avg, _3.avg, _4.avg, _5.avg, _6.avg, _7.count")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = env.fromElements(
(1: Byte, 1: Short),
(2: Byte, 2: Short)).toTable(tEnv)
val t1 = t.select('_1.avg, '_1.sum, '_1.count, '_2.avg, '_2.sum)
val t2 = t.select("_1.avg, _1.sum, _1.count, _2.avg, _2.sum")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testAggregationWithArithmetic(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
val t1 = t.select(('_1 + 2).avg + 2, '_2.count + 5)
val t2 = t.select("(_1 + 2).avg + 2, _2.count + 5")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testAggregationWithTwoCount(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
val t1 = t.select('_1.count, '_2.count)
val t2 = t.select("_1.count, _2.count")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testAggregationAfterProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
val t1 = t.select('_1, '_2, '_3)
.select('_1.avg, '_2.sum, '_3.count)
val t2 = t.select("_1, _2, _3")
.select("_1.avg, _2.sum, _3.count")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testDistinct(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val distinct = ds.select('b).distinct()
val distinct2 = ds.select("b").distinct()
val lPlan1 = distinct.logicalPlan
val lPlan2 = distinct2.logicalPlan
Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
}
@Test
def testDistinctAfterAggregate(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val ds = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
val distinct = ds.groupBy('a, 'e).select('e).distinct()
val distinct2 = ds.groupBy("a, e").select("e").distinct()
val lPlan1 = distinct.logicalPlan
val lPlan2 = distinct2.logicalPlan
Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
}
@Test
def testGroupedAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t.groupBy('b).select('b, 'a.sum)
val t2 = t.groupBy("b").select("b, a.sum")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupingKeyForwardIfNotUsed(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t.groupBy('b).select('a.sum)
val t2 = t.groupBy("b").select("a.sum")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupNoAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t
.groupBy('b)
.select('a.sum as 'd, 'b)
.groupBy('b, 'd)
.select('b)
val t2 = t
.groupBy("b")
.select("a.sum as d, b")
.groupBy("b, d")
.select("b")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupedAggregateWithConstant1(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t.select('a, 4 as 'four, 'b)
.groupBy('four, 'a)
.select('four, 'b.sum)
val t2 = t.select("a, 4 as four, b")
.groupBy("four, a")
.select("four, b.sum")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupedAggregateWithConstant2(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t.select('b, 4 as 'four, 'a)
.groupBy('b, 'four)
.select('four, 'a.sum)
val t2 = t.select("b, 4 as four, a")
.groupBy("b, four")
.select("four, a.sum")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupedAggregateWithExpression(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
val t1 = t.groupBy('e, 'b % 3)
.select('c.min, 'e, 'a.avg, 'd.count)
val t2 = t.groupBy("e, b % 3")
.select("c.min, e, a.avg, d.count")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
@Test
def testGroupedAggregateWithFilter(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val t1 = t.groupBy('b)
.select('b, 'a.sum)
.where('b === 2)
val t2 = t.groupBy("b")
.select("b, a.sum")
.where("b = 2")
val lPlan1 = t1.logicalPlan
val lPlan2 = t2.logicalPlan
Assert.assertEquals("Logical Plans do not match",
LogicalPlanFormatUtils.formatTempTableId(lPlan1.toString),
LogicalPlanFormatUtils.formatTempTableId(lPlan2.toString))
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/BatchTableSourceScan.scala | <filename>flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/BatchTableSourceScan.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.plan.nodes.PhysicalTableSourceScan
import org.apache.flink.table.plan.schema.TableSourceTable
import org.apache.flink.table.sources.{BatchTableSource, TableSource}
import org.apache.flink.types.Row
/** Flink RelNode to read data from an external source defined by a [[BatchTableSource]]. */
class BatchTableSourceScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
table: RelOptTable,
tableSource: BatchTableSource[_])
extends PhysicalTableSourceScan(cluster, traitSet, table, tableSource)
with BatchScan {
override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val rowCnt = metadata.getRowCount(this)
planner.getCostFactory.makeCost(rowCnt, rowCnt, rowCnt * estimateRowSize(getRowType))
}
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new BatchTableSourceScan(
cluster,
traitSet,
getTable,
tableSource
)
}
override def copy(
traitSet: RelTraitSet,
newTableSource: TableSource[_])
: PhysicalTableSourceScan = {
new BatchTableSourceScan(
cluster,
traitSet,
getTable,
newTableSource.asInstanceOf[BatchTableSource[_]]
)
}
override def translateToPlan(tableEnv: BatchTableEnvironment): DataSet[Row] = {
val config = tableEnv.getConfig
val inputDataSet = tableSource.getDataSet(tableEnv.execEnv).asInstanceOf[DataSet[Any]]
convertToInternalRow(inputDataSet, new TableSourceTable(tableSource), config)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TimeModeIndicatorFunctions.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions
import java.nio.charset.Charset
import java.util
import org.apache.calcite.rel.`type`._
import org.apache.calcite.sql._
import org.apache.calcite.sql.`type`.{OperandTypes, ReturnTypes, SqlTypeFamily, SqlTypeName}
import org.apache.calcite.sql.validate.SqlMonotonicity
import org.apache.calcite.tools.RelBuilder
import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo
import org.apache.flink.table.api.TableException
import org.apache.flink.table.expressions.LeafExpression
object EventTimeExtractor extends SqlFunction("ROWTIME", SqlKind.OTHER_FUNCTION,
ReturnTypes.explicit(TimeModeTypes.ROWTIME), null, OperandTypes.NILADIC,
SqlFunctionCategory.SYSTEM) {
override def getSyntax: SqlSyntax = SqlSyntax.FUNCTION
override def getMonotonicity(call: SqlOperatorBinding): SqlMonotonicity =
SqlMonotonicity.INCREASING
}
object ProcTimeExtractor extends SqlFunction("PROCTIME", SqlKind.OTHER_FUNCTION,
ReturnTypes.explicit(TimeModeTypes.PROCTIME), null, OperandTypes.NILADIC,
SqlFunctionCategory.SYSTEM) {
override def getSyntax: SqlSyntax = SqlSyntax.FUNCTION
override def getMonotonicity(call: SqlOperatorBinding): SqlMonotonicity =
SqlMonotonicity.INCREASING
}
abstract class TimeIndicator extends LeafExpression {
/**
* Returns the [[org.apache.flink.api.common.typeinfo.TypeInformation]]
* for evaluating this expression.
* It is sometimes not available until the expression is valid.
*/
override private[flink] def resultType = SqlTimeTypeInfo.TIMESTAMP
/**
* Convert Expression to its counterpart in Calcite, i.e. RexNode
*/
override private[flink] def toRexNode(implicit relBuilder: RelBuilder) =
throw new TableException("indicator functions (e.g. proctime() and rowtime()" +
" are not executable. Please check your expressions.")
}
case class RowTime() extends TimeIndicator
case class ProcTime() extends TimeIndicator
object TimeModeTypes {
// indicator data type for row time (event time)
val ROWTIME = new RowTimeType
// indicator data type for processing time
val PROCTIME = new ProcTimeType
}
class RowTimeType extends TimeModeType {
override def toString(): String = "ROWTIME"
override def getFullTypeString: String = "ROWTIME_INDICATOR"
}
class ProcTimeType extends TimeModeType {
override def toString(): String = "PROCTIME"
override def getFullTypeString: String = "PROCTIME_INDICATOR"
}
abstract class TimeModeType extends RelDataType {
override def getComparability: RelDataTypeComparability = RelDataTypeComparability.NONE
override def isStruct: Boolean = false
override def getFieldList: util.List[RelDataTypeField] = null
override def getFieldNames: util.List[String] = null
override def getFieldCount: Int = 0
override def getStructKind: StructKind = StructKind.NONE
override def getField(
fieldName: String,
caseSensitive: Boolean,
elideRecord: Boolean): RelDataTypeField = null
override def isNullable: Boolean = false
override def getComponentType: RelDataType = null
override def getKeyType: RelDataType = null
override def getValueType: RelDataType = null
override def getCharset: Charset = null
override def getCollation: SqlCollation = null
override def getIntervalQualifier: SqlIntervalQualifier = null
override def getPrecision: Int = -1
override def getScale: Int = -1
override def getSqlTypeName: SqlTypeName = SqlTypeName.TIMESTAMP
override def getSqlIdentifier: SqlIdentifier = null
override def getFamily: RelDataTypeFamily = SqlTypeFamily.NUMERIC
override def getPrecedenceList: RelDataTypePrecedenceList = ???
override def isDynamicStruct: Boolean = false
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/ExpressionParser.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import org.apache.calcite.avatica.util.DateTimeUtils.{MILLIS_PER_DAY, MILLIS_PER_HOUR, MILLIS_PER_MINUTE, MILLIS_PER_SECOND}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, SqlTimeTypeInfo, TypeInformation}
import org.apache.flink.table.api.ExpressionParserException
import org.apache.flink.table.expressions.ExpressionUtils.{toMilliInterval, toMonthInterval}
import org.apache.flink.table.expressions.TimeIntervalUnit.TimeIntervalUnit
import org.apache.flink.table.expressions.TimePointUnit.TimePointUnit
import org.apache.flink.table.expressions.TrimMode.TrimMode
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
import scala.language.implicitConversions
import scala.util.parsing.combinator.{JavaTokenParsers, PackratParsers}
/**
* Parser for expressions inside a String. This parses exactly the same expressions that
* would be accepted by the Scala Expression DSL.
*
* See [[org.apache.flink.table.api.scala.ImplicitExpressionConversions]] and
* [[org.apache.flink.table.api.scala.ImplicitExpressionOperations]] for the constructs
* available in the Scala Expression DSL. This parser must be kept in sync with the Scala DSL
 * defined in the above files.
*/
object ExpressionParser extends JavaTokenParsers with PackratParsers {
case class Keyword(key: String)
  // Convert the keyword into a case-insensitive Parser
implicit def keyword2Parser(kw: Keyword): Parser[String] = {
("""(?i)\Q""" + kw.key + """\E""").r
}
// Keyword
lazy val ARRAY: Keyword = Keyword("Array")
lazy val AS: Keyword = Keyword("as")
lazy val COUNT: Keyword = Keyword("count")
lazy val AVG: Keyword = Keyword("avg")
lazy val MIN: Keyword = Keyword("min")
lazy val MAX: Keyword = Keyword("max")
lazy val SUM: Keyword = Keyword("sum")
lazy val START: Keyword = Keyword("start")
lazy val END: Keyword = Keyword("end")
lazy val CAST: Keyword = Keyword("cast")
lazy val NULL: Keyword = Keyword("Null")
lazy val IF: Keyword = Keyword("?")
lazy val ASC: Keyword = Keyword("asc")
lazy val DESC: Keyword = Keyword("desc")
lazy val TO_DATE: Keyword = Keyword("toDate")
lazy val TO_TIME: Keyword = Keyword("toTime")
lazy val TO_TIMESTAMP: Keyword = Keyword("toTimestamp")
lazy val TRIM: Keyword = Keyword("trim")
lazy val EXTRACT: Keyword = Keyword("extract")
lazy val FLOOR: Keyword = Keyword("floor")
lazy val CEIL: Keyword = Keyword("ceil")
lazy val YEARS: Keyword = Keyword("years")
lazy val YEAR: Keyword = Keyword("year")
lazy val MONTHS: Keyword = Keyword("months")
lazy val MONTH: Keyword = Keyword("month")
lazy val DAYS: Keyword = Keyword("days")
lazy val DAY: Keyword = Keyword("day")
lazy val HOURS: Keyword = Keyword("hours")
lazy val HOUR: Keyword = Keyword("hour")
lazy val MINUTES: Keyword = Keyword("minutes")
lazy val MINUTE: Keyword = Keyword("minute")
lazy val SECONDS: Keyword = Keyword("seconds")
lazy val SECOND: Keyword = Keyword("second")
lazy val MILLIS: Keyword = Keyword("millis")
lazy val MILLI: Keyword = Keyword("milli")
lazy val ROWS: Keyword = Keyword("rows")
lazy val STAR: Keyword = Keyword("*")
lazy val GET: Keyword = Keyword("get")
lazy val FLATTEN: Keyword = Keyword("flatten")
def functionIdent: ExpressionParser.Parser[String] =
not(ARRAY) ~ not(AS) ~ not(COUNT) ~ not(AVG) ~ not(MIN) ~ not(MAX) ~
not(SUM) ~ not(START) ~ not(END)~ not(CAST) ~ not(NULL) ~
not(IF) ~> super.ident
// symbols
lazy val timeIntervalUnit: PackratParser[Expression] = TimeIntervalUnit.values map {
case unit: TimeIntervalUnit => literal(unit.toString) ^^^ unit.toExpr
} reduceLeft(_ | _)
lazy val timePointUnit: PackratParser[Expression] = TimePointUnit.values map {
case unit: TimePointUnit => literal(unit.toString) ^^^ unit.toExpr
} reduceLeft(_ | _)
lazy val trimMode: PackratParser[Expression] = TrimMode.values map {
case mode: TrimMode => literal(mode.toString) ^^^ mode.toExpr
} reduceLeft(_ | _)
// data types
lazy val dataType: PackratParser[TypeInformation[_]] =
"BYTE" ^^ { ti => BasicTypeInfo.BYTE_TYPE_INFO } |
"SHORT" ^^ { ti => BasicTypeInfo.SHORT_TYPE_INFO } |
"INTERVAL_MONTHS" ^^ {
ti => TimeIntervalTypeInfo.INTERVAL_MONTHS.asInstanceOf[TypeInformation[_]]
} |
"INTERVAL_MILLIS" ^^ {
ti => TimeIntervalTypeInfo.INTERVAL_MILLIS.asInstanceOf[TypeInformation[_]]
} |
"INT" ^^ { ti => BasicTypeInfo.INT_TYPE_INFO } |
"LONG" ^^ { ti => BasicTypeInfo.LONG_TYPE_INFO } |
"FLOAT" ^^ { ti => BasicTypeInfo.FLOAT_TYPE_INFO } |
"DOUBLE" ^^ { ti => BasicTypeInfo.DOUBLE_TYPE_INFO } |
("BOOLEAN" | "BOOL") ^^ { ti => BasicTypeInfo.BOOLEAN_TYPE_INFO } |
"STRING" ^^ { ti => BasicTypeInfo.STRING_TYPE_INFO } |
"DATE" ^^ { ti => SqlTimeTypeInfo.DATE.asInstanceOf[TypeInformation[_]] } |
"TIMESTAMP" ^^ { ti => SqlTimeTypeInfo.TIMESTAMP } |
"TIME" ^^ { ti => SqlTimeTypeInfo.TIME } |
"DECIMAL" ^^ { ti => BasicTypeInfo.BIG_DEC_TYPE_INFO }
// Literals
// same as floatingPointNumber but we do not allow trailing dot "12.d" or "2."
lazy val floatingPointNumberFlink: Parser[String] =
"""-?(\d+(\.\d+)?|\d*\.\d+)([eE][+-]?\d+)?[fFdD]?""".r
lazy val numberLiteral: PackratParser[Expression] =
(wholeNumber <~ ("l" | "L")) ^^ { n => Literal(n.toLong) } |
(decimalNumber <~ ("p" | "P")) ^^ { n => Literal(BigDecimal(n)) } |
(floatingPointNumberFlink | decimalNumber) ^^ {
n =>
if (n.matches("""-?\d+""")) {
Literal(n.toInt)
} else if (n.endsWith("f") || n.endsWith("F")) {
Literal(n.toFloat)
} else {
Literal(n.toDouble)
}
}
lazy val singleQuoteStringLiteral: Parser[Expression] =
("'" + """([^'\p{Cntrl}\\]|\\[\\'"bfnrt]|\\u[a-fA-F0-9]{4})*""" + "'").r ^^ {
str => Literal(str.substring(1, str.length - 1))
}
lazy val stringLiteralFlink: PackratParser[Expression] = super.stringLiteral ^^ {
str => Literal(str.substring(1, str.length - 1))
}
lazy val boolLiteral: PackratParser[Expression] = ("true" | "false") ^^ {
str => Literal(str.toBoolean)
}
lazy val nullLiteral: PackratParser[Expression] = NULL ~ "(" ~> dataType <~ ")" ^^ {
dt => Null(dt)
}
lazy val literalExpr: PackratParser[Expression] =
numberLiteral | stringLiteralFlink | singleQuoteStringLiteral | boolLiteral | nullLiteral
lazy val fieldReference: PackratParser[NamedExpression] = (STAR | ident) ^^ {
sym => UnresolvedFieldReference(sym)
}
lazy val atom: PackratParser[Expression] =
( "(" ~> expression <~ ")" ) | literalExpr | fieldReference
// suffix operators
lazy val suffixSum: PackratParser[Expression] =
composite <~ "." ~ SUM ~ opt("()") ^^ { e => Sum(e) }
lazy val suffixMin: PackratParser[Expression] =
composite <~ "." ~ MIN ~ opt("()") ^^ { e => Min(e) }
lazy val suffixMax: PackratParser[Expression] =
composite <~ "." ~ MAX ~ opt("()") ^^ { e => Max(e) }
lazy val suffixCount: PackratParser[Expression] =
composite <~ "." ~ COUNT ~ opt("()") ^^ { e => Count(e) }
lazy val suffixAvg: PackratParser[Expression] =
composite <~ "." ~ AVG ~ opt("()") ^^ { e => Avg(e) }
lazy val suffixStart: PackratParser[Expression] =
composite <~ "." ~ START ~ opt("()") ^^ { e => WindowStart(e) }
lazy val suffixEnd: PackratParser[Expression] =
composite <~ "." ~ END ~ opt("()") ^^ { e => WindowEnd(e) }
lazy val suffixCast: PackratParser[Expression] =
composite ~ "." ~ CAST ~ "(" ~ dataType ~ ")" ^^ {
case e ~ _ ~ _ ~ _ ~ dt ~ _ => Cast(e, dt)
}
lazy val suffixAs: PackratParser[Expression] =
composite ~ "." ~ AS ~ "(" ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
case e ~ _ ~ _ ~ _ ~ target ~ _ => Alias(e, target.head.name, target.tail.map(_.name))
}
lazy val suffixTrim = composite ~ "." ~ TRIM ~ "(" ~ trimMode ~ "," ~ expression ~ ")" ^^ {
case operand ~ _ ~ _ ~ _ ~ mode ~ _ ~ trimCharacter ~ _ => Trim(mode, trimCharacter, operand)
}
lazy val suffixTrimWithoutArgs = composite <~ "." ~ TRIM ~ opt("()") ^^ {
e => Trim(TrimMode.BOTH, TrimConstants.TRIM_DEFAULT_CHAR, e)
}
lazy val suffixIf: PackratParser[Expression] =
composite ~ "." ~ IF ~ "(" ~ expression ~ "," ~ expression ~ ")" ^^ {
case condition ~ _ ~ _ ~ _ ~ ifTrue ~ _ ~ ifFalse ~ _ => If(condition, ifTrue, ifFalse)
}
lazy val suffixExtract = composite ~ "." ~ EXTRACT ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
case operand ~ _ ~ _ ~ _ ~ unit ~ _ => Extract(unit, operand)
}
lazy val suffixFloor = composite ~ "." ~ FLOOR ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
case operand ~ _ ~ _ ~ _ ~ unit ~ _ => TemporalFloor(unit, operand)
}
lazy val suffixCeil = composite ~ "." ~ CEIL ~ "(" ~ timeIntervalUnit ~ ")" ^^ {
case operand ~ _ ~ _ ~ _ ~ unit ~ _ => TemporalCeil(unit, operand)
}
lazy val suffixFunctionCall =
composite ~ "." ~ functionIdent ~ "(" ~ repsep(expression, ",") ~ ")" ^^ {
case operand ~ _ ~ name ~ _ ~ args ~ _ => Call(name.toUpperCase, operand :: args)
}
lazy val suffixFunctionCallOneArg = composite ~ "." ~ functionIdent ^^ {
case operand ~ _ ~ name => Call(name.toUpperCase, Seq(operand))
}
lazy val suffixAsc : PackratParser[Expression] =
atom <~ "." ~ ASC ~ opt("()") ^^ { e => Asc(e) }
lazy val suffixDesc : PackratParser[Expression] =
atom <~ "." ~ DESC ~ opt("()") ^^ { e => Desc(e) }
lazy val suffixToDate: PackratParser[Expression] =
composite <~ "." ~ TO_DATE ~ opt("()") ^^ { e => Cast(e, SqlTimeTypeInfo.DATE) }
lazy val suffixToTimestamp: PackratParser[Expression] =
composite <~ "." ~ TO_TIMESTAMP ~ opt("()") ^^ { e => Cast(e, SqlTimeTypeInfo.TIMESTAMP) }
lazy val suffixToTime: PackratParser[Expression] =
composite <~ "." ~ TO_TIME ~ opt("()") ^^ { e => Cast(e, SqlTimeTypeInfo.TIME) }
lazy val suffixTimeInterval : PackratParser[Expression] =
composite ~ "." ~ (YEARS | MONTHS | DAYS | HOURS | MINUTES | SECONDS | MILLIS |
YEAR | MONTH | DAY | HOUR | MINUTE | SECOND | MILLI) ^^ {
case expr ~ _ ~ (YEARS.key | YEAR.key) => toMonthInterval(expr, 12)
case expr ~ _ ~ (MONTHS.key | MONTH.key) => toMonthInterval(expr, 1)
case expr ~ _ ~ (DAYS.key | DAY.key) => toMilliInterval(expr, MILLIS_PER_DAY)
case expr ~ _ ~ (HOURS.key | HOUR.key) => toMilliInterval(expr, MILLIS_PER_HOUR)
case expr ~ _ ~ (MINUTES.key | MINUTE.key) => toMilliInterval(expr, MILLIS_PER_MINUTE)
case expr ~ _ ~ (SECONDS.key | SECOND.key) => toMilliInterval(expr, MILLIS_PER_SECOND)
case expr ~ _ ~ (MILLIS.key | MILLI.key)=> toMilliInterval(expr, 1)
}
lazy val suffixRowInterval : PackratParser[Expression] =
composite <~ "." ~ ROWS ^^ { e => ExpressionUtils.toRowInterval(e) }
lazy val suffixGet: PackratParser[Expression] =
composite ~ "." ~ GET ~ "(" ~ literalExpr ~ ")" ^^ {
case e ~ _ ~ _ ~ _ ~ index ~ _ =>
GetCompositeField(e, index.asInstanceOf[Literal].value)
}
lazy val suffixFlattening: PackratParser[Expression] =
composite <~ "." ~ FLATTEN ~ opt("()") ^^ { e => Flattening(e) }
lazy val suffixed: PackratParser[Expression] =
suffixTimeInterval | suffixRowInterval | suffixSum | suffixMin | suffixMax | suffixStart |
suffixEnd | suffixCount | suffixAvg | suffixCast | suffixAs | suffixTrim |
suffixTrimWithoutArgs | suffixIf | suffixAsc | suffixDesc | suffixToDate |
suffixToTimestamp | suffixToTime | suffixExtract | suffixFloor | suffixCeil |
suffixGet | suffixFlattening |
suffixFunctionCall | suffixFunctionCallOneArg // function call must always be at the end
// prefix operators
lazy val prefixArray: PackratParser[Expression] =
ARRAY ~ "(" ~> repsep(expression, ",") <~ ")" ^^ { elements => ArrayConstructor(elements) }
lazy val prefixSum: PackratParser[Expression] =
SUM ~ "(" ~> expression <~ ")" ^^ { e => Sum(e) }
lazy val prefixMin: PackratParser[Expression] =
MIN ~ "(" ~> expression <~ ")" ^^ { e => Min(e) }
lazy val prefixMax: PackratParser[Expression] =
MAX ~ "(" ~> expression <~ ")" ^^ { e => Max(e) }
lazy val prefixCount: PackratParser[Expression] =
COUNT ~ "(" ~> expression <~ ")" ^^ { e => Count(e) }
lazy val prefixAvg: PackratParser[Expression] =
AVG ~ "(" ~> expression <~ ")" ^^ { e => Avg(e) }
lazy val prefixStart: PackratParser[Expression] =
START ~ "(" ~> expression <~ ")" ^^ { e => WindowStart(e) }
lazy val prefixEnd: PackratParser[Expression] =
END ~ "(" ~> expression <~ ")" ^^ { e => WindowEnd(e) }
lazy val prefixCast: PackratParser[Expression] =
CAST ~ "(" ~ expression ~ "," ~ dataType ~ ")" ^^ {
case _ ~ _ ~ e ~ _ ~ dt ~ _ => Cast(e, dt)
}
lazy val prefixAs: PackratParser[Expression] =
AS ~ "(" ~ expression ~ "," ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
case _ ~ _ ~ e ~ _ ~ target ~ _ => Alias(e, target.head.name, target.tail.map(_.name))
}
lazy val prefixIf: PackratParser[Expression] =
IF ~ "(" ~ expression ~ "," ~ expression ~ "," ~ expression ~ ")" ^^ {
case _ ~ _ ~ condition ~ _ ~ ifTrue ~ _ ~ ifFalse ~ _ => If(condition, ifTrue, ifFalse)
}
lazy val prefixFunctionCall = functionIdent ~ "(" ~ repsep(expression, ",") ~ ")" ^^ {
case name ~ _ ~ args ~ _ => Call(name.toUpperCase, args)
}
lazy val prefixFunctionCallOneArg = functionIdent ~ "(" ~ expression ~ ")" ^^ {
case name ~ _ ~ arg ~ _ => Call(name.toUpperCase, Seq(arg))
}
lazy val prefixTrim = TRIM ~ "(" ~ trimMode ~ "," ~ expression ~ "," ~ expression ~ ")" ^^ {
case _ ~ _ ~ mode ~ _ ~ trimCharacter ~ _ ~ operand ~ _ => Trim(mode, trimCharacter, operand)
}
lazy val prefixTrimWithoutArgs = TRIM ~ "(" ~ expression ~ ")" ^^ {
case _ ~ _ ~ operand ~ _ => Trim(TrimMode.BOTH, TrimConstants.TRIM_DEFAULT_CHAR, operand)
}
lazy val prefixExtract = EXTRACT ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
case _ ~ _ ~ operand ~ _ ~ unit ~ _ => Extract(unit, operand)
}
lazy val prefixFloor = FLOOR ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
case _ ~ _ ~ operand ~ _ ~ unit ~ _ => TemporalFloor(unit, operand)
}
lazy val prefixCeil = CEIL ~ "(" ~ expression ~ "," ~ timeIntervalUnit ~ ")" ^^ {
case _ ~ _ ~ operand ~ _ ~ unit ~ _ => TemporalCeil(unit, operand)
}
lazy val prefixGet: PackratParser[Expression] =
GET ~ "(" ~ composite ~ "," ~ literalExpr ~ ")" ^^ {
case _ ~ _ ~ e ~ _ ~ index ~ _ =>
GetCompositeField(e, index.asInstanceOf[Literal].value)
}
lazy val prefixFlattening: PackratParser[Expression] =
FLATTEN ~ "(" ~> composite <~ ")" ^^ { e => Flattening(e) }
lazy val prefixed: PackratParser[Expression] =
prefixArray | prefixSum | prefixMin | prefixMax | prefixCount | prefixAvg |
prefixStart | prefixEnd | prefixCast | prefixAs | prefixTrim | prefixTrimWithoutArgs |
prefixIf | prefixExtract | prefixFloor | prefixCeil | prefixGet | prefixFlattening |
prefixFunctionCall | prefixFunctionCallOneArg // function call must always be at the end
// suffix/prefix composite
lazy val composite: PackratParser[Expression] = suffixed | prefixed | atom |
failure("Composite expression expected.")
// unary ops
lazy val unaryNot: PackratParser[Expression] = "!" ~> composite ^^ { e => Not(e) }
lazy val unaryMinus: PackratParser[Expression] = "-" ~> composite ^^ { e => UnaryMinus(e) }
lazy val unaryPlus: PackratParser[Expression] = "+" ~> composite ^^ { e => e }
lazy val unary = composite | unaryNot | unaryMinus | unaryPlus |
failure("Unary expression expected.")
// arithmetic
lazy val product = unary * (
"*" ^^^ { (a:Expression, b:Expression) => Mul(a,b) } |
"/" ^^^ { (a:Expression, b:Expression) => Div(a,b) } |
"%" ^^^ { (a:Expression, b:Expression) => Mod(a,b) } ) |
failure("Product expected.")
lazy val term = product * (
"+" ^^^ { (a:Expression, b:Expression) => Plus(a,b) } |
"-" ^^^ { (a:Expression, b:Expression) => Minus(a,b) } ) |
failure("Term expected.")
// Comparison
lazy val equalTo: PackratParser[Expression] = term ~ ("===" | "==" | "=") ~ term ^^ {
case l ~ _ ~ r => EqualTo(l, r)
}
lazy val notEqualTo: PackratParser[Expression] = term ~ ("!==" | "!=" | "<>") ~ term ^^ {
case l ~ _ ~ r => NotEqualTo(l, r)
}
lazy val greaterThan: PackratParser[Expression] = term ~ ">" ~ term ^^ {
case l ~ _ ~ r => GreaterThan(l, r)
}
lazy val greaterThanOrEqual: PackratParser[Expression] = term ~ ">=" ~ term ^^ {
case l ~ _ ~ r => GreaterThanOrEqual(l, r)
}
lazy val lessThan: PackratParser[Expression] = term ~ "<" ~ term ^^ {
case l ~ _ ~ r => LessThan(l, r)
}
lazy val lessThanOrEqual: PackratParser[Expression] = term ~ "<=" ~ term ^^ {
case l ~ _ ~ r => LessThanOrEqual(l, r)
}
lazy val comparison: PackratParser[Expression] =
equalTo | notEqualTo |
greaterThan | greaterThanOrEqual |
lessThan | lessThanOrEqual | term |
failure("Comparison expected.")
// logic
lazy val logic = comparison * (
"&&" ^^^ { (a:Expression, b:Expression) => And(a,b) } |
"||" ^^^ { (a:Expression, b:Expression) => Or(a,b) } ) |
failure("Logic expected.")
// alias
lazy val alias: PackratParser[Expression] = logic ~ AS ~ fieldReference ^^ {
case e ~ _ ~ name => Alias(e, name.name)
} | logic ~ AS ~ "(" ~ rep1sep(fieldReference, ",") ~ ")" ^^ {
case e ~ _ ~ _ ~ names ~ _ => Alias(e, names.head.name, names.tail.map(_.name))
} | logic
lazy val expression: PackratParser[Expression] = alias |
failure("Invalid expression.")
lazy val expressionList: Parser[List[Expression]] = rep1sep(expression, ",")
def parseExpressionList(expression: String): List[Expression] = {
parseAll(expressionList, expression) match {
case Success(lst, _) => lst
case NoSuccess(msg, next) =>
throwError(msg, next)
}
}
def parseExpression(exprString: String): Expression = {
parseAll(expression, exprString) match {
case Success(lst, _) => lst
case NoSuccess(msg, next) =>
throwError(msg, next)
}
}
private def throwError(msg: String, next: Input): Nothing = {
val improvedMsg = msg.replace("string matching regex `\\z'", "End of expression")
throw ExpressionParserException(
s"""Could not parse expression at column ${next.pos.column}: $improvedMsg
|${next.pos.longString}""".stripMargin)
}
}
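// A minimal usage sketch of the parser above (the field names are made up; parsing only
// builds unresolved expressions, it does not validate them against a table schema):
//
//   val expr: Expression = ExpressionParser.parseExpression("amount.sum as total")
//   val exprs: List[Expression] = ExpressionParser.parseExpressionList("a, b.cast(INT), c.trim()")
//
// Malformed input such as "a +" is rejected with an ExpressionParserException that reports
// the column at which parsing failed.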
|
streamline-eu/dynamic-flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/WindowedStream.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.annotation.{Public, PublicEvolving}
import org.apache.flink.api.common.functions.{AggregateFunction, FoldFunction, ReduceFunction}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.datastream.{WindowedStream => JavaWStream}
import org.apache.flink.streaming.api.functions.aggregation.AggregationFunction.AggregationType
import org.apache.flink.streaming.api.functions.aggregation.{ComparableAggregator, SumAggregator}
import org.apache.flink.streaming.api.scala.function.{ProcessWindowFunction, WindowFunction}
import org.apache.flink.streaming.api.scala.function.util._
import org.apache.flink.streaming.api.windowing.evictors.Evictor
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.triggers.Trigger
import org.apache.flink.streaming.api.windowing.windows.Window
import org.apache.flink.util.{Collector, OutputTag}
/**
* A [[WindowedStream]] represents a data stream where elements are grouped by
* key, and for each key, the stream of elements is split into windows based on a
* [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]]. Window emission
* is triggered based on a [[Trigger]].
*
* The windows are conceptually evaluated for each key individually, meaning windows can trigger at
* different points for each key.
*
 * If an [[org.apache.flink.streaming.api.windowing.evictors.Evictor]] is specified, it will
 * be used to evict elements from the window after evaluation was triggered by the [[Trigger]],
 * but before the actual evaluation of the window. When using an evictor, window performance
 * will degrade significantly, since pre-aggregation of window results cannot be used.
*
 * Note that the [[WindowedStream]] is purely an API construct; during runtime
* the [[WindowedStream]] will be collapsed together with the
* [[KeyedStream]] and the operation over the window into one single operation.
*
* @tparam T The type of elements in the stream.
* @tparam K The type of the key by which elements are grouped.
* @tparam W The type of [[Window]] that the
* [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]]
* assigns the elements to.
*/
@Public
class WindowedStream[T, K, W <: Window](javaStream: JavaWStream[T, K, W]) {
/**
* Sets the allowed lateness to a user-specified value.
* If not explicitly set, the allowed lateness is [[0L]].
* Setting the allowed lateness is only valid for event-time windows.
* If a value different than 0 is provided with a processing-time
* [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]],
* then an exception is thrown.
*/
@PublicEvolving
def allowedLateness(lateness: Time): WindowedStream[T, K, W] = {
javaStream.allowedLateness(lateness)
this
}
/**
* Send late arriving data to the side output identified by the given [[OutputTag]]. Data
* is considered late after the watermark has passed the end of the window plus the allowed
* lateness set using [[allowedLateness(Time)]].
*
* You can get the stream of late data using [[DataStream.getSideOutput()]] on the [[DataStream]]
* resulting from the windowed operation with the same [[OutputTag]].
*/
@PublicEvolving
def sideOutputLateData(outputTag: OutputTag[T]): WindowedStream[T, K, W] = {
javaStream.sideOutputLateData(outputTag)
this
}
/**
* Sets the [[Trigger]] that should be used to trigger window emission.
*/
@PublicEvolving
def trigger(trigger: Trigger[_ >: T, _ >: W]): WindowedStream[T, K, W] = {
javaStream.trigger(trigger)
this
}
/**
* Sets the [[Evictor]] that should be used to evict elements from a window before emission.
*
   * Note: When using an evictor, window performance will degrade significantly, since
* pre-aggregation of window results cannot be used.
*/
@PublicEvolving
def evictor(evictor: Evictor[_ >: T, _ >: W]): WindowedStream[T, K, W] = {
javaStream.evictor(evictor)
this
}
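  // A minimal configuration sketch (assumption: `windowed` is an event-time WindowedStream
  // and `lateTag` is an OutputTag[T] defined elsewhere; both names are hypothetical):
  //
  //   val configured = windowed
  //     .allowedLateness(Time.minutes(1))
  //     .sideOutputLateData(lateTag)
  //     .trigger(EventTimeTrigger.create())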
// ------------------------------------------------------------------------
// Operations on the keyed windows
// ------------------------------------------------------------------------
// --------------------------- reduce() -----------------------------------
/**
* Applies a reduce function to the window. The window function is called for each evaluation
* of the window for each key individually. The output of the reduce function is interpreted
* as a regular non-windowed stream.
*
   * This window will try to pre-aggregate data as much as the window policies permit. For example,
* tumbling time windows can perfectly pre-aggregate the data, meaning that only one element per
* key is stored. Sliding time windows will pre-aggregate on the granularity of the slide
* interval, so a few elements are stored per key (one per slide interval).
* Custom windows may not be able to pre-aggregate, or may need to store extra values in an
* aggregation tree.
*
* @param function The reduce function.
* @return The data stream that is the result of applying the reduce function to the window.
*/
def reduce(function: ReduceFunction[T]): DataStream[T] = {
asScalaStream(javaStream.reduce(clean(function)))
}
/**
* Applies a reduce function to the window. The window function is called for each evaluation
* of the window for each key individually. The output of the reduce function is interpreted
* as a regular non-windowed stream.
*
   * This window will try to pre-aggregate data as much as the window policies permit. For example,
* tumbling time windows can perfectly pre-aggregate the data, meaning that only one element per
* key is stored. Sliding time windows will pre-aggregate on the granularity of the slide
* interval, so a few elements are stored per key (one per slide interval).
* Custom windows may not be able to pre-aggregate, or may need to store extra values in an
* aggregation tree.
*
* @param function The reduce function.
* @return The data stream that is the result of applying the reduce function to the window.
*/
def reduce(function: (T, T) => T): DataStream[T] = {
if (function == null) {
throw new NullPointerException("Reduce function must not be null.")
}
val cleanFun = clean(function)
val reducer = new ScalaReduceFunction[T](cleanFun)
reduce(reducer)
}
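  // A minimal usage sketch of the closure variant above (assumption: `input` is a
  // KeyedStream[(String, Int), String] and a time window has been assigned; not from this file):
  //
  //   val sums: DataStream[(String, Int)] =
  //     input
  //       .timeWindow(Time.seconds(10))
  //       .reduce((a, b) => (a._1, a._2 + b._2))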
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given pre-aggregation reducer.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def reduce[R: TypeInformation](
preAggregator: ReduceFunction[T],
function: WindowFunction[T, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(function)
val applyFunction = new ScalaWindowFunctionWrapper[T, R, K, W](cleanedWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.reduce(cleanedPreAggregator, applyFunction, resultType))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given pre-aggregation reducer.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def reduce[R: TypeInformation](
preAggregator: (T, T) => T,
windowFunction: (K, W, Iterable[T], Collector[R]) => Unit): DataStream[R] = {
if (preAggregator == null) {
throw new NullPointerException("Reduce function must not be null.")
}
if (windowFunction == null) {
throw new NullPointerException("WindowApply function must not be null.")
}
val cleanReducer = clean(preAggregator)
val cleanWindowFunction = clean(windowFunction)
val reducer = new ScalaReduceFunction[T](cleanReducer)
val applyFunction = new ScalaWindowFunction[T, R, K, W](cleanWindowFunction)
asScalaStream(javaStream.reduce(reducer, applyFunction, implicitly[TypeInformation[R]]))
}
/**
   * Applies the given reduce function to each window. The reduced value of the window is
   * then passed as input to the window function. The output of the window function
* is interpreted as a regular non-windowed stream.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param function The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def reduce[R: TypeInformation](
preAggregator: (T, T) => T,
function: ProcessWindowFunction[T, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(function)
val reducer = new ScalaReduceFunction[T](cleanedPreAggregator)
val applyFunction = new ScalaProcessWindowFunctionWrapper[T, R, K, W](cleanedWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.reduce(reducer, applyFunction, resultType))
}
/**
   * Applies the given reduce function to each window. The reduced value of the window is
   * then passed as input to the window function. The output of the window function
* is interpreted as a regular non-windowed stream.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param function The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def reduce[R: TypeInformation](
preAggregator: ReduceFunction[T],
function: ProcessWindowFunction[T, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(function)
val applyFunction = new ScalaProcessWindowFunctionWrapper[T, R, K, W](cleanedWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.reduce(cleanedPreAggregator, applyFunction, resultType))
}
// -------------------------- aggregate() ---------------------------------
/**
* Applies the given aggregation function to each window and key. The aggregation function
   * is called for each element, aggregating values incrementally and keeping the state in
   * one accumulator per key and window.
*
* @param aggregateFunction The aggregation function.
   * @return The data stream that is the result of applying the aggregation function to the window.
*/
@PublicEvolving
def aggregate[ACC: TypeInformation, R: TypeInformation](
aggregateFunction: AggregateFunction[T, ACC, R]): DataStream[R] = {
val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]]
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.aggregate(
clean(aggregateFunction), accumulatorType, resultType))
}
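  // A minimal usage sketch (assumption: `myAggregate` is an AggregateFunction[In, Acc, Out]
  // implemented elsewhere; the names are hypothetical). The accumulator and result
  // TypeInformation are picked up from the implicit context bounds:
  //
  //   val aggregated: DataStream[Out] = windowed.aggregate(myAggregate)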
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given aggregation function.
*
* @param preAggregator The aggregation function that is used for pre-aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation](
preAggregator: AggregateFunction[T, ACC, V],
windowFunction: WindowFunction[V, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(windowFunction)
val applyFunction = new ScalaWindowFunctionWrapper[V, R, K, W](cleanedWindowFunction)
val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]]
val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]]
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.aggregate(
cleanedPreAggregator, applyFunction,
accumulatorType, aggregationResultType, resultType))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given aggregation function.
*
* @param preAggregator The aggregation function that is used for pre-aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation](
preAggregator: AggregateFunction[T, ACC, V],
windowFunction: (K, W, Iterable[V], Collector[R]) => Unit): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(windowFunction)
val applyFunction = new ScalaWindowFunction[V, R, K, W](cleanedWindowFunction)
val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]]
val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]]
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.aggregate(
cleanedPreAggregator, applyFunction,
accumulatorType, aggregationResultType, resultType))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given aggregation function.
*
* @param preAggregator The aggregation function that is used for pre-aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation](
preAggregator: AggregateFunction[T, ACC, V],
windowFunction: ProcessWindowFunction[V, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(windowFunction)
val applyFunction = new ScalaProcessWindowFunctionWrapper[V, R, K, W](cleanedWindowFunction)
val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]]
val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]]
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.aggregate(
cleanedPreAggregator, applyFunction,
accumulatorType, aggregationResultType, resultType))
}
// ---------------------------- fold() ------------------------------------
/**
* Applies the given fold function to each window. The window function is called for each
   * evaluation of the window for each key individually. The output of the fold function is
* interpreted as a regular non-windowed stream.
*
* @param function The fold function.
* @return The data stream that is the result of applying the fold function to the window.
*/
def fold[R: TypeInformation](
initialValue: R,
function: FoldFunction[T,R]): DataStream[R] = {
if (function == null) {
throw new NullPointerException("Fold function must not be null.")
}
val resultType : TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.fold(initialValue, function, resultType))
}
/**
* Applies the given fold function to each window. The window function is called for each
   * evaluation of the window for each key individually. The output of the fold function is
* interpreted as a regular non-windowed stream.
*
* @param function The fold function.
* @return The data stream that is the result of applying the fold function to the window.
*/
def fold[R: TypeInformation](initialValue: R)(function: (R, T) => R): DataStream[R] = {
if (function == null) {
throw new NullPointerException("Fold function must not be null.")
}
val cleanFun = clean(function)
val folder = new ScalaFoldFunction[T, R](cleanFun)
fold(initialValue, folder)
}
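  // A minimal usage sketch of the curried fold above (assumption: `windowed` is a
  // WindowedStream[String, String, TimeWindow]; not from this file):
  //
  //   val totalLength: DataStream[Long] =
  //     windowed.fold(0L) { (acc, element) => acc + element.length }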
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is incrementally aggregated using the given fold function.
*
* @param initialValue The initial value of the fold
* @param foldFunction The fold function that is used for incremental aggregation
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def fold[ACC: TypeInformation, R: TypeInformation](
initialValue: ACC,
foldFunction: FoldFunction[T, ACC],
function: WindowFunction[ACC, R, K, W]): DataStream[R] = {
val cleanedFunction = clean(function)
val cleanedFoldFunction = clean(foldFunction)
val applyFunction = new ScalaWindowFunctionWrapper[ACC, R, K, W](cleanedFunction)
asScalaStream(javaStream.fold(
initialValue,
cleanedFoldFunction,
applyFunction,
implicitly[TypeInformation[ACC]],
implicitly[TypeInformation[R]]))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is incrementally aggregated using the given fold function.
*
* @param foldFunction The fold function that is used for incremental aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def fold[ACC: TypeInformation, R: TypeInformation](
initialValue: ACC,
foldFunction: (ACC, T) => ACC,
windowFunction: (K, W, Iterable[ACC], Collector[R]) => Unit): DataStream[R] = {
if (foldFunction == null) {
throw new NullPointerException("Fold function must not be null.")
}
if (windowFunction == null) {
throw new NullPointerException("WindowApply function must not be null.")
}
val cleanFolder = clean(foldFunction)
val cleanWindowFunction = clean(windowFunction)
val folder = new ScalaFoldFunction[T, ACC](cleanFolder)
val applyFunction = new ScalaWindowFunction[ACC, R, K, W](cleanWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
val accType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]]
asScalaStream(javaStream.fold(initialValue, folder, applyFunction, accType, resultType))
}
/**
   * Applies the given fold function to each window. The folded value of the window is
   * then passed as input to the process window function.
* The output of the process window function is interpreted as a regular non-windowed stream.
*
* @param initialValue The initial value of the fold
* @param foldFunction The fold function that is used for incremental aggregation
* @param function The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def fold[R: TypeInformation, ACC: TypeInformation](
initialValue: ACC,
foldFunction: (ACC, T) => ACC,
function: ProcessWindowFunction[ACC, R, K, W]): DataStream[R] = {
val cleanedFunction = clean(function)
val cleanedFoldFunction = clean(foldFunction)
val folder = new ScalaFoldFunction[T, ACC](cleanedFoldFunction)
val applyFunction = new ScalaProcessWindowFunctionWrapper[ACC, R, K, W](cleanedFunction)
asScalaStream(javaStream.fold(
initialValue,
folder,
applyFunction,
implicitly[TypeInformation[ACC]],
implicitly[TypeInformation[R]]))
}
/**
   * Applies the given fold function to each window. The folded value of the window is
   * then passed as input to the process window function.
* The output of the process window function is interpreted as a regular non-windowed stream.
*
* @param initialValue The initial value of the fold
* @param foldFunction The fold function that is used for incremental aggregation
* @param function The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def fold[R: TypeInformation, ACC: TypeInformation](
initialValue: ACC,
foldFunction: FoldFunction[T, ACC],
function: ProcessWindowFunction[ACC, R, K, W]): DataStream[R] = {
val cleanedFunction = clean(function)
val cleanedFoldFunction = clean(foldFunction)
val applyFunction = new ScalaProcessWindowFunctionWrapper[ACC, R, K, W](cleanedFunction)
asScalaStream(javaStream.fold(
initialValue,
cleanedFoldFunction,
applyFunction,
implicitly[TypeInformation[ACC]],
implicitly[TypeInformation[R]]))
}
// ---------------------------- apply() -------------------------------------
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
   * Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of pre-aggregation.
*
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
def process[R: TypeInformation](
function: ProcessWindowFunction[T, R, K, W]): DataStream[R] = {
val cleanFunction = clean(function)
val applyFunction = new ScalaProcessWindowFunctionWrapper[T, R, K, W](cleanFunction)
asScalaStream(javaStream.process(applyFunction, implicitly[TypeInformation[R]]))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
   * Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of pre-aggregation.
*
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def apply[R: TypeInformation](
function: WindowFunction[T, R, K, W]): DataStream[R] = {
val cleanFunction = clean(function)
val applyFunction = new ScalaWindowFunctionWrapper[T, R, K, W](cleanFunction)
asScalaStream(javaStream.apply(applyFunction, implicitly[TypeInformation[R]]))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
   * Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of pre-aggregation.
*
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
def apply[R: TypeInformation](
function: (K, W, Iterable[T], Collector[R]) => Unit): DataStream[R] = {
if (function == null) {
throw new NullPointerException("WindowApply function must not be null.")
}
val cleanedFunction = clean(function)
val applyFunction = new ScalaWindowFunction[T, R, K, W](cleanedFunction)
asScalaStream(javaStream.apply(applyFunction, implicitly[TypeInformation[R]]))
}
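  // A minimal usage sketch of the closure variant above (assumption: `windowed` is a
  // WindowedStream[Event, String, TimeWindow]; Event and the stream are hypothetical):
  //
  //   val countsPerWindow: DataStream[(String, Long)] =
  //     windowed.apply { (key, window, elements, out: Collector[(String, Long)]) =>
  //       out.collect((key, elements.size.toLong))
  //     }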
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given pre-aggregation reducer.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use [[reduce(ReduceFunction, WindowFunction)]] instead.
*/
@deprecated
def apply[R: TypeInformation](
preAggregator: ReduceFunction[T],
function: WindowFunction[T, R, K, W]): DataStream[R] = {
val cleanedPreAggregator = clean(preAggregator)
val cleanedWindowFunction = clean(function)
val applyFunction = new ScalaWindowFunctionWrapper[T, R, K, W](cleanedWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.apply(cleanedPreAggregator, applyFunction, resultType))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is pre-aggregated using the given pre-aggregation reducer.
*
* @param preAggregator The reduce function that is used for pre-aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use [[reduce(ReduceFunction, WindowFunction)]] instead.
*/
@deprecated
def apply[R: TypeInformation](
preAggregator: (T, T) => T,
windowFunction: (K, W, Iterable[T], Collector[R]) => Unit): DataStream[R] = {
if (preAggregator == null) {
throw new NullPointerException("Reduce function must not be null.")
}
if (windowFunction == null) {
throw new NullPointerException("WindowApply function must not be null.")
}
val cleanReducer = clean(preAggregator)
val cleanWindowFunction = clean(windowFunction)
val reducer = new ScalaReduceFunction[T](cleanReducer)
val applyFunction = new ScalaWindowFunction[T, R, K, W](cleanWindowFunction)
asScalaStream(javaStream.apply(reducer, applyFunction, implicitly[TypeInformation[R]]))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is incrementally aggregated using the given fold function.
*
* @param initialValue The initial value of the fold
* @param foldFunction The fold function that is used for incremental aggregation
* @param function The window function.
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use [[fold(R, FoldFunction, WindowFunction)]] instead.
*/
@deprecated
def apply[R: TypeInformation](
initialValue: R,
foldFunction: FoldFunction[T, R],
function: WindowFunction[R, R, K, W]): DataStream[R] = {
val cleanedFunction = clean(function)
val cleanedFoldFunction = clean(foldFunction)
val applyFunction = new ScalaWindowFunctionWrapper[R, R, K, W](cleanedFunction)
asScalaStream(javaStream.apply(
initialValue,
cleanedFoldFunction,
applyFunction,
implicitly[TypeInformation[R]]))
}
/**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* Arriving data is incrementally aggregated using the given fold function.
*
* @param foldFunction The fold function that is used for incremental aggregation
* @param windowFunction The window function.
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use [[fold(R, FoldFunction, WindowFunction)]] instead.
*/
@deprecated
def apply[R: TypeInformation](
initialValue: R,
foldFunction: (R, T) => R,
windowFunction: (K, W, Iterable[R], Collector[R]) => Unit): DataStream[R] = {
if (foldFunction == null) {
throw new NullPointerException("Fold function must not be null.")
}
if (windowFunction == null) {
throw new NullPointerException("WindowApply function must not be null.")
}
val cleanFolder = clean(foldFunction)
val cleanWindowFunction = clean(windowFunction)
val folder = new ScalaFoldFunction[T, R](cleanFolder)
val applyFunction = new ScalaWindowFunction[R, R, K, W](cleanWindowFunction)
val resultType: TypeInformation[R] = implicitly[TypeInformation[R]]
asScalaStream(javaStream.apply(initialValue, folder, applyFunction, resultType))
}
// ------------------------------------------------------------------------
// Aggregations on the keyed windows
// ------------------------------------------------------------------------
/**
   * Applies an aggregation that gives the maximum of the elements in the window at
* the given position.
*/
def max(position: Int): DataStream[T] = aggregate(AggregationType.MAX, position)
/**
   * Applies an aggregation that gives the maximum of the elements in the window at
* the given field.
*/
def max(field: String): DataStream[T] = aggregate(AggregationType.MAX, field)
/**
   * Applies an aggregation that gives the minimum of the elements in the window at
* the given position.
*/
def min(position: Int): DataStream[T] = aggregate(AggregationType.MIN, position)
/**
   * Applies an aggregation that gives the minimum of the elements in the window at
* the given field.
*/
def min(field: String): DataStream[T] = aggregate(AggregationType.MIN, field)
/**
* Applies an aggregation that sums the elements in the window at the given position.
*/
def sum(position: Int): DataStream[T] = aggregate(AggregationType.SUM, position)
/**
* Applies an aggregation that sums the elements in the window at the given field.
*/
def sum(field: String): DataStream[T] = aggregate(AggregationType.SUM, field)
/**
   * Applies an aggregation that gives the maximum element of the window by
   * the given position. In case of a tie, the first element is returned.
*/
def maxBy(position: Int): DataStream[T] = aggregate(AggregationType.MAXBY,
position)
/**
   * Applies an aggregation that gives the maximum element of the window by
   * the given field. In case of a tie, the first element is returned.
*/
def maxBy(field: String): DataStream[T] = aggregate(AggregationType.MAXBY,
field)
/**
   * Applies an aggregation that gives the minimum element of the window by
   * the given position. In case of a tie, the first element is returned.
*/
def minBy(position: Int): DataStream[T] = aggregate(AggregationType.MINBY,
position)
/**
   * Applies an aggregation that gives the minimum element of the window by
   * the given field. In case of a tie, the first element is returned.
*/
def minBy(field: String): DataStream[T] = aggregate(AggregationType.MINBY,
field)
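  // A minimal usage sketch of the shortcuts above (assumption: `windowed` is a WindowedStream
  // over a case class with a numeric field named "count" at position 1; hypothetical):
  //
  //   val totals = windowed.sum(1)          // sums field 1 per key and window
  //   val top    = windowed.maxBy("count")  // keeps the element with the largest "count"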
private def aggregate(aggregationType: AggregationType, field: String): DataStream[T] = {
val position = fieldNames2Indices(getInputType(), Array(field))(0)
aggregate(aggregationType, position)
}
def aggregate(aggregationType: AggregationType, position: Int): DataStream[T] = {
val jStream = javaStream.asInstanceOf[JavaWStream[Product, K, W]]
val reducer = aggregationType match {
case AggregationType.SUM =>
new SumAggregator(position, jStream.getInputType, jStream.getExecutionEnvironment.getConfig)
case _ =>
new ComparableAggregator(
position,
jStream.getInputType,
aggregationType,
true,
jStream.getExecutionEnvironment.getConfig)
}
new DataStream[Product](jStream.reduce(reducer)).asInstanceOf[DataStream[T]]
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning
* is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]].
*/
private[flink] def clean[F <: AnyRef](f: F): F = {
new StreamExecutionEnvironment(javaStream.getExecutionEnvironment).scalaClean(f)
}
/**
   * Gets the input type.
*/
private def getInputType(): TypeInformation[T] = javaStream.getInputType
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/PhysicalTableSourceScan.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes
import org.apache.calcite.plan.{RelOptCluster, RelOptTable, RelTraitSet}
import org.apache.calcite.rel.RelWriter
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.TableScan
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.sources.TableSource
import scala.collection.JavaConverters._
abstract class PhysicalTableSourceScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
table: RelOptTable,
val tableSource: TableSource[_])
extends TableScan(cluster, traitSet, table) {
override def deriveRowType(): RelDataType = {
val flinkTypeFactory = cluster.getTypeFactory.asInstanceOf[FlinkTypeFactory]
flinkTypeFactory.buildRowDataType(
TableEnvironment.getFieldNames(tableSource),
TableEnvironment.getFieldTypes(tableSource.getReturnType))
}
override def explainTerms(pw: RelWriter): RelWriter = {
val terms = super.explainTerms(pw)
.item("fields", TableEnvironment.getFieldNames(tableSource).mkString(", "))
val sourceDesc = tableSource.explainSource()
if (sourceDesc.nonEmpty) {
terms.item("source", sourceDesc)
} else {
terms
}
}
override def toString: String = {
val tableName = getTable.getQualifiedName
val s = s"table:$tableName, fields:(${getRowType.getFieldNames.asScala.toList.mkString(", ")})"
val sourceDesc = tableSource.explainSource()
if (sourceDesc.nonEmpty) {
s"Scan($s, source:$sourceDesc)"
} else {
s"Scan($s)"
}
}
def copy(traitSet: RelTraitSet, tableSource: TableSource[_]): PhysicalTableSourceScan
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/windows.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.table.expressions.{Expression, ExpressionParser}
import org.apache.flink.table.plan.logical._
/**
* A window specification.
*
 * A window groups rows based on time or row-count intervals. It is a general way to group
 * elements, which is helpful for both group-by aggregations and over-aggregations that
 * compute aggregates on groups of elements.
*
* Infinite streaming tables can only be grouped into time or row intervals. Hence window grouping
* is required to apply aggregations on streaming tables.
*
* For finite batch tables, window provides shortcuts for time-based groupBy.
*
*/
abstract class Window {
  // The alias expression for this window
private[flink] var alias: Option[Expression] = None
/**
* Converts an API class to a logical window for planning.
*/
private[flink] def toLogicalWindow: LogicalWindow
/**
* Assigns an alias for this window that the following `groupBy()` and `select()` clause can
   * refer to. The `select()` statement can access window properties such as window start or end
   * time.
*
* @param alias alias for this window
* @return this window
*/
def as(alias: Expression): Window = {
this.alias = Some(alias)
this
}
/**
* Assigns an alias for this window that the following `groupBy()` and `select()` clause can
   * refer to. The `select()` statement can access window properties such as window start or end
   * time.
*
* @param alias alias for this window
* @return this window
*/
def as(alias: String): Window = as(ExpressionParser.parseExpression(alias))
}
/**
* A window operating on event-time.
*
 * @param timeField defines the time mode for streaming tables. For batch tables it defines
 *                  the time attribute on which rows are grouped.
*/
abstract class EventTimeWindow(val timeField: Expression) extends Window
// ------------------------------------------------------------------------------------------------
// Tumbling windows
// ------------------------------------------------------------------------------------------------
/**
* Tumbling window.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows are
* grouped by processing-time.
*
* @param size the size of the window either as time or row-count interval.
*/
class TumblingWindow(size: Expression) extends Window {
/**
* Tumbling window.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* @param size the size of the window either as time or row-count interval.
*/
def this(size: String) = this(ExpressionParser.parseExpression(size))
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a tumbling window on event-time
*/
def on(timeField: Expression): TumblingEventTimeWindow =
new TumblingEventTimeWindow(timeField, size)
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a tumbling window on event-time
*/
def on(timeField: String): TumblingEventTimeWindow =
on(ExpressionParser.parseExpression(timeField))
override private[flink] def toLogicalWindow: LogicalWindow =
ProcessingTimeTumblingGroupWindow(alias, size)
}
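// A minimal usage sketch (assumption: a `Tumble` helper object and the expression DSL
// implicits from the Table API are in scope; neither is defined in this file):
//
//   table
//     .window(Tumble over 10.minutes on 'rowtime as 'w)
//     .groupBy('w, 'user)
//     .select('user, 'amount.sum, 'w.start, 'w.end)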
/**
* Tumbling window on event-time.
*/
class TumblingEventTimeWindow(
time: Expression,
size: Expression)
extends EventTimeWindow(time) {
override private[flink] def toLogicalWindow: LogicalWindow =
EventTimeTumblingGroupWindow(alias, time, size)
}
// ------------------------------------------------------------------------------------------------
// Sliding windows
// ------------------------------------------------------------------------------------------------
/**
* Partially specified sliding window.
*
* @param size the size of the window either as time or row-count interval.
*/
class SlideWithSize(size: Expression) {
/**
* Partially specified sliding window.
*
* @param size the size of the window either as time or row-count interval.
*/
def this(size: String) = this(ExpressionParser.parseExpression(size))
/**
* Specifies the window's slide as time or row-count interval.
*
* The slide determines the interval in which windows are started. Hence, sliding windows can
* overlap if the slide is smaller than the size of the window.
*
* For example, you could have windows of size 15 minutes that slide by 3 minutes. With this,
* 15 minutes' worth of elements are grouped every 3 minutes and each row contributes to 5
* windows.
*
* @param slide the slide of the window either as time or row-count interval.
* @return a sliding window
*/
def every(slide: Expression): SlidingWindow = new SlidingWindow(size, slide)
/**
* Specifies the window's slide as time or row-count interval.
*
* The slide determines the interval in which windows are started. Hence, sliding windows can
* overlap if the slide is smaller than the size of the window.
*
* For example, you could have windows of size 15 minutes that slide by 3 minutes. With this,
* 15 minutes' worth of elements are grouped every 3 minutes and each row contributes to 5
* windows.
*
* @param slide the slide of the window either as time or row-count interval.
* @return a sliding window
*/
def every(slide: String): SlidingWindow = every(ExpressionParser.parseExpression(slide))
}
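// Illustrative sketch of the two-step construction, using only the String-based
// constructors and methods defined in this file:
//
//   val slide = new SlideWithSize("15.minutes").every("3.minutes")  // SlidingWindow
//   val eventTimeSlide = slide.on("rowtime")                        // SlidingEventTimeWindow
//
// With a 15 minute size and a 3 minute slide, each row falls into 5 overlapping windows.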
/**
* Sliding window.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows are
* grouped by processing-time.
*
* @param size the size of the window either as time or row-count interval.
*/
class SlidingWindow(
size: Expression,
slide: Expression)
extends Window {
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a sliding window on event-time
*/
def on(timeField: Expression): SlidingEventTimeWindow =
new SlidingEventTimeWindow(timeField, size, slide)
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a sliding window on event-time
*/
def on(timeField: String): SlidingEventTimeWindow =
on(ExpressionParser.parseExpression(timeField))
override private[flink] def toLogicalWindow: LogicalWindow =
ProcessingTimeSlidingGroupWindow(alias, size, slide)
}
/**
* Sliding window on event-time.
*/
class SlidingEventTimeWindow(
timeField: Expression,
size: Expression,
slide: Expression)
extends EventTimeWindow(timeField) {
override private[flink] def toLogicalWindow: LogicalWindow =
EventTimeSlidingGroupWindow(alias, timeField, size, slide)
}
// ------------------------------------------------------------------------------------------------
// Session windows
// ------------------------------------------------------------------------------------------------
/**
* Session window.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows are
* grouped by processing-time.
*
* @param gap the time interval of inactivity before a window is closed.
*/
class SessionWindow(gap: Expression) extends Window {
/**
* Session window.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* @param gap the time interval of inactivity before a window is closed.
*/
def this(gap: String) = this(ExpressionParser.parseExpression(gap))
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a session window on event-time
*/
def on(timeField: Expression): SessionEventTimeWindow =
new SessionEventTimeWindow(timeField, gap)
/**
* Specifies the time attribute on which rows are grouped.
*
* For streaming tables call [[on('rowtime)]] to specify grouping by event-time. Otherwise rows
* are grouped by processing-time.
*
* For batch tables, refer to a timestamp or long attribute.
*
* @param timeField time mode for streaming tables and time attribute for batch tables
* @return a session window on event-time
*/
def on(timeField: String): SessionEventTimeWindow =
on(ExpressionParser.parseExpression(timeField))
override private[flink] def toLogicalWindow: LogicalWindow =
ProcessingTimeSessionGroupWindow(alias, gap)
}
/**
* Session window on event-time.
*/
class SessionEventTimeWindow(
timeField: Expression,
gap: Expression)
extends EventTimeWindow(timeField) {
override private[flink] def toLogicalWindow: LogicalWindow =
EventTimeSessionGroupWindow(alias, timeField, gap)
}
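// Illustrative sketch using only the classes defined in this file: a session window
// closes after the configured gap of inactivity.
//
//   new SessionWindow("30.minutes")                 // processing-time session window
//   new SessionWindow("30.minutes").on("rowtime")   // event-time session window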
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/TableEnvironmentTest.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table
import org.apache.flink.api.scala._
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.{TupleTypeInfo, TypeExtractor}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.expressions.{Alias, UnresolvedFieldReference}
import org.apache.flink.table.utils.{MockTableEnvironment, TableTestBase}
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
import org.junit.Assert.assertEquals
class TableEnvironmentTest extends TableTestBase {
val tEnv = new MockTableEnvironment
val tupleType = new TupleTypeInfo(
INT_TYPE_INFO,
STRING_TYPE_INFO,
DOUBLE_TYPE_INFO)
val caseClassType = implicitly[TypeInformation[CClass]]
val pojoType = TypeExtractor.createTypeInfo(classOf[PojoClass])
val atomicType = INT_TYPE_INFO
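// getFieldInfo returns a pair: _1 carries the derived field names and _2 the
// corresponding field indices; the tests below zip both against the expected arrays.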
@Test
def testGetFieldInfoTuple(): Unit = {
val fieldInfo = tEnv.getFieldInfo(tupleType)
fieldInfo._1.zip(Array("f0", "f1", "f2")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoCClass(): Unit = {
val fieldInfo = tEnv.getFieldInfo(caseClassType)
fieldInfo._1.zip(Array("cf1", "cf2", "cf3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoPojo(): Unit = {
val fieldInfo = tEnv.getFieldInfo(pojoType)
fieldInfo._1.zip(Array("pf1", "pf2", "pf3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoAtomic(): Unit = {
val fieldInfo = tEnv.getFieldInfo(atomicType)
fieldInfo._1.zip(Array("f0")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoTupleNames(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
tupleType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2"),
UnresolvedFieldReference("name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoCClassNames(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
caseClassType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2"),
UnresolvedFieldReference("name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoPojoNames1(): Unit = {
tEnv.getFieldInfo(
pojoType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2"),
UnresolvedFieldReference("name3")
))
}
@Test
def testGetFieldInfoPojoNames2(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
pojoType,
Array(
UnresolvedFieldReference("pf3"),
UnresolvedFieldReference("pf1"),
UnresolvedFieldReference("pf2")
))
fieldInfo._1.zip(Array("pf3", "pf1", "pf2")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(2, 0, 1)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoAtomicName1(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
atomicType,
Array(UnresolvedFieldReference("name"))
)
fieldInfo._1.zip(Array("name")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0)).foreach(x => assertEquals(x._2, x._1))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoAtomicName2(): Unit = {
tEnv.getFieldInfo(
atomicType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2")
))
}
@Test
def testGetFieldInfoTupleAlias1(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
tupleType,
Array(
Alias(UnresolvedFieldReference("f0"), "name1"),
Alias(UnresolvedFieldReference("f1"), "name2"),
Alias(UnresolvedFieldReference("f2"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoTupleAlias2(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
tupleType,
Array(
Alias(UnresolvedFieldReference("f2"), "name1"),
Alias(UnresolvedFieldReference("f0"), "name2"),
Alias(UnresolvedFieldReference("f1"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(2, 0, 1)).foreach(x => assertEquals(x._2, x._1))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoTupleAlias3(): Unit = {
tEnv.getFieldInfo(
tupleType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test
def testGetFieldInfoCClassAlias1(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
caseClassType,
Array(
Alias(UnresolvedFieldReference("cf1"), "name1"),
Alias(UnresolvedFieldReference("cf2"), "name2"),
Alias(UnresolvedFieldReference("cf3"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoCClassAlias2(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
caseClassType,
Array(
Alias(UnresolvedFieldReference("cf3"), "name1"),
Alias(UnresolvedFieldReference("cf1"), "name2"),
Alias(UnresolvedFieldReference("cf2"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(2, 0, 1)).foreach(x => assertEquals(x._2, x._1))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoCClassAlias3(): Unit = {
tEnv.getFieldInfo(
caseClassType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test
def testGetFieldInfoPojoAlias1(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
pojoType,
Array(
Alias(UnresolvedFieldReference("pf1"), "name1"),
Alias(UnresolvedFieldReference("pf2"), "name2"),
Alias(UnresolvedFieldReference("pf3"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(0, 1, 2)).foreach(x => assertEquals(x._2, x._1))
}
@Test
def testGetFieldInfoPojoAlias2(): Unit = {
val fieldInfo = tEnv.getFieldInfo(
pojoType,
Array(
Alias(UnresolvedFieldReference("pf3"), "name1"),
Alias(UnresolvedFieldReference("pf1"), "name2"),
Alias(UnresolvedFieldReference("pf2"), "name3")
))
fieldInfo._1.zip(Array("name1", "name2", "name3")).foreach(x => assertEquals(x._2, x._1))
fieldInfo._2.zip(Array(2, 0, 1)).foreach(x => assertEquals(x._2, x._1))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoPojoAlias3(): Unit = {
tEnv.getFieldInfo(
pojoType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoAtomicAlias(): Unit = {
tEnv.getFieldInfo(
atomicType,
Array(
Alias(UnresolvedFieldReference("name1"), "name2")
))
}
@Test
def testSqlWithoutRegisteringForBatchTables(): Unit = {
val util = batchTestUtil()
val table = util.addTable[(Long, Int, String)]("tableName", 'a, 'b, 'c)
val sqlTable = util.tEnv.sql(s"SELECT a, b, c FROM $table WHERE b > 12")
val expected = unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a, b, c"),
term("where", ">(b, 12)"))
util.verifyTable(sqlTable, expected)
val table2 = util.addTable[(Long, Int, String)]('d, 'e, 'f)
val sqlTable2 = util.tEnv.sql(s"SELECT d, e, f FROM $table, $table2 WHERE c = d")
val join = unaryNode(
"DataSetJoin",
binaryNode(
"DataSetCalc",
batchTableNode(0),
batchTableNode(1),
term("select", "c")),
term("where", "=(c, d)"),
term("join", "c, d, e, f"),
term("joinType", "InnerJoin"))
val expected2 = unaryNode(
"DataSetCalc",
join,
term("select", "d, e, f"))
util.verifyTable(sqlTable2, expected2)
}
@Test
def testSqlWithoutRegisteringForStreamTables(): Unit = {
val util = streamTestUtil()
val table = util.addTable[(Long, Int, String)]("tableName", 'a, 'b, 'c)
val sqlTable = util.tEnv.sql(s"SELECT a, b, c FROM $table WHERE b > 12")
val expected = unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a, b, c"),
term("where", ">(b, 12)"))
util.verifyTable(sqlTable, expected)
val table2 = util.addTable[(Long, Int, String)]('d, 'e, 'f)
val sqlTable2 = util.tEnv.sql(s"SELECT d, e, f FROM $table2 " +
s"UNION ALL SELECT a, b, c FROM $table")
val expected2 = binaryNode(
"DataStreamUnion",
streamTableNode(1),
streamTableNode(0),
term("union all", "d, e, f"))
util.verifyTable(sqlTable2, expected2)
}
}
case class CClass(cf1: Int, cf2: String, cf3: Double)
class PojoClass(var pf1: Int, var pf2: String, var pf3: Double) {
def this() = this(0, "", 0.0)
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/AvgAggFunction.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.{BigDecimal, BigInteger}
import java.util.{List => JList}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.table.functions.{Accumulator, AggregateFunction}
/** The initial accumulator for Integral Avg aggregate function */
class IntegralAvgAccumulator extends JTuple2[Long, Long] with Accumulator {
f0 = 0L //sum
f1 = 0L //count
}
/**
* Base class for built-in Integral Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class IntegralAvgAggFunction[T] extends AggregateFunction[T] {
override def createAccumulator(): Accumulator = {
new IntegralAvgAccumulator
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].longValue()
val accum = accumulator.asInstanceOf[IntegralAvgAccumulator]
accum.f0 += v
accum.f1 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].longValue()
val accum = accumulator.asInstanceOf[IntegralAvgAccumulator]
accum.f0 -= v
accum.f1 -= 1L
}
}
override def getValue(accumulator: Accumulator): T = {
val accum = accumulator.asInstanceOf[IntegralAvgAccumulator]
if (accum.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(accum.f0 / accum.f1)
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[IntegralAvgAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[IntegralAvgAccumulator]
ret.f1 += a.f1
ret.f0 += a.f0
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[IntegralAvgAccumulator].f0 = 0L
accumulator.asInstanceOf[IntegralAvgAccumulator].f1 = 0L
}
override def getAccumulatorType: TypeInformation[_] = {
new TupleTypeInfo(
new IntegralAvgAccumulator().getClass,
BasicTypeInfo.LONG_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a Long container to save
* the intermediate result to avoid overflow caused by the sum operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: Long): T
}
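// Rough sketch of how a concrete subclass behaves when driven directly
// (illustrative only; at runtime these methods are called by the framework):
//
//   val avg = new IntAvgAggFunction
//   val acc = avg.createAccumulator()
//   avg.accumulate(acc, 1); avg.accumulate(acc, 2); avg.accumulate(acc, 6)
//   avg.getValue(acc)   // 3  (sum 9 / count 3, integral division)
//   avg.retract(acc, 6)
//   avg.getValue(acc)   // 1  (sum 3 / count 2, truncated)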
/**
* Built-in Byte Avg aggregate function
*/
class ByteAvgAggFunction extends IntegralAvgAggFunction[Byte] {
override def resultTypeConvert(value: Long): Byte = value.toByte
}
/**
* Built-in Short Avg aggregate function
*/
class ShortAvgAggFunction extends IntegralAvgAggFunction[Short] {
override def resultTypeConvert(value: Long): Short = value.toShort
}
/**
* Built-in Int Avg aggregate function
*/
class IntAvgAggFunction extends IntegralAvgAggFunction[Int] {
override def resultTypeConvert(value: Long): Int = value.toInt
}
/** The initial accumulator for Big Integral Avg aggregate function */
class BigIntegralAvgAccumulator
extends JTuple2[BigInteger, Long] with Accumulator {
f0 = BigInteger.ZERO //sum
f1 = 0L //count
}
/**
* Base Class for Built-in Big Integral Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class BigIntegralAvgAggFunction[T] extends AggregateFunction[T] {
override def createAccumulator(): Accumulator = {
new BigIntegralAvgAccumulator
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Long]
val a = accumulator.asInstanceOf[BigIntegralAvgAccumulator]
a.f0 = a.f0.add(BigInteger.valueOf(v))
a.f1 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Long]
val a = accumulator.asInstanceOf[BigIntegralAvgAccumulator]
a.f0 = a.f0.subtract(BigInteger.valueOf(v))
a.f1 -= 1L
}
}
override def getValue(accumulator: Accumulator): T = {
val a = accumulator.asInstanceOf[BigIntegralAvgAccumulator]
if (a.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(a.f0.divide(BigInteger.valueOf(a.f1)))
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[BigIntegralAvgAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[BigIntegralAvgAccumulator]
ret.f1 += a.f1
ret.f0 = ret.f0.add(a.f0)
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[BigIntegralAvgAccumulator].f0 = BigInteger.ZERO
accumulator.asInstanceOf[BigIntegralAvgAccumulator].f1 = 0L
}
override def getAccumulatorType: TypeInformation[_] = {
new TupleTypeInfo(
new BigIntegralAvgAccumulator().getClass,
BasicTypeInfo.BIG_INT_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a BigInteger container to
* save the intermediate result to avoid overflow caused by the sum
* operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: BigInteger): T
}
/**
* Built-in Long Avg aggregate function
*/
class LongAvgAggFunction extends BigIntegralAvgAggFunction[Long] {
override def resultTypeConvert(value: BigInteger): Long = value.longValue()
}
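// Why the sum is kept in a BigInteger: two Long.MaxValue inputs would overflow a
// Long sum but stay exact here (illustrative sketch):
//
//   val avg = new LongAvgAggFunction
//   val acc = avg.createAccumulator()
//   avg.accumulate(acc, Long.MaxValue)
//   avg.accumulate(acc, Long.MaxValue)
//   avg.getValue(acc)   // Long.MaxValue, no intermediate overflow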
/** The initial accumulator for Floating Avg aggregate function */
class FloatingAvgAccumulator extends JTuple2[Double, Long] with Accumulator {
f0 = 0 //sum
f1 = 0L //count
}
/**
* Base class for built-in Floating Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class FloatingAvgAggFunction[T] extends AggregateFunction[T] {
override def createAccumulator(): Accumulator = {
new FloatingAvgAccumulator
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].doubleValue()
val accum = accumulator.asInstanceOf[FloatingAvgAccumulator]
accum.f0 += v
accum.f1 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].doubleValue()
val accum = accumulator.asInstanceOf[FloatingAvgAccumulator]
accum.f0 -= v
accum.f1 -= 1L
}
}
override def getValue(accumulator: Accumulator): T = {
val accum = accumulator.asInstanceOf[FloatingAvgAccumulator]
if (accum.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(accum.f0 / accum.f1)
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[FloatingAvgAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[FloatingAvgAccumulator]
ret.f1 += a.f1
ret.f0 += a.f0
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[FloatingAvgAccumulator].f0 = 0
accumulator.asInstanceOf[FloatingAvgAccumulator].f1 = 0L
}
override def getAccumulatorType: TypeInformation[_] = {
new TupleTypeInfo(
new FloatingAvgAccumulator().getClass,
BasicTypeInfo.DOUBLE_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a Double container to save
* the intermediate result to avoid overflow caused by the sum operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: Double): T
}
/**
* Built-in Float Avg aggregate function
*/
class FloatAvgAggFunction extends FloatingAvgAggFunction[Float] {
override def resultTypeConvert(value: Double): Float = value.toFloat
}
/**
* Built-in Double Avg aggregate function
*/
class DoubleAvgAggFunction extends FloatingAvgAggFunction[Double] {
override def resultTypeConvert(value: Double): Double = value
}
/** The initial accumulator for Big Decimal Avg aggregate function */
class DecimalAvgAccumulator
extends JTuple2[BigDecimal, Long] with Accumulator {
f0 = BigDecimal.ZERO //sum
f1 = 0L //count
}
/**
* Base class for built-in Big Decimal Avg aggregate function
*/
class DecimalAvgAggFunction extends AggregateFunction[BigDecimal] {
override def createAccumulator(): Accumulator = {
new DecimalAvgAccumulator
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
val accum = accumulator.asInstanceOf[DecimalAvgAccumulator]
accum.f0 = accum.f0.add(v)
accum.f1 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
val accum = accumulator.asInstanceOf[DecimalAvgAccumulator]
accum.f0 = accum.f0.subtract(v)
accum.f1 -= 1L
}
}
override def getValue(accumulator: Accumulator): BigDecimal = {
val a = accumulator.asInstanceOf[DecimalAvgAccumulator]
if (a.f1 == 0) {
null.asInstanceOf[BigDecimal]
} else {
a.f0.divide(BigDecimal.valueOf(a.f1))
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[DecimalAvgAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[DecimalAvgAccumulator]
ret.f0 = ret.f0.add(a.f0)
ret.f1 += a.f1
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[DecimalAvgAccumulator].f0 = BigDecimal.ZERO
accumulator.asInstanceOf[DecimalAvgAccumulator].f1 = 0L
}
override def getAccumulatorType: TypeInformation[_] = {
new TupleTypeInfo(
new DecimalAvgAccumulator().getClass,
BasicTypeInfo.BIG_DEC_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/TemporalTypesTest.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import java.sql.{Date, Time, Timestamp}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.types.Row
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.utils.ExpressionTestBase
import org.junit.Test
class TemporalTypesTest extends ExpressionTestBase {
@Test
def testTimePointLiterals(): Unit = {
testAllApis(
"1990-10-14".toDate,
"'1990-10-14'.toDate",
"DATE '1990-10-14'",
"1990-10-14")
testTableApi(
Date.valueOf("2040-09-11"),
"'2040-09-11'.toDate",
"2040-09-11")
testAllApis(
"1500-04-30".cast(Types.SQL_DATE),
"'1500-04-30'.cast(DATE)",
"CAST('1500-04-30' AS DATE)",
"1500-04-30")
testAllApis(
"15:45:59".toTime,
"'15:45:59'.toTime",
"TIME '15:45:59'",
"15:45:59")
testTableApi(
Time.valueOf("00:00:00"),
"'00:00:00'.toTime",
"00:00:00")
testAllApis(
"1:30:00".cast(Types.SQL_TIME),
"'1:30:00'.cast(TIME)",
"CAST('1:30:00' AS TIME)",
"01:30:00")
testAllApis(
"1990-10-14 23:00:00.123".toTimestamp,
"'1990-10-14 23:00:00.123'.toTimestamp",
"TIMESTAMP '1990-10-14 23:00:00.123'",
"1990-10-14 23:00:00.123")
testTableApi(
Timestamp.valueOf("2040-09-11 00:00:00.000"),
"'2040-09-11 00:00:00.000'.toTimestamp",
"2040-09-11 00:00:00.0")
testAllApis(
"1500-04-30 12:00:00".cast(Types.SQL_TIMESTAMP),
"'1500-04-30 12:00:00'.cast(TIMESTAMP)",
"CAST('1500-04-30 12:00:00' AS TIMESTAMP)",
"1500-04-30 12:00:00.0")
}
@Test
def testTimeIntervalLiterals(): Unit = {
testAllApis(
1.year,
"1.year",
"INTERVAL '1' YEAR",
"+1-00")
testAllApis(
1.month,
"1.month",
"INTERVAL '1' MONTH",
"+0-01")
testAllApis(
12.days,
"12.days",
"INTERVAL '12' DAY",
"+12 00:00:00.000")
testAllApis(
1.hour,
"1.hour",
"INTERVAL '1' HOUR",
"+0 01:00:00.000")
testAllApis(
3.minutes,
"3.minutes",
"INTERVAL '3' MINUTE",
"+0 00:03:00.000")
testAllApis(
3.seconds,
"3.seconds",
"INTERVAL '3' SECOND",
"+0 00:00:03.000")
testAllApis(
3.millis,
"3.millis",
"INTERVAL '0.003' SECOND",
"+0 00:00:00.003")
}
@Test
def testTimePointInput(): Unit = {
testAllApis(
'f0,
"f0",
"f0",
"1990-10-14")
testAllApis(
'f1,
"f1",
"f1",
"10:20:45")
testAllApis(
'f2,
"f2",
"f2",
"1990-10-14 10:20:45.123")
}
@Test
def testTimeIntervalInput(): Unit = {
testAllApis(
'f9,
"f9",
"f9",
"+2-00")
testAllApis(
'f10,
"f10",
"f10",
"+0 00:00:12.000")
}
@Test
def testTimePointCasting(): Unit = {
testAllApis(
'f0.cast(Types.SQL_TIMESTAMP),
"f0.cast(TIMESTAMP)",
"CAST(f0 AS TIMESTAMP)",
"1990-10-14 00:00:00.0")
testAllApis(
'f1.cast(Types.SQL_TIMESTAMP),
"f1.cast(TIMESTAMP)",
"CAST(f1 AS TIMESTAMP)",
"1970-01-01 10:20:45.0")
testAllApis(
'f2.cast(Types.SQL_DATE),
"f2.cast(DATE)",
"CAST(f2 AS DATE)",
"1990-10-14")
testAllApis(
'f2.cast(Types.SQL_TIME),
"f2.cast(TIME)",
"CAST(f2 AS TIME)",
"10:20:45")
testAllApis(
'f2.cast(Types.SQL_TIME),
"f2.cast(TIME)",
"CAST(f2 AS TIME)",
"10:20:45")
testTableApi(
'f7.cast(Types.SQL_DATE),
"f7.cast(DATE)",
"2002-11-09")
testTableApi(
'f7.cast(Types.SQL_DATE).cast(Types.INT),
"f7.cast(DATE).cast(INT)",
"12000")
testTableApi(
'f7.cast(Types.SQL_TIME),
"f7.cast(TIME)",
"00:00:12")
testTableApi(
'f7.cast(Types.SQL_TIME).cast(Types.INT),
"f7.cast(TIME).cast(INT)",
"12000")
testTableApi(
'f8.cast(Types.SQL_TIMESTAMP),
"f8.cast(TIMESTAMP)",
"2016-06-27 07:23:33.0")
testTableApi(
'f8.cast(Types.SQL_TIMESTAMP).cast(Types.LONG),
"f8.cast(TIMESTAMP).cast(LONG)",
"1467012213000")
}
@Test
def testTimeIntervalCasting(): Unit = {
testTableApi(
'f7.cast(Types.INTERVAL_MONTHS),
"f7.cast(INTERVAL_MONTHS)",
"+1000-00")
testTableApi(
'f8.cast(Types.INTERVAL_MILLIS),
"f8.cast(INTERVAL_MILLIS)",
"+16979 07:23:33.000")
}
@Test
def testTimePointComparison(): Unit = {
testAllApis(
'f0 < 'f3,
"f0 < f3",
"f0 < f3",
"false")
testAllApis(
'f0 < 'f4,
"f0 < f4",
"f0 < f4",
"true")
testAllApis(
'f1 < 'f5,
"f1 < f5",
"f1 < f5",
"false")
testAllApis(
'f0.cast(Types.SQL_TIMESTAMP) !== 'f2,
"f0.cast(TIMESTAMP) !== f2",
"CAST(f0 AS TIMESTAMP) <> f2",
"true")
testAllApis(
'f0.cast(Types.SQL_TIMESTAMP) === 'f6,
"f0.cast(TIMESTAMP) === f6",
"CAST(f0 AS TIMESTAMP) = f6",
"true")
}
@Test
def testTimeIntervalArithmetic(): Unit = {
// interval months comparison
testAllApis(
12.months < 24.months,
"12.months < 24.months",
"INTERVAL '12' MONTH < INTERVAL '24' MONTH",
"true")
testAllApis(
8.years === 8.years,
"8.years === 8.years",
"INTERVAL '8' YEAR = INTERVAL '8' YEAR",
"true")
// interval millis comparison
testAllApis(
8.millis > 10.millis,
"8.millis > 10.millis",
"INTERVAL '0.008' SECOND > INTERVAL '0.010' SECOND",
"false")
testAllApis(
8.millis === 8.millis,
"8.millis === 8.millis",
"INTERVAL '0.008' SECOND = INTERVAL '0.008' SECOND",
"true")
// interval months addition/subtraction
testAllApis(
8.years + 10.months,
"8.years + 10.months",
"INTERVAL '8' YEAR + INTERVAL '10' MONTH",
"+8-10")
testAllApis(
2.years - 12.months,
"2.years - 12.months",
"INTERVAL '2' YEAR - INTERVAL '12' MONTH",
"+1-00")
testAllApis(
-2.years,
"-2.years",
"-INTERVAL '2' YEAR",
"-2-00")
// interval millis addition/subtraction
testAllApis(
8.hours + 10.minutes + 12.seconds + 5.millis,
"8.hours + 10.minutes + 12.seconds + 5.millis",
"INTERVAL '8' HOUR + INTERVAL '10' MINUTE + INTERVAL '12.005' SECOND",
"+0 08:10:12.005")
testAllApis(
1.minute - 10.seconds,
"1.minute - 10.seconds",
"INTERVAL '1' MINUTE - INTERVAL '10' SECOND",
"+0 00:00:50.000")
testAllApis(
-10.seconds,
"-10.seconds",
"-INTERVAL '10' SECOND",
"-0 00:00:10.000")
// addition to date
// interval millis
testAllApis(
'f0 + 2.days,
"f0 + 2.days",
"f0 + INTERVAL '2' DAY",
"1990-10-16")
// interval millis
testAllApis(
30.days + 'f0,
"30.days + f0",
"INTERVAL '30' DAY + f0",
"1990-11-13")
// interval months
testAllApis(
'f0 + 2.months,
"f0 + 2.months",
"f0 + INTERVAL '2' MONTH",
"1990-12-14")
// interval months
testAllApis(
2.months + 'f0,
"2.months + f0",
"INTERVAL '2' MONTH + f0",
"1990-12-14")
// addition to time
// interval millis
testAllApis(
'f1 + 12.hours,
"f1 + 12.hours",
"f1 + INTERVAL '12' HOUR",
"22:20:45")
// interval millis
testAllApis(
12.hours + 'f1,
"12.hours + f1",
"INTERVAL '12' HOUR + f1",
"22:20:45")
// addition to timestamp
// interval millis
testAllApis(
'f2 + 10.days + 4.millis,
"f2 + 10.days + 4.millis",
"f2 + INTERVAL '10 00:00:00.004' DAY TO SECOND",
"1990-10-24 10:20:45.127")
// interval millis
testAllApis(
10.days + 'f2 + 4.millis,
"10.days + f2 + 4.millis",
"INTERVAL '10 00:00:00.004' DAY TO SECOND + f2",
"1990-10-24 10:20:45.127")
// interval months
testAllApis(
'f2 + 10.years,
"f2 + 10.years",
"f2 + INTERVAL '10' YEAR",
"2000-10-14 10:20:45.123")
// interval months
testAllApis(
10.years + 'f2,
"10.years + f2",
"INTERVAL '10' YEAR + f2",
"2000-10-14 10:20:45.123")
// subtraction from date
// interval millis
testAllApis(
'f0 - 2.days,
"f0 - 2.days",
"f0 - INTERVAL '2' DAY",
"1990-10-12")
// interval millis
testAllApis(
-30.days + 'f0,
"-30.days + f0",
"INTERVAL '-30' DAY + f0",
"1990-09-14")
// interval months
testAllApis(
'f0 - 2.months,
"f0 - 2.months",
"f0 - INTERVAL '2' MONTH",
"1990-08-14")
// interval months
testAllApis(
-2.months + 'f0,
"-2.months + f0",
"-INTERVAL '2' MONTH + f0",
"1990-08-14")
// subtraction from time
// interval millis
testAllApis(
'f1 - 12.hours,
"f1 - 12.hours",
"f1 - INTERVAL '12' HOUR",
"22:20:45")
// interval millis
testAllApis(
-12.hours + 'f1,
"-12.hours + f1",
"INTERVAL '-12' HOUR + f1",
"22:20:45")
// subtraction from timestamp
// interval millis
testAllApis(
'f2 - 10.days - 4.millis,
"f2 - 10.days - 4.millis",
"f2 - INTERVAL '10 00:00:00.004' DAY TO SECOND",
"1990-10-04 10:20:45.119")
// interval millis
testAllApis(
-10.days + 'f2 - 4.millis,
"-10.days + f2 - 4.millis",
"INTERVAL '-10 00:00:00.004' DAY TO SECOND + f2",
"1990-10-04 10:20:45.119")
// interval months
testAllApis(
'f2 - 10.years,
"f2 - 10.years",
"f2 - INTERVAL '10' YEAR",
"1980-10-14 10:20:45.123")
// interval months
testAllApis(
-10.years + 'f2,
"-10.years + f2",
"INTERVAL '-10' YEAR + f2",
"1980-10-14 10:20:45.123")
// casting
testAllApis(
-'f9.cast(Types.INTERVAL_MONTHS),
"-f9.cast(INTERVAL_MONTHS)",
"-CAST(f9 AS INTERVAL YEAR)",
"-2-00")
testAllApis(
-'f10.cast(Types.INTERVAL_MILLIS),
"-f10.cast(INTERVAL_MILLIS)",
"-CAST(f10 AS INTERVAL SECOND)",
"-0 00:00:12.000")
// addition/subtraction of interval millis and interval months
testAllApis(
'f0 + 2.days + 1.month,
"f0 + 2.days + 1.month",
"f0 + INTERVAL '2' DAY + INTERVAL '1' MONTH",
"1990-11-16")
testAllApis(
'f0 - 2.days - 1.month,
"f0 - 2.days - 1.month",
"f0 - INTERVAL '2' DAY - INTERVAL '1' MONTH",
"1990-09-12")
testAllApis(
'f2 + 2.days + 1.month,
"f2 + 2.days + 1.month",
"f2 + INTERVAL '2' DAY + INTERVAL '1' MONTH",
"1990-11-16 10:20:45.123")
testAllApis(
'f2 - 2.days - 1.month,
"f2 - 2.days - 1.month",
"f2 - INTERVAL '2' DAY - INTERVAL '1' MONTH",
"1990-09-12 10:20:45.123")
}
// ----------------------------------------------------------------------------------------------
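// Layout of the test row built below (field: type, value):
//   f0: SQL_DATE 1990-10-14          f1: SQL_TIME 10:20:45
//   f2: SQL_TIMESTAMP 1990-10-14 10:20:45.123
//   f3: SQL_DATE 1990-10-13          f4: SQL_DATE 1990-10-15
//   f5: SQL_TIME 00:00:00            f6: SQL_TIMESTAMP 1990-10-14 00:00:00.0
//   f7: INT 12000                    f8: LONG 1467012213000L
//   f9: INTERVAL_MONTHS 24           f10: INTERVAL_MILLIS 12000L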
def testData = {
val testData = new Row(11)
testData.setField(0, Date.valueOf("1990-10-14"))
testData.setField(1, Time.valueOf("10:20:45"))
testData.setField(2, Timestamp.valueOf("1990-10-14 10:20:45.123"))
testData.setField(3, Date.valueOf("1990-10-13"))
testData.setField(4, Date.valueOf("1990-10-15"))
testData.setField(5, Time.valueOf("00:00:00"))
testData.setField(6, Timestamp.valueOf("1990-10-14 00:00:00.0"))
testData.setField(7, 12000)
testData.setField(8, 1467012213000L)
testData.setField(9, 24)
testData.setField(10, 12000L)
testData
}
def typeInfo = {
new RowTypeInfo(
Types.SQL_DATE,
Types.SQL_TIME,
Types.SQL_TIMESTAMP,
Types.SQL_DATE,
Types.SQL_DATE,
Types.SQL_TIME,
Types.SQL_TIMESTAMP,
Types.INT,
Types.LONG,
Types.INTERVAL_MONTHS,
Types.INTERVAL_MILLIS).asInstanceOf[TypeInformation[Any]]
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/SumWithRetractAggFunction.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.BigDecimal
import java.util.{List => JList}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.table.functions.{Accumulator, AggregateFunction}
/** The initial accumulator for Sum with retract aggregate function */
class SumWithRetractAccumulator[T] extends JTuple2[T, Long] with Accumulator
/**
* Base class for built-in Sum with retract aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class SumWithRetractAggFunction[T: Numeric] extends AggregateFunction[T] {
private val numeric = implicitly[Numeric[T]]
override def createAccumulator(): Accumulator = {
val acc = new SumWithRetractAccumulator[T]()
acc.f0 = numeric.zero //sum
acc.f1 = 0L //total count
acc
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[T]
val a = accumulator.asInstanceOf[SumWithRetractAccumulator[T]]
a.f0 = numeric.plus(a.f0, v)
a.f1 += 1
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[T]
val a = accumulator.asInstanceOf[SumWithRetractAccumulator[T]]
a.f0 = numeric.minus(a.f0, v)
a.f1 -= 1
}
}
override def getValue(accumulator: Accumulator): T = {
val a = accumulator.asInstanceOf[SumWithRetractAccumulator[T]]
if (a.f1 > 0) {
a.f0
} else {
null.asInstanceOf[T]
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[SumWithRetractAccumulator[T]]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[SumWithRetractAccumulator[T]]
ret.f0 = numeric.plus(ret.f0, a.f0)
ret.f1 += a.f1
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[SumWithRetractAccumulator[T]].f0 = numeric.zero
accumulator.asInstanceOf[SumWithRetractAccumulator[T]].f1 = 0L
}
override def getAccumulatorType(): TypeInformation[_] = {
new TupleTypeInfo(
(new SumWithRetractAccumulator).getClass,
getValueTypeInfo,
BasicTypeInfo.LONG_TYPE_INFO)
}
def getValueTypeInfo: TypeInformation[_]
}
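// Rough sketch of the retract semantics when driven directly (illustrative only):
//
//   val sum = new IntSumWithRetractAggFunction
//   val acc = sum.createAccumulator()
//   sum.accumulate(acc, 5); sum.accumulate(acc, 7)
//   sum.getValue(acc)   // 12 (count is 2)
//   sum.retract(acc, 5)
//   sum.getValue(acc)   // 7  (count is 1)
//   sum.retract(acc, 7) // count back to 0: getValue falls back to
//                       // null.asInstanceOf[T] (0 after unboxing for Int)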
/**
* Built-in Byte Sum with retract aggregate function
*/
class ByteSumWithRetractAggFunction extends SumWithRetractAggFunction[Byte] {
override def getValueTypeInfo = BasicTypeInfo.BYTE_TYPE_INFO
}
/**
* Built-in Short Sum with retract aggregate function
*/
class ShortSumWithRetractAggFunction extends SumWithRetractAggFunction[Short] {
override def getValueTypeInfo = BasicTypeInfo.SHORT_TYPE_INFO
}
/**
* Built-in Int Sum with retract aggregate function
*/
class IntSumWithRetractAggFunction extends SumWithRetractAggFunction[Int] {
override def getValueTypeInfo = BasicTypeInfo.INT_TYPE_INFO
}
/**
* Built-in Long Sum with retract aggregate function
*/
class LongSumWithRetractAggFunction extends SumWithRetractAggFunction[Long] {
override def getValueTypeInfo = BasicTypeInfo.LONG_TYPE_INFO
}
/**
* Built-in Float Sum with retract aggregate function
*/
class FloatSumWithRetractAggFunction extends SumWithRetractAggFunction[Float] {
override def getValueTypeInfo = BasicTypeInfo.FLOAT_TYPE_INFO
}
/**
* Built-in Double Sum with retract aggregate function
*/
class DoubleSumWithRetractAggFunction extends SumWithRetractAggFunction[Double] {
override def getValueTypeInfo = BasicTypeInfo.DOUBLE_TYPE_INFO
}
/** The initial accumulator for Big Decimal Sum with retract aggregate function */
class DecimalSumWithRetractAccumulator extends JTuple2[BigDecimal, Long] with Accumulator {
f0 = BigDecimal.ZERO
f1 = 0L
}
/**
* Built-in Big Decimal Sum with retract aggregate function
*/
class DecimalSumWithRetractAggFunction extends AggregateFunction[BigDecimal] {
override def createAccumulator(): Accumulator = {
new DecimalSumWithRetractAccumulator
}
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
val accum = accumulator.asInstanceOf[DecimalSumWithRetractAccumulator]
accum.f0 = accum.f0.add(v)
accum.f1 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
val accum = accumulator.asInstanceOf[DecimalSumWithRetractAccumulator]
accum.f0 = accum.f0.subtract(v)
accum.f1 -= 1L
}
}
override def getValue(accumulator: Accumulator): BigDecimal = {
if (accumulator.asInstanceOf[DecimalSumWithRetractAccumulator].f1 == 0) {
null.asInstanceOf[BigDecimal]
} else {
accumulator.asInstanceOf[DecimalSumWithRetractAccumulator].f0
}
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[DecimalSumWithRetractAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
val a = accumulators.get(i).asInstanceOf[DecimalSumWithRetractAccumulator]
ret.f0 = ret.f0.add(a.f0)
ret.f1 += a.f1
i += 1
}
ret
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[DecimalSumWithRetractAccumulator].f0 = BigDecimal.ZERO
accumulator.asInstanceOf[DecimalSumWithRetractAccumulator].f1 = 0L
}
override def getAccumulatorType(): TypeInformation[_] = {
new TupleTypeInfo(
(new DecimalSumWithRetractAccumulator).getClass,
BasicTypeInfo.BIG_DEC_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
}
|
streamline-eu/dynamic-flink | flink-runtime/src/test/scala/org/apache/flink/runtime/jobmanager/JobManagerRegistrationTest.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.jobmanager
import java.net.InetAddress
import java.util.concurrent.{Executors, ScheduledExecutorService}
import akka.actor._
import akka.testkit.{ImplicitSender, TestKit}
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.flink.runtime.clusterframework.FlinkResourceManager
import org.apache.flink.runtime.clusterframework.types.ResourceID
import org.apache.flink.runtime.highavailability.HighAvailabilityServices
import org.apache.flink.runtime.instance._
import org.apache.flink.runtime.jobmanager.JobManagerRegistrationTest.PlainForwardingActor
import org.apache.flink.runtime.messages.JobManagerMessages.LeaderSessionMessage
import org.apache.flink.runtime.messages.RegistrationMessages.{AcknowledgeRegistration, AlreadyRegistered, RegisterTaskManager}
import org.apache.flink.runtime.taskmanager.TaskManagerLocation
import org.apache.flink.runtime.testutils.TestingResourceManager
import org.apache.flink.runtime.util.LeaderRetrievalUtils
import org.junit.Assert.{assertNotEquals, assertNotNull}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Tests for the JobManager's behavior when a TaskManager solicits registration.
*/
@RunWith(classOf[JUnitRunner])
class JobManagerRegistrationTest(_system: ActorSystem) extends TestKit(_system) with
ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll {
def this() = this(AkkaUtils.createLocalActorSystem(new Configuration()))
val executor: ScheduledExecutorService = Executors.newScheduledThreadPool(2)
override def afterAll(): Unit = {
executor.shutdownNow()
TestKit.shutdownActorSystem(system)
}
"The JobManager" should {
"assign a TaskManager a unique instance ID" in {
val jm = startTestingJobManager(_system)
val rm = startTestingResourceManager(_system, jm.actor())
val tm1 = _system.actorOf(Props(new PlainForwardingActor(testActor)))
val tm2 = _system.actorOf(Props(new PlainForwardingActor(testActor)))
val resourceId1 = ResourceID.generate()
val resourceId2 = ResourceID.generate()
val connectionInfo1 = new TaskManagerLocation(resourceId1, InetAddress.getLocalHost, 10000)
val connectionInfo2 = new TaskManagerLocation(resourceId2, InetAddress.getLocalHost, 10001)
val hardwareDescription = HardwareDescription.extractFromSystem(10)
var id1: InstanceID = null
var id2: InstanceID = null
// task manager 1
within(10 seconds) {
jm.tell(
RegisterTaskManager(
resourceId1,
connectionInfo1,
hardwareDescription,
1),
new AkkaActorGateway(tm1, HighAvailabilityServices.DEFAULT_LEADER_ID))
val response = expectMsgType[LeaderSessionMessage]
response match {
case LeaderSessionMessage(_, AcknowledgeRegistration(id, _)) => id1 = id
case _ => fail("Wrong response message: " + response)
}
}
// task manager 2
within(10 seconds) {
jm.tell(
RegisterTaskManager(
resourceId2,
connectionInfo2,
hardwareDescription,
1),
new AkkaActorGateway(tm2, HighAvailabilityServices.DEFAULT_LEADER_ID))
val response = expectMsgType[LeaderSessionMessage]
response match {
case LeaderSessionMessage(_, AcknowledgeRegistration(id, _)) => id2 = id
case _ => fail("Wrong response message: " + response)
}
}
assertNotNull(id1)
assertNotNull(id2)
assertNotEquals(id1, id2)
}
"handle repeated registration calls" in {
val jm = startTestingJobManager(_system)
val rm = startTestingResourceManager(_system, jm.actor())
val selfGateway = new AkkaActorGateway(testActor, HighAvailabilityServices.DEFAULT_LEADER_ID)
val resourceID = ResourceID.generate()
val connectionInfo = new TaskManagerLocation(resourceID, InetAddress.getLocalHost, 1)
val hardwareDescription = HardwareDescription.extractFromSystem(10)
within(20 seconds) {
jm.tell(
RegisterTaskManager(
resourceID,
connectionInfo,
hardwareDescription,
1),
selfGateway)
jm.tell(
RegisterTaskManager(
resourceID,
connectionInfo,
hardwareDescription,
1),
selfGateway)
jm.tell(
RegisterTaskManager(
resourceID,
connectionInfo,
hardwareDescription,
1),
selfGateway)
expectMsgType[LeaderSessionMessage] match {
case LeaderSessionMessage(
HighAvailabilityServices.DEFAULT_LEADER_ID,
AcknowledgeRegistration(_, _)) =>
case m => fail("Wrong message type: " + m)
}
expectMsgType[LeaderSessionMessage] match {
case LeaderSessionMessage(
HighAvailabilityServices.DEFAULT_LEADER_ID,
AlreadyRegistered(_, _)) =>
case m => fail("Wrong message type: " + m)
}
expectMsgType[LeaderSessionMessage] match {
case LeaderSessionMessage(
HighAvailabilityServices.DEFAULT_LEADER_ID,
AlreadyRegistered(_, _)) =>
case m => fail("Wrong message type: " + m)
}
}
}
}
private def startTestingJobManager(system: ActorSystem): ActorGateway = {
val (jm: ActorRef, _) = JobManager.startJobManagerActors(
new Configuration(),
_system,
executor,
executor,
None,
None,
classOf[JobManager],
classOf[MemoryArchivist])
new AkkaActorGateway(jm, HighAvailabilityServices.DEFAULT_LEADER_ID)
}
private def startTestingResourceManager(system: ActorSystem, jm: ActorRef): ActorGateway = {
val jobManagerURL = AkkaUtils.getAkkaURL(system, jm)
val config = new Configuration()
val rm: ActorRef = FlinkResourceManager.startResourceManagerActors(
config,
_system,
LeaderRetrievalUtils.createLeaderRetrievalService(config, jm),
classOf[TestingResourceManager])
new AkkaActorGateway(rm, HighAvailabilityServices.DEFAULT_LEADER_ID)
}
}
object JobManagerRegistrationTest {
class PlainForwardingActor(private val target: ActorRef) extends Actor {
override def receive: Receive = {
case message => target.forward(message)
}
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/dataSet/DataSetSingleRowJoinRule.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.dataSet
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core._
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.dataset.DataSetSingleRowJoin
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalJoin
class DataSetSingleRowJoinRule
extends ConverterRule(
classOf[FlinkLogicalJoin],
FlinkConventions.LOGICAL,
FlinkConventions.DATASET,
"DataSetSingleRowJoinRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val join = call.rel(0).asInstanceOf[FlinkLogicalJoin]
if (isInnerJoin(join)) {
isSingleRow(join.getRight) || isSingleRow(join.getLeft)
} else {
false
}
}
private def isInnerJoin(join: FlinkLogicalJoin) = {
join.getJoinType == JoinRelType.INNER
}
/**
* Recursively checks if a [[RelNode]] returns at most a single row.
* Input must be a global aggregation possibly followed by projections or filters.
*/
private def isSingleRow(node: RelNode): Boolean = {
node match {
case ss: RelSubset => isSingleRow(ss.getOriginal)
case lp: Project => isSingleRow(lp.getInput)
case lf: Filter => isSingleRow(lf.getInput)
case lc: Calc => isSingleRow(lc.getInput)
case la: Aggregate => la.getGroupSet.isEmpty
case _ => false
}
}
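// For illustration, inputs that qualify as single-row look like
//   Aggregate[groupSet = {}]                   (a global aggregation)
//   Project(Filter(Aggregate[groupSet = {}]))  (optionally wrapped in Project/Filter/Calc)
// while an Aggregate with a non-empty group set, or any other node, does not.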
override def convert(rel: RelNode): RelNode = {
val join = rel.asInstanceOf[FlinkLogicalJoin]
val traitSet = rel.getTraitSet.replace(FlinkConventions.DATASET)
val dataSetLeftNode = RelOptRule.convert(join.getLeft, FlinkConventions.DATASET)
val dataSetRightNode = RelOptRule.convert(join.getRight, FlinkConventions.DATASET)
val leftIsSingle = isSingleRow(join.getLeft)
new DataSetSingleRowJoin(
rel.getCluster,
traitSet,
dataSetLeftNode,
dataSetRightNode,
leftIsSingle,
rel.getRowType,
join.getCondition,
join.getRowType,
description)
}
}
object DataSetSingleRowJoinRule {
val INSTANCE: RelOptRule = new DataSetSingleRowJoinRule
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/call.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import org.apache.calcite.rex.RexNode
import org.apache.calcite.tools.RelBuilder
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.{UnresolvedException, ValidationException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils._
import org.apache.flink.table.functions.{ScalarFunction, TableFunction}
import org.apache.flink.table.plan.logical.{LogicalNode, LogicalTableFunctionCall}
import org.apache.flink.table.validate.{ValidationFailure, ValidationResult, ValidationSuccess}
/**
* General expression for unresolved function calls. The function can be a built-in
* scalar function or a user-defined scalar function.
*/
case class Call(functionName: String, args: Seq[Expression]) extends Expression {
override private[flink] def children: Seq[Expression] = args
override private[flink] def toRexNode(implicit relBuilder: RelBuilder): RexNode = {
throw UnresolvedException(s"trying to convert UnresolvedFunction $functionName to RexNode")
}
override def toString = s"\\$functionName(${args.mkString(", ")})"
override private[flink] def resultType =
throw UnresolvedException(s"calling resultType on UnresolvedFunction $functionName")
override private[flink] def validateInput(): ValidationResult =
ValidationFailure(s"Unresolved function call: $functionName")
}
/**
* Expression for calling a user-defined scalar function.
*
* @param scalarFunction scalar function to be called (might be overloaded)
* @param parameters actual parameters that determine target evaluation method
*/
case class ScalarFunctionCall(
scalarFunction: ScalarFunction,
parameters: Seq[Expression])
extends Expression {
private var foundSignature: Option[Array[Class[_]]] = None
override private[flink] def children: Seq[Expression] = parameters
override private[flink] def toRexNode(implicit relBuilder: RelBuilder): RexNode = {
val typeFactory = relBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory]
relBuilder.call(
createScalarSqlFunction(
scalarFunction.functionIdentifier,
scalarFunction,
typeFactory),
parameters.map(_.toRexNode): _*)
}
override def toString =
s"${scalarFunction.getClass.getCanonicalName}(${parameters.mkString(", ")})"
override private[flink] def resultType = getResultType(scalarFunction, foundSignature.get)
override private[flink] def validateInput(): ValidationResult = {
val signature = children.map(_.resultType)
// look for a signature that matches the input types
foundSignature = getSignature(scalarFunction, signature)
if (foundSignature.isEmpty) {
ValidationFailure(s"Given parameters do not match any signature. \n" +
s"Actual: ${signatureToString(signature)} \n" +
s"Expected: ${signaturesToString(scalarFunction)}")
} else {
ValidationSuccess
}
}
}
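// Illustrative validation sketch (the HashCode function is hypothetical and not part
// of this file): for
//   class HashCode extends ScalarFunction { def eval(s: String): Int = s.hashCode }
// a call whose single argument resolves to a STRING field matches the eval(String)
// signature and passes validateInput(), whereas an argument of a non-matching type
// produces a ValidationFailure listing the expected signatures.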
/**
* Expression for calling a user-defined table function with actual parameters.
*
* @param functionName function name
* @param tableFunction user-defined table function
* @param parameters actual parameters of function
* @param resultType type information of returned table
*/
case class TableFunctionCall(
functionName: String,
tableFunction: TableFunction[_],
parameters: Seq[Expression],
resultType: TypeInformation[_])
extends Expression {
private var aliases: Option[Seq[String]] = None
override private[flink] def children: Seq[Expression] = parameters
/**
* Assigns an alias for this table function's returned fields that the following operator
* can refer to.
*
* @param aliasList alias for this table function's returned fields
* @return this table function call
*/
private[flink] def as(aliasList: Option[Seq[String]]): TableFunctionCall = {
this.aliases = aliasList
this
}
/**
* Converts an API class to a logical node for planning.
*/
private[flink] def toLogicalTableFunctionCall(child: LogicalNode): LogicalTableFunctionCall = {
val originNames = getFieldInfo(resultType)._1
// determine the final field names
val fieldNames = if (aliases.isDefined) {
val aliasList = aliases.get
if (aliasList.length != originNames.length) {
throw ValidationException(
s"List of column aliases must have same degree as table; " +
s"the returned table of function '$functionName' has ${originNames.length} " +
s"columns (${originNames.mkString(",")}), " +
s"whereas alias list has ${aliasList.length} columns")
} else {
aliasList.toArray
}
} else {
originNames
}
LogicalTableFunctionCall(
functionName,
tableFunction,
parameters,
resultType,
fieldNames,
child)
}
override def toString =
s"${tableFunction.getClass.getCanonicalName}(${parameters.mkString(", ")})"
}
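// Illustrative sketch (not part of the original file): a minimal user-defined
// table function of the kind TableFunctionCall wraps. The class name and the
// splitting logic are hypothetical; collect() is the emit method inherited
// from TableFunction.
class SplitSketch extends TableFunction[String] {
  def eval(s: String): Unit = s.split(" ").foreach(collect)
}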
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/GeneratedAggregations.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import org.apache.flink.api.common.functions.Function
import org.apache.flink.types.Row
/**
* Base class for code-generated aggregations.
*/
abstract class GeneratedAggregations extends Function {
/**
* Sets the results of the aggregations (partial or final) to the output row.
* Final results are computed with the aggregation function.
* Partial results are the accumulators themselves.
*
* @param accumulators the accumulators (saved in a row) which contains the current
* aggregated results
* @param output output results collected in a row
*/
def setAggregationResults(accumulators: Row, output: Row)
/**
* Copies forwarded fields, such as grouping keys, from input row to output row.
*
* @param input input values bundled in a row
* @param output output results collected in a row
*/
def setForwardedFields(input: Row, output: Row)
/**
* Sets constant flags (boolean fields) to an output row.
*
* @param output The output row to which the constant flags are set.
*/
def setConstantFlags(output: Row)
/**
* Accumulates the input values to the accumulators.
*
* @param accumulators the accumulators (saved in a row) which contains the current
* aggregated results
* @param input input values bundled in a row
*/
def accumulate(accumulators: Row, input: Row)
/**
* Retracts the input values from the accumulators.
*
* @param accumulators the accumulators (saved in a row) which contains the current
* aggregated results
* @param input input values bundled in a row
*/
def retract(accumulators: Row, input: Row)
/**
   * Initializes the accumulators and saves them to an accumulators row.
*
* @return a row of accumulators which contains the aggregated results
*/
def createAccumulators(): Row
/**
* Creates an output row object with the correct arity.
*
* @return an output row object with the correct arity.
*/
def createOutputRow(): Row
/**
* Merges two rows of accumulators into one row.
*
* @param a First row of accumulators
* @param b The other row of accumulators
* @return A row with the merged accumulators of both input rows.
*/
def mergeAccumulatorsPair(a: Row, b: Row): Row
/**
* Resets all the accumulators.
*
* @param accumulators the accumulators (saved in a row) which contains the current
* aggregated results
*/
def resetAccumulator(accumulators: Row)
}
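// Illustrative sketch (hand-written, not produced by Flink's code generator):
// the approximate shape of a generated class for a single COUNT(*) aggregate
// with one forwarded grouping key at index 0. Real implementations are
// generated and compiled at runtime; the row layout here is only an assumption.
class CountOnlyAggregationsSketch extends GeneratedAggregations {

  // accumulator row layout: [ count: Long ]
  override def createAccumulators(): Row = {
    val accumulators = new Row(1)
    accumulators.setField(0, 0L)
    accumulators
  }

  // output row layout: [ grouping key, count ]
  override def createOutputRow(): Row = new Row(2)

  override def setForwardedFields(input: Row, output: Row): Unit =
    output.setField(0, input.getField(0))

  override def setConstantFlags(output: Row): Unit = {
    // this sketch has no constant flags to set
  }

  override def accumulate(accumulators: Row, input: Row): Unit =
    accumulators.setField(0, accumulators.getField(0).asInstanceOf[Long] + 1L)

  override def retract(accumulators: Row, input: Row): Unit =
    accumulators.setField(0, accumulators.getField(0).asInstanceOf[Long] - 1L)

  override def setAggregationResults(accumulators: Row, output: Row): Unit =
    output.setField(1, accumulators.getField(0))

  override def mergeAccumulatorsPair(a: Row, b: Row): Row = {
    val merged = createAccumulators()
    merged.setField(0, a.getField(0).asInstanceOf[Long] + b.getField(0).asInstanceOf[Long])
    merged
  }

  override def resetAccumulator(accumulators: Row): Unit =
    accumulators.setField(0, 0L)
}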
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/batch/sql/AggregationsITCase.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.batch.sql
import java.sql.Timestamp
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala.batch.utils.TableProgramsCollectionTestBase
import org.apache.flink.table.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.scala.batch.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.types.Row
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.test.util.TestBaseUtils
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
@RunWith(classOf[Parameterized])
class AggregationsITCase(
configMode: TableConfigMode)
extends TableProgramsCollectionTestBase(configMode) {
@Test
def testAggregationTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT sum(_1), min(_1), max(_1), count(_1), avg(_1) FROM MyTable"
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.registerDataSet("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "231,1,21,21,11"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testTableAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT sum(_1) FROM MyTable"
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "231"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testDataSetAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT sum(_1) FROM MyTable"
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.registerDataSet("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "231"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationDataTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT avg(a), avg(b), avg(c), avg(d), avg(e), avg(f), count(g), " +
"min(g), min('Ciao'), max(g), max('Ciao'), sum(CAST(f AS DECIMAL)) FROM MyTable"
val ds = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv, 'a, 'b, 'c, 'd, 'e, 'f, 'g)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "1,1,1,1,1.5,1.5,2,Ciao,Ciao,Hello,Ciao,3.0"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testTableProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT avg(a), sum(a), count(a), avg(b), sum(b) " +
"FROM MyTable"
val ds = env.fromElements((1: Byte, 1: Short), (2: Byte, 2: Short)).toTable(tEnv, 'a, 'b)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "1,3,2,1,3"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testTableAggregationWithArithmetic(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT avg(a + 2) + 2, count(b) + 5 " +
"FROM MyTable"
val ds = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv, 'a, 'b)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "5.5,7"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationWithTwoCount(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT count(_1), count(_2) FROM MyTable"
val ds = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "2,2"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationAfterProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT avg(a), sum(b), count(c) FROM " +
"(SELECT _1 as a, _2 as b, _3 as c FROM MyTable)"
val ds = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "1,3,2"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testDistinctAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT sum(_1) as a, count(distinct _3) as b FROM MyTable"
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected = "231,21"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedDistinctAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT _2, avg(distinct _1) as a, count(_3) as b FROM MyTable GROUP BY _2"
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val expected =
"6,18,6\n5,13,5\n4,8,4\n3,5,3\n2,2,2\n1,1,1"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupingSetAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery =
"SELECT _2, _3, avg(_1) as a, GROUP_ID() as g FROM MyTable GROUP BY GROUPING SETS (_2, _3)"
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.registerDataSet("MyTable", ds)
val result = tEnv.sql(sqlQuery).toDataSet[Row].collect()
val expected =
"6,null,18,1\n5,null,13,1\n4,null,8,1\n3,null,5,1\n2,null,2,1\n1,null,1,1\n" +
"null,Luke Skywalker,6,2\nnull,I am fine.,5,2\nnull,Hi,1,2\n" +
"null,Hello world, how are you?,4,2\nnull,Hello world,3,2\nnull,Hello,2,2\n" +
"null,Comment#9,15,2\nnull,Comment#8,14,2\nnull,Comment#7,13,2\n" +
"null,Comment#6,12,2\nnull,Comment#5,11,2\nnull,Comment#4,10,2\n" +
"null,Comment#3,9,2\nnull,Comment#2,8,2\nnull,Comment#15,21,2\n" +
"null,Comment#14,20,2\nnull,Comment#13,19,2\nnull,Comment#12,18,2\n" +
"null,Comment#11,17,2\nnull,Comment#10,16,2\nnull,Comment#1,7,2"
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
@Test
def testAggregateEmptyDataSets(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery = "SELECT avg(a), sum(a), count(b) " +
"FROM MyTable where a = 4 group by a"
val sqlQuery2 = "SELECT avg(a), sum(a), count(b) " +
"FROM MyTable where a = 4"
val sqlQuery3 = "SELECT avg(a), sum(a), count(b) " +
"FROM MyTable"
val ds = env.fromElements(
(1: Byte, 1: Short),
(2: Byte, 2: Short))
.toTable(tEnv, 'a, 'b)
tEnv.registerTable("MyTable", ds)
val result = tEnv.sql(sqlQuery)
val result2 = tEnv.sql(sqlQuery2)
val result3 = tEnv.sql(sqlQuery3)
val results = result.toDataSet[Row].collect()
val expected = Seq.empty
val results2 = result2.toDataSet[Row].collect()
val expected2 = "null,null,0"
val results3 = result3.toDataSet[Row].collect()
val expected3 = "1,3,2"
assert(results.equals(expected),
"Empty result is expected for grouped set, but actual: " + results)
TestBaseUtils.compareResultAsText(results2.asJava, expected2)
TestBaseUtils.compareResultAsText(results3.asJava, expected3)
}
@Test
def testTumbleWindowAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val sqlQuery =
"SELECT b, SUM(a), COUNT(*)" +
"FROM T " +
"GROUP BY b, TUMBLE(ts, INTERVAL '3' SECOND)"
val ds = CollectionDataSets.get3TupleDataSet(env)
// create timestamps
.map(x => (x._1, x._2, x._3, new Timestamp(x._1 * 1000)))
tEnv.registerDataSet("T", ds, 'a, 'b, 'c, 'ts)
val result = tEnv.sql(sqlQuery).toDataSet[Row].collect()
val expected = Seq(
"1,1,1",
"2,2,1", "2,3,1",
"3,9,2", "3,6,1",
"4,15,2", "4,19,2",
"5,11,1", "5,39,3", "5,15,1",
"6,33,2", "6,57,3", "6,21,1"
).mkString("\n")
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
@Test
def testHopWindowAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
env.setParallelism(1)
val sqlQuery =
"SELECT b, SUM(a), COUNT(*)" +
"FROM T " +
"GROUP BY b, HOP(ts, INTERVAL '2' SECOND, INTERVAL '4' SECOND)"
val ds = CollectionDataSets.get3TupleDataSet(env)
// create timestamps
.map(x => (x._1, x._2, x._3, new Timestamp(x._1 * 1000)))
tEnv.registerDataSet("T", ds, 'a, 'b, 'c, 'ts)
val result = tEnv.sql(sqlQuery).toDataSet[Row].collect()
val expected = Seq(
"1,1,1","1,1,1",
"2,5,2","2,5,2",
"3,9,2", "3,15,3", "3,6,1",
"4,7,1", "4,24,3", "4,27,3", "4,10,1",
"5,11,1", "5,36,3", "5,54,4", "5,29,2",
"6,33,2", "6,70,4", "6,78,4", "6,41,2"
).mkString("\n")
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
@Test
def testSessionWindowAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
env.setParallelism(1)
val sqlQuery =
"SELECT MIN(a), MAX(a), SUM(a), COUNT(*)" +
"FROM T " +
"GROUP BY SESSION(ts, INTERVAL '4' SECOND)"
val ds = CollectionDataSets.get3TupleDataSet(env)
// create timestamps
.filter(x => (x._2 % 2) == 0)
.map(x => (x._1, x._2, x._3, new Timestamp(x._1 * 1000)))
tEnv.registerDataSet("T", ds, 'a, 'b, 'c, 'ts)
val result = tEnv.sql(sqlQuery).toDataSet[Row].collect()
val expected = Seq(
"2,10,39,6",
"16,21,111,6"
).mkString("\n")
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/AggregateFunction.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions
import java.util.{List => JList}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.TableException
/**
* Base class for User-Defined Aggregates.
*
* @tparam T the type of the aggregation result
*/
abstract class AggregateFunction[T] extends UserDefinedFunction {
/**
   * Creates and initializes the Accumulator for this [[AggregateFunction]].
*
* @return the accumulator with the initial value
*/
def createAccumulator(): Accumulator
/**
* Retracts the input values from the accumulator instance. The current design assumes the
* inputs are the values that have been previously accumulated.
*
* @param accumulator the accumulator which contains the current
* aggregated results
   * @param input the input value (usually obtained from newly arrived data)
*/
def retract(accumulator: Accumulator, input: Any): Unit = {
throw TableException("Retract is an optional method. There is no default implementation. You " +
"must implement one for yourself.")
}
/**
   * Called every time an aggregation result should be materialized.
* The returned value could be either an early and incomplete result
* (periodically emitted as data arrive) or the final result of the
* aggregation.
*
* @param accumulator the accumulator which contains the current
* aggregated results
* @return the aggregation result
*/
def getValue(accumulator: Accumulator): T
/**
   * Processes the input values and updates the provided accumulator instance.
*
* @param accumulator the accumulator which contains the current
* aggregated results
   * @param input the input value (usually obtained from newly arrived data)
*/
def accumulate(accumulator: Accumulator, input: Any): Unit
/**
* Merges a list of accumulator instances into one accumulator instance.
*
* IMPORTANT: You may only return a new accumulator instance or the first accumulator of the
* input list. If you return another instance, the result of the aggregation function might be
* incorrect.
*
* @param accumulators the [[java.util.List]] of accumulators that will be merged
* @return the resulting accumulator
*/
def merge(accumulators: JList[Accumulator]): Accumulator
/**
* Resets the Accumulator for this [[AggregateFunction]].
*
* @param accumulator the accumulator which needs to be reset
*/
def resetAccumulator(accumulator: Accumulator): Unit
/**
* Returns the [[TypeInformation]] of the accumulator.
   * This function is optional and can be implemented if the accumulator type cannot be
   * automatically inferred from the instance returned by [[createAccumulator()]].
*
* @return The type information for the accumulator.
*/
def getAccumulatorType: TypeInformation[_] = null
}
/**
* Base class for aggregate Accumulator. The accumulator is used to keep the
* aggregated values which are needed to compute an aggregation result.
* The state of the function must be put into the accumulator.
*
* TODO: We have the plan to have the accumulator and return types of
* functions dynamically provided by the users. This needs the refactoring
* of the AggregateFunction interface with the code generation. We will remove
* the [[Accumulator]] once codeGen for UDAGG is completed (FLINK-5813).
*/
trait Accumulator
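// Illustrative sketch (not part of the original file): a minimal user-defined
// aggregate that sums Long values, showing how the contract above is typically
// implemented. The class names are hypothetical; a production accumulator
// would usually also provide an explicit accumulator type.
class LongSumAccumulatorSketch extends Accumulator {
  var sum: Long = 0L
}

class LongSumAggFunctionSketch extends AggregateFunction[Long] {

  override def createAccumulator(): Accumulator = new LongSumAccumulatorSketch

  override def accumulate(accumulator: Accumulator, input: Any): Unit = {
    if (input != null) {
      accumulator.asInstanceOf[LongSumAccumulatorSketch].sum += input.asInstanceOf[Long]
    }
  }

  override def retract(accumulator: Accumulator, input: Any): Unit = {
    if (input != null) {
      accumulator.asInstanceOf[LongSumAccumulatorSketch].sum -= input.asInstanceOf[Long]
    }
  }

  override def getValue(accumulator: Accumulator): Long =
    accumulator.asInstanceOf[LongSumAccumulatorSketch].sum

  override def merge(accumulators: JList[Accumulator]): Accumulator = {
    val ret = accumulators.get(0).asInstanceOf[LongSumAccumulatorSketch]
    var i = 1
    while (i < accumulators.size()) {
      ret.sum += accumulators.get(i).asInstanceOf[LongSumAccumulatorSketch].sum
      i += 1
    }
    ret
  }

  override def resetAccumulator(accumulator: Accumulator): Unit = {
    accumulator.asInstanceOf[LongSumAccumulatorSketch].sum = 0L
  }
}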
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/Types.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.api.common.typeinfo.{Types, TypeInformation}
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
import org.apache.flink.api.common.typeinfo.{Types => JTypes}
/**
* This class enumerates all supported types of the Table API.
*/
object Types extends JTypes {
val STRING = JTypes.STRING
val BOOLEAN = JTypes.BOOLEAN
val BYTE = JTypes.BYTE
val SHORT = JTypes.SHORT
val INT = JTypes.INT
val LONG = JTypes.LONG
val FLOAT = JTypes.FLOAT
val DOUBLE = JTypes.DOUBLE
val DECIMAL = JTypes.DECIMAL
val SQL_DATE = JTypes.SQL_DATE
val SQL_TIME = JTypes.SQL_TIME
val SQL_TIMESTAMP = JTypes.SQL_TIMESTAMP
val INTERVAL_MONTHS = TimeIntervalTypeInfo.INTERVAL_MONTHS
val INTERVAL_MILLIS = TimeIntervalTypeInfo.INTERVAL_MILLIS
/**
   * Generates RowTypeInfo with default field names (f0, f1, ...).
* same as ``new RowTypeInfo(types)``
*
* @param types of Row fields. e.g. ROW(Types.STRING, Types.INT)
*/
def ROW[T: Manifest](types: TypeInformation[_]*) = {
JTypes.ROW(types: _*)
}
/**
* Generates RowTypeInfo.
* same as ``new RowTypeInfo(types, names)``
*
* @param fields of Row. e.g. ROW(("name", Types.STRING), ("number", Types.INT))
*/
def ROW_NAMED(fields: (String, TypeInformation[_])*) = {
val names = fields.toList.map(_._1).toArray
val types = fields.toList.map(_._2)
JTypes.ROW_NAMED(names, types: _*)
}
}
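// Illustrative usage sketch (hypothetical object, not part of the original
// file): shows how the ROW and ROW_NAMED helpers above are typically called.
object TypesUsageSketch {
  // row type with default field names f0, f1
  val pairType: TypeInformation[_] = Types.ROW(Types.STRING, Types.INT)
  // row type with explicit field names
  val personType: TypeInformation[_] =
    Types.ROW_NAMED(("name", Types.STRING), ("number", Types.INT))
}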
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/CountAggFunction.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.util.{List => JList}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple1 => JTuple1}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.table.functions.{Accumulator, AggregateFunction}
/** The initial accumulator for the count aggregate function */
class CountAccumulator extends JTuple1[Long] with Accumulator {
f0 = 0L //count
}
/**
 * Built-in count aggregate function.
*/
class CountAggFunction extends AggregateFunction[Long] {
override def accumulate(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
accumulator.asInstanceOf[CountAccumulator].f0 += 1L
}
}
override def retract(accumulator: Accumulator, value: Any): Unit = {
if (value != null) {
accumulator.asInstanceOf[CountAccumulator].f0 -= 1L
}
}
override def getValue(accumulator: Accumulator): Long = {
accumulator.asInstanceOf[CountAccumulator].f0
}
override def merge(accumulators: JList[Accumulator]): Accumulator = {
val ret = accumulators.get(0).asInstanceOf[CountAccumulator]
var i: Int = 1
while (i < accumulators.size()) {
ret.f0 += accumulators.get(i).asInstanceOf[CountAccumulator].f0
i += 1
}
ret
}
override def createAccumulator(): Accumulator = {
new CountAccumulator
}
override def resetAccumulator(accumulator: Accumulator): Unit = {
accumulator.asInstanceOf[CountAccumulator].f0 = 0L
}
override def getAccumulatorType(): TypeInformation[_] = {
new TupleTypeInfo((new CountAccumulator).getClass, BasicTypeInfo.LONG_TYPE_INFO)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamValues.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Values
import org.apache.calcite.rex.RexLiteral
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.table.api.StreamTableEnvironment
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.CodeGenerator
import org.apache.flink.table.runtime.io.ValuesInputFormat
import org.apache.flink.types.Row
import scala.collection.JavaConverters._
/**
* DataStream RelNode for LogicalValues.
*/
class DataStreamValues(
cluster: RelOptCluster,
traitSet: RelTraitSet,
rowRelDataType: RelDataType,
tuples: ImmutableList[ImmutableList[RexLiteral]],
ruleDescription: String)
extends Values(cluster, rowRelDataType, tuples, traitSet)
with DataStreamRel {
override def deriveRowType() = rowRelDataType
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new DataStreamValues(
cluster,
traitSet,
getRowType,
getTuples,
ruleDescription
)
}
override def translateToPlan(tableEnv: StreamTableEnvironment): DataStream[Row] = {
val config = tableEnv.getConfig
val returnType = FlinkTypeFactory.toInternalRowTypeInfo(getRowType)
val generator = new CodeGenerator(config)
// generate code for every record
val generatedRecords = getTuples.asScala.map { r =>
generator.generateResultExpression(
returnType,
getRowType.getFieldNames.asScala,
r.asScala)
}
// generate input format
val generatedFunction = generator.generateValuesInputFormat(
ruleDescription,
generatedRecords.map(_.code),
returnType)
val inputFormat = new ValuesInputFormat[Row](
generatedFunction.name,
generatedFunction.code,
generatedFunction.returnType)
tableEnv.execEnv.createInput(inputFormat, returnType)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamLogicalWindowAggregateRule.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.datastream
import java.math.BigDecimal
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex.{RexBuilder, RexCall, RexLiteral, RexNode}
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.flink.table.api.{TableException, Window}
import org.apache.flink.table.api.scala.{Session, Slide, Tumble}
import org.apache.flink.table.expressions.Literal
import org.apache.flink.table.functions.TimeModeTypes
import org.apache.flink.table.plan.rules.common.LogicalWindowAggregateRule
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
class DataStreamLogicalWindowAggregateRule
extends LogicalWindowAggregateRule("DataStreamLogicalWindowAggregateRule") {
/** Returns a zero literal of the correct time type */
override private[table] def getInAggregateGroupExpression(
rexBuilder: RexBuilder,
windowExpression: RexCall): RexNode = createZeroLiteral(rexBuilder, windowExpression)
/** Returns a zero literal of the correct time type */
override private[table] def getOutAggregateGroupExpression(
rexBuilder: RexBuilder,
windowExpression: RexCall): RexNode = createZeroLiteral(rexBuilder, windowExpression)
private def createZeroLiteral(
rexBuilder: RexBuilder,
windowExpression: RexCall): RexNode = {
val timeType = windowExpression.operands.get(0).getType
timeType match {
case TimeModeTypes.ROWTIME =>
rexBuilder.makeAbstractCast(
TimeModeTypes.ROWTIME,
rexBuilder.makeLiteral(0L, TimeModeTypes.ROWTIME, true))
case TimeModeTypes.PROCTIME =>
rexBuilder.makeAbstractCast(
TimeModeTypes.PROCTIME,
rexBuilder.makeLiteral(0L, TimeModeTypes.PROCTIME, true))
case _ =>
throw TableException(s"""Unexpected time type $timeType encountered""")
}
}
override private[table] def translateWindowExpression(
windowExpr: RexCall,
rowType: RelDataType): Window = {
def getOperandAsLong(call: RexCall, idx: Int): Long =
call.getOperands.get(idx) match {
case v : RexLiteral => v.getValue.asInstanceOf[BigDecimal].longValue()
case _ => throw new TableException("Only constant window descriptors are supported")
}
windowExpr.getOperator match {
case SqlStdOperatorTable.TUMBLE =>
val interval = getOperandAsLong(windowExpr, 1)
val w = Tumble.over(Literal(interval, TimeIntervalTypeInfo.INTERVAL_MILLIS))
val window = windowExpr.getType match {
case TimeModeTypes.PROCTIME => w
case TimeModeTypes.ROWTIME => w.on("rowtime")
}
window.as("w$")
case SqlStdOperatorTable.HOP =>
val (slide, size) = (getOperandAsLong(windowExpr, 1), getOperandAsLong(windowExpr, 2))
val w = Slide
.over(Literal(size, TimeIntervalTypeInfo.INTERVAL_MILLIS))
.every(Literal(slide, TimeIntervalTypeInfo.INTERVAL_MILLIS))
val window = windowExpr.getType match {
case TimeModeTypes.PROCTIME => w
case TimeModeTypes.ROWTIME => w.on("rowtime")
}
window.as("w$")
case SqlStdOperatorTable.SESSION =>
val gap = getOperandAsLong(windowExpr, 1)
val w = Session.withGap(Literal(gap, TimeIntervalTypeInfo.INTERVAL_MILLIS))
val window = windowExpr.getType match {
case TimeModeTypes.PROCTIME => w
case TimeModeTypes.ROWTIME => w.on("rowtime")
}
window.as("w$")
}
}
}
object DataStreamLogicalWindowAggregateRule {
val INSTANCE = new DataStreamLogicalWindowAggregateRule
}
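// Illustrative mapping sketch (informal, comments only): with the rule above,
// a SQL group window such as
//   GROUP BY TUMBLE(ts, INTERVAL '3' SECOND)
// is translated into the Table API window
//   Tumble.over(Literal(3000L, TimeIntervalTypeInfo.INTERVAL_MILLIS)).on("rowtime").as("w$")
// when the time attribute has ROWTIME type; HOP and SESSION calls are mapped
// to Slide and Session windows in the same way.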
|
streamline-eu/dynamic-flink | flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingTaskManager.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.testingUtils
import org.apache.flink.runtime.clusterframework.types.ResourceID
import org.apache.flink.runtime.io.disk.iomanager.IOManager
import org.apache.flink.runtime.io.network.NetworkEnvironment
import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService
import org.apache.flink.runtime.memory.MemoryManager
import org.apache.flink.runtime.metrics.MetricRegistry
import org.apache.flink.runtime.taskexecutor.TaskManagerConfiguration
import org.apache.flink.runtime.taskmanager.{TaskManager, TaskManagerLocation}
import scala.language.postfixOps
/** Subclass of the [[TaskManager]] to support testing messages
*/
class TestingTaskManager(
config: TaskManagerConfiguration,
resourceID: ResourceID,
connectionInfo: TaskManagerLocation,
memoryManager: MemoryManager,
ioManager: IOManager,
network: NetworkEnvironment,
numberOfSlots: Int,
leaderRetrievalService: LeaderRetrievalService,
metricRegistry : MetricRegistry)
extends TaskManager(
config,
resourceID,
connectionInfo,
memoryManager,
ioManager,
network,
numberOfSlots,
leaderRetrievalService,
metricRegistry)
with TestingTaskManagerLike {
def this(
config: TaskManagerConfiguration,
connectionInfo: TaskManagerLocation,
memoryManager: MemoryManager,
ioManager: IOManager,
network: NetworkEnvironment,
numberOfSlots: Int,
leaderRetrievalService: LeaderRetrievalService,
metricRegistry : MetricRegistry) {
this(
config,
ResourceID.generate(),
connectionInfo,
memoryManager,
ioManager,
network,
numberOfSlots,
leaderRetrievalService,
metricRegistry)
}
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalAggregate.scala | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import java.util.{List => JList}
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.{Aggregate, AggregateCall}
import org.apache.calcite.rel.logical.LogicalAggregate
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.util.ImmutableBitSet
import org.apache.flink.table.plan.nodes.FlinkConventions
class FlinkLogicalAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
child: RelNode,
indicator: Boolean,
groupSet: ImmutableBitSet,
groupSets: JList[ImmutableBitSet],
aggCalls: JList[AggregateCall])
extends Aggregate(cluster, traitSet, child, indicator, groupSet, groupSets, aggCalls)
with FlinkLogicalRel {
override def copy(
traitSet: RelTraitSet,
input: RelNode,
indicator: Boolean,
groupSet: ImmutableBitSet,
groupSets: JList[ImmutableBitSet],
aggCalls: JList[AggregateCall]): Aggregate = {
new FlinkLogicalAggregate(cluster, traitSet, input, indicator, groupSet, groupSets, aggCalls)
}
override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val child = this.getInput
val rowCnt = metadata.getRowCount(child)
val rowSize = this.estimateRowSize(child.getRowType)
val aggCnt = this.aggCalls.size
planner.getCostFactory.makeCost(rowCnt, rowCnt * aggCnt, rowCnt * rowSize)
}
}
private class FlinkLogicalAggregateConverter
extends ConverterRule(
classOf[LogicalAggregate],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalAggregateConverter") {
override def matches(call: RelOptRuleCall): Boolean = {
val agg = call.rel(0).asInstanceOf[LogicalAggregate]
!agg.containsDistinctCall()
}
override def convert(rel: RelNode): RelNode = {
val agg = rel.asInstanceOf[LogicalAggregate]
val traitSet = rel.getTraitSet.replace(FlinkConventions.LOGICAL)
val newInput = RelOptRule.convert(agg.getInput, FlinkConventions.LOGICAL)
new FlinkLogicalAggregate(
rel.getCluster,
traitSet,
newInput,
agg.indicator,
agg.getGroupSet,
agg.getGroupSets,
agg.getAggCallList)
}
}
object FlinkLogicalAggregate {
val CONVERTER: ConverterRule = new FlinkLogicalAggregateConverter()
}
|
streamline-eu/dynamic-flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/BatchScan.scala |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.plan.nodes.CommonScan
import org.apache.flink.table.plan.schema.FlinkTable
import org.apache.flink.types.Row
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
trait BatchScan extends CommonScan with DataSetRel {
protected def convertToInternalRow(
input: DataSet[Any],
flinkTable: FlinkTable[_],
config: TableConfig)
: DataSet[Row] = {
val inputType = input.getType
val internalType = FlinkTypeFactory.toInternalRowTypeInfo(getRowType)
// conversion
if (needsConversion(inputType, internalType)) {
val mapFunc = getConversionMapper(
config,
inputType,
internalType,
"DataSetSourceConversion",
getRowType.getFieldNames,
Some(flinkTable.fieldIndexes))
val opName = s"from: (${getRowType.getFieldNames.asScala.toList.mkString(", ")})"
input.map(mapFunc).name(opName)
}
// no conversion necessary, forward
else {
input.asInstanceOf[DataSet[Row]]
}
}
}
|