repo_name | path | text
---|---|---|
MaksGovor/FP-labs | lab2_OOP/src/main/scala/objsets/PostReader.scala | <reponame>MaksGovor/FP-labs
package objsets
import java.lang.StringBuffer
object PostReader {
object Parseposts {
def regexParser(s: String): List[Map[String, Any]] = {
// In real life, you would use an actual JSON library...
val postRegex = """^\{ .*"user": "([^"]+)", "text": "([^"]+)", "likes": ([\\.0-9]+) \},?$""".r
s.split("\r?\n").toList.tail.init.map {
case postRegex(user, text, likes) => Map("user" -> user, "text" -> text, "likes" -> likes.toDouble)
}
}
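// Illustrative sketch (not part of the original file): for input shaped like the output of
// `unparseToData` below, wrapped in bracket lines, e.g.
//   "[\n{ \"user\": \"bob\", \"text\": \"hi\", \"likes\": 5.0 },\n]"
// `regexParser` drops the first and last lines via `tail.init` and yields
//   List(Map("user" -> "bob", "text" -> "hi", "likes" -> 5.0))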
def getposts(user: String, json: String): List[Post] = {
for {
map <- regexParser(json)
} yield {
val text = map("text")
val likes = map("likes")
new Post(user, text.toString, likes.toString.toDouble.toInt)
}
}
def getPostData(user: String, json: String): List[Post] = {
// parse the raw JSON into a list of field maps
val l = regexParser(json)
for {
map <- l
} yield {
val text = map("text")
val likes = map("likes")
new Post(user, text.toString, likes.toString.toDouble.toInt)
}
}
}
def topostset(l: List[Post]): PostSet =
l.foldLeft(new Empty(): PostSet)(_.incl(_))
def unparseToData(tws: List[Post]): String = {
val buf = new StringBuffer()
tws.foreach { tw =>
val json = "{ \"user\": \"" + tw.user + "\", \"text\": \"" +
tw.text.replaceAll(""""""", "\\\\\\\"") + "\", \"likes\": " +
tw.likes + ".0 }"
buf.append(json + ",\n")
}
buf.toString
}
val sites = List("gizmodo", "TechCrunch", "engadget", "amazondeals", "CNET", "gadgetlab", "mashable")
private val gizmodoposts = PostReader.Parseposts.getPostData("gizmodo", PostData.gizmodo)
private val techCrunchposts = PostReader.Parseposts.getPostData("TechCrunch", PostData.TechCrunch)
private val engadgetposts = PostReader.Parseposts.getPostData("engadget", PostData.engadget)
private val amazondealsposts = PostReader.Parseposts.getPostData("amazondeals", PostData.amazondeals)
private val cnetposts = PostReader.Parseposts.getPostData("CNET", PostData.CNET)
private val gadgetlabposts = PostReader.Parseposts.getPostData("gadgetlab", PostData.gadgetlab)
private val mashableposts = PostReader.Parseposts.getPostData("mashable", PostData.mashable)
private val sources = List(gizmodoposts, techCrunchposts, engadgetposts, amazondealsposts, cnetposts, gadgetlabposts, mashableposts)
val postMap: Map[String, List[Post]] =
Map() ++ Seq((sites(0) -> gizmodoposts),
(sites(1) -> techCrunchposts),
(sites(2) -> engadgetposts),
(sites(3) -> amazondealsposts),
(sites(4) -> cnetposts),
(sites(5) -> gadgetlabposts),
(sites(6) -> mashableposts))
val postsets: List[PostSet] = sources.map(posts => topostset(posts))
private def unionOfAllpostsets(curSets: List[PostSet], acc: PostSet): PostSet =
if (curSets.isEmpty)
acc
else
unionOfAllpostsets(curSets.tail, acc.union(curSets.head))
val allposts: PostSet = unionOfAllpostsets(postsets, new Empty())
}
|
MaksGovor/FP-labs | lab2_OOP/build.sbt | <gh_stars>0
name := "lab2_OOP"
version := "0.1"
scalaVersion := "2.13.6"
libraryDependencies += "org.scalameta" %% "munit" % "0.4.3" % Test
|
MaksGovor/FP-labs | scalashop/src/main/scala/scalashop/Interfaces.scala | package scalashop
// Interfaces used by the grading infrastructure. Do not change signatures
// or your submission will fail with a NoSuchMethodError.
trait HorizontalBoxBlurInterface {
def blur(src: Img, dst: Img, from: Int, end: Int, radius: Int): Unit
def parBlur(src: Img, dst: Img, numTasks: Int, radius: Int): Unit
}
trait VerticalBoxBlurInterface {
def blur(src: Img, dst: Img, from: Int, end: Int, radius: Int): Unit
def parBlur(src: Img, dst: Img, numTasks: Int, radius: Int): Unit
}
trait BoxBlurKernelInterface {
def boxBlurKernel(src: Img, x: Int, y: Int, radius: Int): RGBA
}
|
MaksGovor/FP-labs | streams/src/main/scala/streams/StringParserTerrain.scala | <reponame>MaksGovor/FP-labs
package streams
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
inside the terrain)
* - `T` denotes the final position of the block (which is also considered
inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {
/**
* An ASCII representation of the terrain. This field should remain
* abstract here.
*/
val level: String
/**
* This method returns the terrain function that represents the terrain
* in `levelVector`. The vector contains the parsed version of the `level`
* string. For example, the following level
*
* val level =
* """ST
* |oo
* |oo""".stripMargin
*
* is represented as
*
* Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o'))
*
* The resulting function should return `true` if the position `pos` is
* a valid position (not a '-' character) inside the terrain described
* by `levelVector`.
*/
def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = pos => {
try {
levelVector(pos.row)(pos.col) != '-'
} catch {
case err: IndexOutOfBoundsException => false
}
}
/**
* This function should return the position of character `c` in the
* terrain described by `levelVector`. You can assume that `c`
* appears exactly once in the terrain.
*
* Hint: you can use the functions `indexWhere` and / or `indexOf` of the
* `Vector` class
*/
def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
val row = levelVector.indexWhere(_.contains(c))
Pos(row, levelVector(row).indexOf(c))
}
private lazy val vector: Vector[Vector[Char]] =
Vector(level.split("\r?\n").map(str => Vector(str: _*)).toIndexedSeq: _*)
lazy val terrain: Terrain = terrainFunction(vector)
lazy val startPos: Pos = findChar('S', vector)
lazy val goal: Pos = findChar('T', vector)
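// Hedged usage sketch (assumed, not from the original sources; it relies on `GameDef` supplying the
// remaining members):
//   object Demo extends StringParserTerrain {
//     val level =
//       """ST
//         |oo
//         |oo""".stripMargin
//   }
//   Demo.terrain(Pos(0, 0))  // true  -- the 'S' cell
//   Demo.terrain(Pos(0, 5))  // false -- outside the parsed vector
//   Demo.startPos            // Pos(0, 0); Demo.goal is Pos(0, 1)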
}
|
MaksGovor/FP-labs | quickcheck/src/main/scala/quickcheck/IndividualTask.scala | <filename>quickcheck/src/main/scala/quickcheck/IndividualTask.scala
package quickcheck
import org.scalacheck._
import Arbitrary._
import Gen._
import Prop._
import scala.annotation.tailrec
trait IndividualTask {
lazy val posGen: Gen[Int] = choose(2, 100)
lazy val uncertaintyGen: Gen[Int] = oneOf(const(0), const(1))
@tailrec
final def iterateFact(x: Int, result: BigInt = 1): BigInt =
if (x == 0) result
else iterateFact(x - 1, result * x)
val countExpression: PartialFunction[Int, BigInt] = {
case x: Int if (x < 0 || x > 1) => {
if (x < 0) 0
else if (x >= 0 && x <= 1) throw new IllegalArgumentException("function not defined on [0, 1]")
else iterateFact(x)
}
}
val liftedCE: Int => Option[BigInt] = countExpression.lift
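// Illustrative values (a sketch, not in the original file):
//   liftedCE(-5) == Some(BigInt(0))   // negative arguments map to 0
//   liftedCE(0)  == None              // [0, 1] is outside the partial function's domain, so lift yields None
//   liftedCE(4)  == Some(BigInt(24))  // 4! for arguments greater than 1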
}
object QuickCheckIndividualTask extends Properties("IndividualTask") with IndividualTask {
property("all numbers less than zero must return zero") = forAll { (x: Int) =>
x < 0 ==> {
liftedCE(x).contains(0)
}
}
property("in the area of uncertainty returns None") = forAll(uncertaintyGen)((x: Int) => liftedCE(x).isEmpty)
property("factorial should be returned for all positive numbers > 1") = forAll(posGen) { (x: Int) =>
liftedCE(x).contains(iterateFact(x))
}
}
|
MaksGovor/FP-labs | scalashop/src/test/scala/scalashop/BlurSuite.scala | <gh_stars>0
package scalashop
import java.util.concurrent._
import scala.collection._
import org.junit._
import org.junit.Assert.{assertArrayEquals, assertEquals}
class BlurSuite {
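// `boxBlurKernelBad` below averages the packed RGBA integers directly instead of averaging the red,
// green, blue and alpha channels separately; the channel-separation test further down uses it to show
// how its results diverge from the real `boxBlurKernel`.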
def boxBlurKernelBad(src: Img, x: Int, y: Int, radius: Int): RGBA = {
val clampX = clamp(_: Int, 0, src.width - 1)
val clampY = clamp(_: Int, 0, src.height - 1)
var res, counter = 0
var xs = clampX(x - radius)
while (xs <= clampX(x + radius)) {
var ys = clampY(y - radius)
while (ys <= clampY(y + radius)) {
res += src(xs, ys)
ys += 1
counter += 1
}
xs += 1
}
res / counter
}
@Test def `boxBlurKernel should return the correct value on an interior pixel of a 3x4 image with radius 1`: Unit = {
val src = new Img(3, 4,
Array(
4, 5, 7,
1, 2, 4,
5, 12, 1,
10, 3, 4
))
assert(boxBlurKernel(src, 0, 0, 1) == 3)
assert(boxBlurKernel(src, 1, 0, 1) == 3)
assert(boxBlurKernel(src, 2, 0, 1) == 4)
assert(boxBlurKernel(src, 0, 2, 1) == 5)
assert(boxBlurKernel(src, 1, 2, 1) == 4)
assert(boxBlurKernel(src, 2, 2, 1) == 4)
}
@Test def `boxBlurKernel should correctly handle radius 0`: Unit = {
val src = new Img(3, 3, Array(1, 2, 3, 4, 5, 6, 7, 8, 9))
for (x <- 0 until 3; y <- 0 until 3) {
assert(src(x, y) == boxBlurKernel(src, x, y, 0))
}
}
@Test def `boxBlurKernel should compute the averages of red, blue, green and alpha channels separately`: Unit = {
val src = new Img(3, 3, Array(256, 258, 123, 255, 256, 19, 12, 12, 7))
assert(boxBlurKernel(src, 0, 0, 1) == 64 && boxBlurKernelBad(src, 0, 0, 1) == 256)
assert(boxBlurKernel(src, 1, 0, 1) == 66 && boxBlurKernelBad(src, 1, 0, 1) == 194)
assert(boxBlurKernel(src, 2, 0, 1) == 36 && boxBlurKernelBad(src, 2, 0, 1) == 164)
}
@Test def `HorizontalBoxBlur/VerticalBoxBlur parBlur with 32 tasks should modify each pixel exactly once`: Unit = {
val src = new Img(32, 32)
val dstH = new Img(32, 32)
val dstHPar = new Img(32, 32)
val dstV = new Img(32, 32)
val dstVPar = new Img(32, 32)
val numTasks = 32
for (x <- 0 until 32; y <- 0 until 32) src(x, y) = rgba(2 * x, 3 * y, x + y, x)
HorizontalBoxBlur.blur(src, dstH, 0, src.height, 3)
HorizontalBoxBlur.parBlur(src, dstHPar, numTasks, 3)
VerticalBoxBlur.blur(src, dstV, 0, src.width, 3)
VerticalBoxBlur.parBlur(src, dstVPar, numTasks, 3)
for (x <- 0 until 32; y <- 0 until 32) {
assert(dstH(x, y) == dstHPar(x, y))
assert(dstV(x, y) == dstVPar(x, y))
}
}
@Test def `HorizontalBoxBlur/VerticalBoxBlur with radius 1 and 4 tasks should correctly blur the image` = {
val src = new Img(3, 3, Array(35, 122, 145, 92, 231, 12, 46, 81, 182))
val dstV = new Img(3, 3)
val dstH = new Img(3, 3)
HorizontalBoxBlur.blur(src, dstH, 0, src.height, 1)
VerticalBoxBlur.blur(src, dstV, 0, src.width, 1)
for (x <- 0 until 3; y <- 0 until 3) {
assert(dstH(x, y) == boxBlurKernel(src, x, y, 1))
assert(dstV(x, y) == boxBlurKernel(src, x, y, 1))
}
}
@Test def `HorizontalBoxBlur/VerticalBoxBlur parBlur should not forget the last strip` = {
val src = new Img(32, 32)
for (x <- 0 until 32; y <- 0 until 32) src(x, y) = rgba(2 * x, 3 * y, x + y, x)
val dstHPar = new Img(32, 32)
val dstVPar = new Img(32, 32)
val numTasks = 16
val range = 0 to src.width by src.width / Math.min(src.width, numTasks)
val (from, to) = (range zip range.tail).last
HorizontalBoxBlur.parBlur(src, dstHPar, numTasks, 3)
VerticalBoxBlur.parBlur(src, dstVPar, numTasks, 3)
for (cr <- from until to; rc <- 0 until 32) {
assert(dstHPar(rc, cr) == boxBlurKernel(src, rc, cr, 3))
assert(dstVPar(cr, rc) == boxBlurKernel(src, cr, rc, 3))
}
}
@Rule def individualTestTimeout = new org.junit.rules.Timeout(10 * 1000)
}
|
MaksGovor/FP-labs | streams/build.sbt | name := "lab3_Collections"
version := "0.1"
scalaVersion := "2.13.6"
libraryDependencies += "org.scalameta" %% "munit" % "0.7.27" % Test |
MaksGovor/FP-labs | streams/src/test/scala/streams/BloxorzSuite.scala | <filename>streams/src/test/scala/streams/BloxorzSuite.scala
package streams
class BloxorzSuite extends munit.FunSuite {
trait SolutionChecker extends GameDef with Solver with StringParserTerrain {
/**
* This method applies a list of moves `ls` to the block at position
* `startPos`. This can be used to verify if a certain list of moves
* is a valid solution, i.e. leads to the goal.
*/
def solve(ls: List[Move]): Block =
ls.foldLeft(startBlock) { case (block, move) =>
require(block.isLegal) // The solution must always lead to legal blocks
move match {
case Left => block.left
case Right => block.right
case Up => block.up
case Down => block.down
}
}
}
trait Level1 extends SolutionChecker {
/* terrain for level 1*/
val level =
"""ooo-------
|oSoooo----
|ooooooooo-
|-ooooooooo
|-----ooToo
|------ooo-""".stripMargin
val optsolution = List(Right, Right, Down, Right, Right, Right, Down)
}
test("terrain function level 1 (10pts)") {
new Level1 {
assert(terrain(Pos(0, 0)), "0,0")
assert(terrain(Pos(1, 1)), "1,1") // start
assert(terrain(Pos(4, 7)), "4,7") // goal
assert(terrain(Pos(5, 8)), "5,8")
assert(!terrain(Pos(5, 9)), "5,9")
assert(terrain(Pos(4, 9)), "4,9")
assert(!terrain(Pos(6, 8)), "6,8")
assert(!terrain(Pos(4, 11)), "4,11")
assert(!terrain(Pos(-1, 0)), "-1,0")
assert(!terrain(Pos(0, -1)), "0,-1")
}
}
test("find char level 1 (10pts)") {
new Level1 {
assertEquals(Pos(1, 1), startPos)
}
}
test("optimal solution for level 1 (5pts)") {
new Level1 {
assertEquals(Block(goal, goal), solve(solution))
}
}
test("optimal solution length for level 1 (5pts)") {
new Level1 {
assertEquals(optsolution.length, solution.length)
}
}
test("neighborsWithHistory for level 1") {
new Level1 {
val actual = neighborsWithHistory(Block(Pos(1,1),Pos(1,1)), List(Left,Up)).toSet
val expected: Set[(Block, List[Move])] = Set(
(Block(Pos(1,2),Pos(1,3)), List(Right,Left,Up)),
(Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
)
assertEquals(actual, expected, "the neighbors are in the wrong place")
}
}
test("newNeighborsOnly for level 1") {
new Level1 {
val actual = newNeighborsOnly(
Set(
(Block(Pos(1,2),Pos(1,3)), List(Right,Left,Up)),
(Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
).to(LazyList),
Set(Block(Pos(1,2),Pos(1,3)), Block(Pos(1,1),Pos(1,1)))
)
val expected: LazyList[(Block, List[Move])] = Set(
(Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
).to(LazyList)
assertEquals(actual, expected, "newNeighborsOnly should drop already-explored blocks")
}
}
import scala.concurrent.duration._
override val munitTimeout: FiniteDuration = 10.seconds
}
|
MaksGovor/FP-labs | streams/src/test/scala/streams/IndividualTaskSuite.scala | package streams
import streams.IndividualTask.{countExpression, toList}
class IndividualTaskSuite extends munit.FunSuite {
val inc: BigInt => BigInt = x => x + 1
val deInc: BigInt => BigInt = x => x - 1
val transform = List(inc, deInc)
val range = (-250 to 25)
val testList = range filter countExpression.isDefinedAt map countExpression
test("Test map on testList") {
val testMap = testList.map(elem => transform.foldLeft(elem)((v, f) => f(v)))
assertEquals(testMap, testList)
}
test("Test takeWhile, dropWhile, span on testList") {
val testTakeWhile = testList.takeWhile(_ <= 40320)
val testDropWhile = testList.dropWhile(_ <= 40320)
val testSpan = testList.span(_ <= 40320)
val expectedTakenList = (-250 to 8).filter(countExpression.isDefinedAt).map(countExpression)
val expectedDroppedList = (9 to 25).filter(countExpression.isDefinedAt).map(countExpression)
assertEquals(testTakeWhile, expectedTakenList)
assertEquals(testDropWhile, expectedDroppedList)
assertEquals(testSpan, (expectedTakenList, expectedDroppedList))
}
test("Test find, count on testList") {
assertEquals(testList.find(_ == 6), Some(BigInt(6)))
assertEquals(testList.find(_ <= 6), Some(BigInt(0)))
assertEquals(testList.find(_ < 0), None)
assertEquals(testList.count(_ == 6), 1)
assertEquals(testList.count(_ <= 6), 252)
}
test("Test scanLeft, scanRight on testList") {
val toTest = testList.filter(x => x > 0 && x <= 720) // List(2, 6, 24, 120, 720)
val expectedLeft = List(0, 2, 8, 32, 152, 872).map(BigInt(_)).toIndexedSeq
val expectedRight = List(872, 870, 864, 840, 720, 0).map(BigInt(_)).toIndexedSeq
assertEquals(toTest.scanLeft(BigInt(0))(_ + _), expectedLeft)
assertEquals(toTest.scanRight(BigInt(0))(_ + _), expectedRight)
}
test("Test reduceLeft, reduceRight, foldLeft, foldRight on testList") {
val toTest = testList.filter(x => x > 0 && x <= 720) // List(2, 6, 24, 120, 720)
assertEquals(toTest.reduceLeft(_ - _), BigInt(-868))
assertEquals(toTest.reduceRight(_ - _), BigInt(620))
assertEquals(toTest.foldLeft(BigInt(8))(_ - _), BigInt(-864))
assertEquals(toTest.foldRight(BigInt(720))(_ - _), BigInt(-100))
}
test("Test collect on testList") {
val toTest = testList.filter(x => x > 0 && x <= 720) // List(2, 6, 24, 120, 720)
val filterAndMap: PartialFunction[BigInt, BigInt] = {
case x: BigInt if x > 2 && x < 100 => x + 1
}
val expected = List(7, 25).map(BigInt(_)).toIndexedSeq
assertEquals(toTest collect filterAndMap, expected)
}
import scala.concurrent.duration._
override val munitTimeout: FiniteDuration = 10.seconds
}
|
MaksGovor/FP-labs | effective-codecs/codecs/src/main/scala/codecs/Codecs.scala | package codecs
/**
* A data type modeling JSON values.
*
* For example, the `42` integer JSON value can be modeled as `Json.Num(42)`
*/
enum Json:
/**
* Try to decode this JSON value into a value of type `A` by using
* the given decoder.
*
* Note that you have to explicitly fix `A` type parameter when you call the method:
*
* {{{
* someJsonValue.decodeAs[User] // OK
* someJsonValue.decodeAs // Wrong!
* }}}
*/
def decodeAs[A](using decoder: Decoder[A]): Option[A] = decoder.decode(this)
/** The JSON `null` value */
case Null
/** JSON boolean values */
case Bool(value: Boolean)
/** JSON numeric values */
case Num(value: BigDecimal)
/** JSON string values */
case Str(value: String)
/** JSON objects */
case Obj(fields: Map[String, Json])
/** JSON arrays */
case Arr(items: List[Json])
end Json
/**
* A type class that turns a value of type `A` into its JSON representation.
*/
trait Encoder[-A]:
/** Encodes a value of type `A` into JSON */
def encode(value: A): Json
/**
* Transforms this `Encoder[A]` into an `Encoder[B]`, given a transformation function
* from `B` to `A`.
*
* For instance, given an `Encoder[String]`, we can get an `Encoder[UUID]`:
*
* {{{
* def uuidEncoder(using stringEncoder: Encoder[String]): Encoder[UUID] =
* stringEncoder.transform[UUID](uuid => uuid.toString)
* }}}
*
* This operation is also known as “contramap”.
*/
def transform[B](f: B => A): Encoder[B] =
Encoder.fromFunction[B](value => this.encode(f(value)))
end Encoder
object Encoder extends EncoderInstances:
/**
* Convenient method for creating an instance of encoder from a function `f`
*/
def fromFunction[A](f: A => Json) = new Encoder[A] {
def encode(value: A): Json = f(value)
}
end Encoder
trait EncoderInstances:
/** An encoder for the `Unit` value */
given unitEncoder: Encoder[Unit] =
Encoder.fromFunction(_ => Json.Null)
/** An encoder for `Int` values */
given intEncoder: Encoder[Int] =
Encoder.fromFunction(n => Json.Num(BigDecimal(n)))
/** An encoder for `String` values */
// TODO Implement the `Encoder[String]` given instance (Done)
given stringEncoder: Encoder[String] =
Encoder.fromFunction(s => Json.Str(s))
/** An encoder for `Boolean` values */
// TODO Define a given instance of type `Encoder[Boolean]` (Done)
given boolEncoder: Encoder[Boolean] =
Encoder.fromFunction(b => Json.Bool(b))
/**
* Encodes a list of values of type `A` into a JSON array containing
* the list elements encoded with the given `encoder`
*/
given listEncoder[A](using encoder: Encoder[A]): Encoder[List[A]] =
Encoder.fromFunction(as => Json.Arr(as.map(encoder.encode)))
end EncoderInstances
/**
* A specialization of `Encoder` that returns JSON objects only
*/
trait ObjectEncoder[-A] extends Encoder[A]:
// Refines the encoding result to `Json.Obj`
def encode(value: A): Json.Obj
/**
* Combines `this` encoder with `that` encoder.
* Returns an encoder producing a JSON object containing both
* fields of `this` encoder and fields of `that` encoder.
*/
def zip[B](that: ObjectEncoder[B]): ObjectEncoder[(A, B)] =
ObjectEncoder.fromFunction { (a, b) =>
Json.Obj(this.encode(a).fields ++ that.encode(b).fields)
}
end ObjectEncoder
object ObjectEncoder:
/**
* Convenient method for creating an instance of object encoder from a function `f`
*/
def fromFunction[A](f: A => Json.Obj): ObjectEncoder[A] = new ObjectEncoder[A] {
def encode(value: A): Json.Obj = f(value)
}
/**
* An encoder for values of type `A` that produces a JSON object with one field
* named according to the supplied `name` and containing the encoded value.
*/
def field[A](name: String)(using encoder: Encoder[A]): ObjectEncoder[A] =
ObjectEncoder.fromFunction(a => Json.Obj(Map(name -> encoder.encode(a))))
end ObjectEncoder
/**
* The dual of an encoder. Decodes a serialized value into its initial type `A`.
*/
trait Decoder[+A]:
/**
* @param data The data to de-serialize
* @return The decoded value wrapped in `Some`, or `None` if decoding failed
*/
def decode(data: Json): Option[A]
/**
* Combines `this` decoder with `that` decoder.
* Returns a decoder that invokes both `this` decoder and `that`
* decoder and returns a pair of decoded value in case both succeed,
* or `None` if at least one failed.
*/
def zip[B](that: Decoder[B]): Decoder[(A, B)] =
Decoder.fromFunction { json =>
this.decode(json).zip(that.decode(json))
}
/**
* Transforms this `Decoder[A]` into a `Decoder[B]`, given a transformation function
* from `A` to `B`.
*
* This operation is also known as “map”.
*/
def transform[B](f: A => B): Decoder[B] =
Decoder.fromFunction(json => this.decode(json).map(f))
end Decoder
object Decoder extends DecoderInstances:
/**
* Convenient method to build a decoder instance from a function `f`
*/
def fromFunction[A](f: Json => Option[A]): Decoder[A] = new Decoder[A] {
def decode(data: Json): Option[A] = f(data)
}
/**
* Alternative method for creating decoder instances
*/
def fromPartialFunction[A](pf: PartialFunction[Json, A]): Decoder[A] =
fromFunction(pf.lift)
end Decoder
trait DecoderInstances:
/** A decoder for the `Unit` value */
given unitDecoder: Decoder[Unit] =
Decoder.fromPartialFunction { case Json.Null => () }
/** A decoder for `Int` values. Hint: use the `isValidInt` method of `BigDecimal`. */
// TODO Define a given instance of type `Decoder[Int]` (Done)
given intDecoder: Decoder[Int] =
Decoder.fromPartialFunction { case Json.Num(num) if num.isValidInt => num.toInt }
/** A decoder for `String` values */
// TODO Define a given instance of type `Decoder[String]` (Done)
given stringDecoder: Decoder[String] =
Decoder.fromPartialFunction { case Json.Str(str) => str }
/** A decoder for `Boolean` values */
// TODO Define a given instance of type `Decoder[Boolean]` (Done)
given boolDecoder: Decoder[Boolean] =
Decoder.fromPartialFunction { case Json.Bool(bool) => bool }
/**
* A decoder for JSON arrays. It decodes each item of the array
* using the given `decoder`. The resulting decoder succeeds only
* if all the JSON array items are successfully decoded.
*/
// TODO (Done)
given listDecoder[A](using decoder: Decoder[A]): Decoder[List[A]] =
// Decode the provided `item` with the provided `decoder`. If this succeeds,
// return the decoded item **prepended** to the `previouslyDecodedItems`.
def decodeAndPrepend(item: Json, previouslyDecodedItems: List[A]): Option[List[A]] =
decoder.decode(item) match
case Some(decodedValue) => Some(decodedValue :: previouslyDecodedItems)
case None => None
// Decode the provided `item` only if the previous items were successfully decoded.
// In case `maybePreviouslyDecodedItems` is `None` (which means that at least
// one of the previous items failed to be decoded), return `None`.
// Otherwise, decode the provided `item` and prepend it to the previously
// decoded items (use the method `decodeAndPrepend`).
def processItem(item: Json, maybePreviouslyDecodedItems: Option[List[A]]): Option[List[A]] =
maybePreviouslyDecodedItems match
case Some(arr) => decodeAndPrepend(item, arr)
case None => None
// Decodes all the provided JSON items. Fails if any item fails to
// be decoded.
// Iterates over the items, and tries to decode each item if the
// previous items could be successfully decoded.
def decodeAllItems(items: List[Json]): Option[List[A]] =
items.foldRight(Some(List.empty[A]))(processItem)
// Finally, write a decoder that checks whether the JSON value to decode
// is a JSON array.
// - if it is the case, call `decodeAllItems` on the array items,
// - otherwise, return a failure (`None`)
Decoder.fromFunction {
case Json.Arr(arr) => decodeAllItems(arr)
case _ => None
}
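// Worked example (a sketch, not part of the original file), assuming the `intDecoder` above:
//   decodeAllItems(List(Json.Num(1), Json.Num(2))) folds from the right, prepending each decoded
//   item, and returns Some(List(1, 2)); if any element fails to decode, e.g. Json.Str("x"),
//   the whole result collapses to None, which is the behaviour required of `listDecoder`.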
/**
* A decoder for JSON objects. It decodes the value of a field of
* the supplied `name` using the given `decoder`.
*/
// TODO (Done)
def field[A](name: String)(using decoder: Decoder[A]): Decoder[A] =
Decoder.fromFunction {
case Json.Obj(obj) if obj.contains(name) => decoder.decode(obj(name))
case _ => None
}
end DecoderInstances
case class Person(name: String, age: Int)
object Person extends PersonCodecs
trait PersonCodecs:
/** The encoder for `Person` */
given Encoder[Person] =
ObjectEncoder.field[String]("name")
.zip(ObjectEncoder.field[Int]("age"))
.transform[Person](user => (user.name, user.age))
/** The corresponding decoder for `Person`.
* Hint: create the decoders for the `name` and `age` JSON fields
* by using the method `Decoder.field`
* Hint: combine the decoders by using their methods `zip` and
* `transform`.
*/
// TODO (Done)
given Decoder[Person] =
Decoder.field[String]("name")
.zip(Decoder.field[Int]("age"))
.transform[Person]({ case (name, age) => Person(name, age) })
end PersonCodecs
case class Contacts(people: List[Person])
object Contacts extends ContactsCodecs
trait ContactsCodecs:
// TODO Define the encoder and the decoder for `Contacts` (Done)
// The JSON representation of a value of type `Contacts` should be
// a JSON object with a single field named “people” containing an
// array of values of type `Person` (reuse the `Person` codecs)
given Encoder[Contacts] =
ObjectEncoder.field[List[Person]]("people")
.transform[Contacts](c => c.people)
given Decoder[Contacts] =
Decoder.field[List[Person]]("people")
.transform[Contacts](Contacts(_))
end ContactsCodecs
case class Book(authors: List[String], name: String)
object Book extends BookCodecs
trait BookCodecs:
// TODO (Done)
// The JSON representation of a value of type `Book` should be
// a JSON object with two fields: "authors", containing
// an array of strings with the authors' names, and
// "name", a string containing the title of the book (reuse the `List[String]` and `String` codecs)
// For example: { "authors": ["<NAME>", "<NAME>"], "name": "Introduction to Functional Programming" }
/** The encoder for `Book` */
given Encoder[Book] =
ObjectEncoder.field[List[String]]("authors")
.zip(ObjectEncoder.field[String]("name"))
.transform[Book](b => (b.authors, b.name))
/** The decoder for `Book` */
given Decoder[Book] =
Decoder.field[List[String]]("authors")
.zip(Decoder.field[String]("name"))
.transform[Book]({ case (authors, name) => Book(authors, name)})
end BookCodecs
// In case you want to try your code, here is a simple `Main`
// that can be used as a starting point. Otherwise, you can use
// the REPL (use the `console` sbt task).
import Util.*
@main def run(): Unit =
println(renderJson(42))
println(renderJson("foo"))
val maybeJsonString = parseJson(""" "foo" """)
val maybeJsonNumber = parseJson(""" 42 """)
val maybeJsonArray = parseJson(""" [1, 2, 3, 4, 5, 6] """)
val maybeJsonObj = parseJson(""" { "name": "Alice", "age": 42 } """)
val maybeJsonObj2 = parseJson(""" { "name": "Alice", "age": "42" } """)
// Uncomment the following lines as you progress in the assignment
println(maybeJsonString.flatMap(_.decodeAs[Int]))
println(maybeJsonString.flatMap(_.decodeAs[String]))
println(maybeJsonNumber.flatMap(_.decodeAs[Int]))
println(maybeJsonArray.flatMap(_.decodeAs[List[Int]]))
println(maybeJsonObj.flatMap(_.decodeAs[Person]))
println(maybeJsonObj2.flatMap(_.decodeAs[Person]))
println(renderJson(Person("Bob", 66)))
val contacts = parseAndDecode[Contacts](
""" { "people": [{ "name": "Alice", "age": 42 },
|{ "name": "John", "age": 26 }] } """.stripMargin)
println(contacts)
println(renderJson(Contacts(List(Person("Bob", 66), Person("Jane", 19)))))
val book = parseAndDecode[Book](
""" { "authors": ["<NAME>", "<NAME>"],
|"name": "Introduction to Functional Programming" } """.stripMargin)
println(book)
println(renderJson(Book(
List("<NAME>", "<NAME>."),
"Structure and Interpretation of Computer Programs")
))
end run
|
MaksGovor/FP-labs | lab_recursion/src/main/scala/recfun/Main.scala | <reponame>MaksGovor/FP-labs
package recfun
import scala.annotation.tailrec
object Main {
def main(args: Array[String]) {
println("Pascal's Triangle")
for (row <- 0 to 10) {
for (col <- 0 to row)
print(pascal(col, row) + " ")
println()
}
println(calcExpression(6))
}
/**
* Exercise 1
*/
def pascal(c: Int, r: Int): Int = {
if (c < 0 || r < 0 || r < c) throw new IllegalArgumentException("Unacceptable index values")
if (c == 0 || c == r) 1
else pascal(c - 1, r - 1) + pascal(c, r - 1)
}
/**
* Exercise 2
*/
def balance(chars: List[Char]): Boolean = {
@tailrec
def loop(chars: List[Char], open: Int): Boolean = {
if (chars.isEmpty) open == 0
else if (open < 0) false
else if (chars.head == '(') loop(chars.tail, open + 1)
else if (chars.head == ')') loop(chars.tail, open - 1)
else loop(chars.tail, open)
}
loop(chars, 0)
}
// def balance(chars: List[Char]) {
// @tailrec
// def loop(chars: List[Char], open: Int = 0): Boolean = chars match {
// case Nil => open == 0
// case _ if open < 0 => false
// case "(" :: tail => loop(tail, open+1)
// case ")" :: tail => loop(tail, open-1)
// case _ :: tail => loop(tail, open)
// }
//
// loop(chars, 0)
// }
/**
* Exercise 3
*/
def countChange(money: Int, coins: List[Int]): Int = {
if (money < 0 || coins.isEmpty) 0
else if (money == 0) 1
else countChange(money - coins.head, coins) + countChange(money, coins.tail)
}
// def countChange(money: Int, coins: List[Int]): Int = coins match {
// case _ if money == 0 => 1
// case _ if money < 0 => 0
// case Nil => 0
// case head :: tail if money > 0 => countChange(money - head, head :: tail) + countChange(money, tail)
// }
/**
* Exercise 4
*/
def calcExpression(x: Int): Int = {
@tailrec
def iterateFact(x: Int, result: Int = 1): Int =
if (x == 0) result
else iterateFact(x - 1, result * x)
if (x == 0 || x == 1) throw new IllegalArgumentException("function not defined on [0, 1]")
else if (x < 0) 0
else iterateFact(x)
}
}
|
MaksGovor/FP-labs | lab_recursion/build.sbt | <filename>lab_recursion/build.sbt<gh_stars>0
name := "lab_recursion"
version := "0.1"
scalaVersion := "2.13.6"
libraryDependencies += "org.scalatest" %% "scalatest" % "3.1.0" % Test
libraryDependencies += "junit" % "junit" % "4.10" % Test
libraryDependencies += "org.scalatestplus" %% "junit-4-13" % "3.2.9.0" % "test"
|
MaksGovor/FP-labs | effective-codecs/codecs/build.sbt | scalaVersion := "3.1.0"
scalacOptions ++= Seq("-deprecation")
libraryDependencies ++= Seq(
"org.typelevel" %% "jawn-parser" % "1.1.2",
"org.scalameta" %% "munit" % "0.7.26" % Test,
"org.scalameta" %% "munit-scalacheck" % "0.7.26" % Test
)
|
MaksGovor/FP-labs | streams/src/main/scala/streams/IndividualTask.scala | <reponame>MaksGovor/FP-labs<gh_stars>0
package streams
import scala.annotation.tailrec
object IndividualTask {
def main(args: Array[String]) {
println("hello stub")
val range = (-250 to 25)
range.foreach(println)
val list = toList(range, countExpression2)
list.foreach(println)
// val listMethod = range filter countExpression.isDefinedAt map countExpression
}
val countExpression = new PartialFunction[Int, BigInt] {
def apply(x: Int): BigInt = {
@tailrec
def iterateFact(x: Int, result: BigInt = 1): BigInt =
if (x == 0) result
else iterateFact(x - 1, result * x)
if (x < 0) 0
else if (x >= 0 && x <= 1) throw new IllegalArgumentException("function not defined on [0, 1]")
else iterateFact(x)
}
def isDefinedAt(x: Int): Boolean = (x < 0 || x > 1)
}
val countExpression2: PartialFunction[Int, BigInt] = {
case x: Int if (x < 0 || x > 1) => {
def iterateFact(x: Int, result: BigInt = 1): BigInt =
if (x == 0) result
else iterateFact(x - 1, result * x)
if (x < 0) 0
else if (x >= 0 && x <= 1) throw new IllegalArgumentException("function not defined on [0, 1]")
else iterateFact(x)
}
}
def toList(range: Seq[Int], f: PartialFunction[Int, BigInt]): List[BigInt] = {
for (x <- range.toList if f isDefinedAt x) yield f(x)
}
} |
MaksGovor/FP-labs | scalashop/src/main/scala/scalashop/IndividualTask.scala | package scalashop
import java.util.concurrent.{ConcurrentSkipListSet, TimeUnit}
import scala.annotation.tailrec
import scala.collection._
import scala.collection.parallel.CollectionConverters._
import scala.collection.parallel.ParSet
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Try, Success, Failure}
object IndividualTask {
implicit val globalExecutionContext: ExecutionContext = ExecutionContext.global
def main(args: Array[String]): Unit = {
val rangeT = -250 to 25
val dst = new Array[Option[BigInt]](rangeT.length)
val numTasks = 5
// Using task
// countIntervalPar(rangeT, dst, numTasks)
// dst.foreach(println)
// Using parallel collections
// val resParCol = mapCountInterval(rangeT.par.toSet)
// resParCol.toArray.foreach(println)
// Using Futures
// val maxWaitTime: FiniteDuration = Duration(5, TimeUnit.SECONDS)
// val resFuture = Await.result(collectCountInterval(rangeT), maxWaitTime)
// resFuture.toArray.foreach(println)
// Using Recursion & parallel
val resRec = countIntervalRecPar(rangeT, 4)
resRec.foreach(println)
}
@tailrec
final def iterateFact(x: Int, result: BigInt = 1): BigInt =
if (x == 0) result
else iterateFact(x - 1, result * x)
val countExpression: PartialFunction[Int, BigInt] = {
case x: Int if (x < 0 || x > 1) => {
if (x < 0) 0
else if (x >= 0 && x <= 1) throw new IllegalArgumentException("function not defined on [0, 1]")
else iterateFact(x)
}
}
val liftedCE: Int => Option[BigInt] = countExpression.lift
def countInterval(range: Range, start: Int, end: Int, dst: Array[Option[BigInt]]) = {
for (x <- start to end) dst(x) = liftedCE(range(x))
}
def countIntervalPar(range: Range, dst: Array[Option[BigInt]], numTasks: Int) = {
val step = range.length / Math.min(range.length, numTasks)
val byStep = 0 to range.length by step
val intervals = byStep zip byStep.tail
val tasks = for ((from, to) <- intervals) yield task(countInterval(range, from, to, dst))
for (t <- tasks) t.join()
}
def mapCountInterval(src: ParSet[Int]) = {
val result = new ConcurrentSkipListSet[BigInt]()
for (x <- src) if (countExpression.isDefinedAt(x)) {
result.add(countExpression(x))
}
result
}
def countIntervalRecPar(range: Range, maxDepth: Int): Array[Option[BigInt]] = {
val res = new Array[Option[BigInt]](range.length)
def countParallel (from: Int, to: Int, depth: Int): Unit = {
if (depth == maxDepth || from == to) {
countInterval(range, from, to, res)
} else {
val mid = (from + to) / 2
scalashop.parallel(
countParallel(mid, to, depth + 1),
countParallel(from, mid - 1, depth + 1)
)
}
}
if (maxDepth % 2 == 0) countParallel(0, res.length - 1, 0)
res
}
def collectCountInterval(range: Range): Future[IndexedSeq[BigInt]] = {
Future(range collect countExpression)
}
}
|
MaksGovor/FP-labs | quickcheck/src/main/scala/quickcheck/Heap.scala | package quickcheck
trait IntHeap extends Heap {
override type A = Int
override def ord = scala.math.Ordering.Int
}
// http://www.brics.dk/RS/96/37/BRICS-RS-96-37.pdf
// Figure 1, page 3
trait Heap {
type H // type of a heap
type A // type of an element
def ord: Ordering[A] // ordering on elements
def empty: H // the empty heap
def isEmpty(h: H): Boolean // whether the given heap h is empty
def insert(x: A, h: H): H // the heap resulting from inserting x into h
def meld(h1: H, h2: H): H // the heap resulting from merging h1 and h2
def findMin(h: H): A // a minimum of the heap h
def deleteMin(h: H): H // a heap resulting from deleting a minimum of h
}
// Figure 3, page 7
trait BinomialHeap extends Heap {
type Rank = Int
case class Node(x: A, r: Rank, c: List[Node])
override type H = List[Node]
protected def root(t: Node) = t.x
protected def rank(t: Node) = t.r
protected def link(t1: Node, t2: Node): Node = // t1.r == t2.r
if (ord.lteq(t1.x, t2.x)) Node(t1.x, t1.r + 1, t2 :: t1.c) else Node(t2.x, t2.r + 1, t1 :: t2.c)
protected def ins(t: Node, ts: H): H = ts match {
case Nil => List(t)
case tp :: ts => // t.r <= tp.r
if (t.r < tp.r) t :: tp :: ts else ins(link(t, tp), ts)
}
override def empty = Nil
override def isEmpty(ts: H) = ts.isEmpty
override def insert(x: A, ts: H) = ins(Node(x, 0, Nil), ts)
override def meld(ts1: H, ts2: H) = (ts1, ts2) match {
case (Nil, ts) => ts
case (ts, Nil) => ts
case (t1 :: ts1, t2 :: ts2) =>
if (t1.r < t2.r) t1 :: meld(ts1, t2 :: ts2)
else if (t2.r < t1.r) t2 :: meld(t1 :: ts1, ts2)
else ins(link(t1, t2), meld(ts1, ts2))
}
override def findMin(ts: H) = ts match {
case Nil => throw new NoSuchElementException("min of empty heap")
case t :: Nil => root(t)
case t :: ts =>
val x = findMin(ts)
if (ord.lteq(root(t), x)) root(t) else x
}
override def deleteMin(ts: H) = ts match {
case Nil => throw new NoSuchElementException("delete min of empty heap")
case t :: ts =>
def getMin(t: Node, ts: H): (Node, H) = ts match {
case Nil => (t, Nil)
case tp :: tsp =>
val (tq, tsq) = getMin(tp, tsp)
if (ord.lteq(root(t), root(tq))) (t, ts) else (tq, t :: tsq)
}
val (Node(_, _, c), tsq) = getMin(t, ts)
meld(c.reverse, tsq)
}
}
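// Hedged usage sketch (not in the original sources): the heap is exercised through the `IntHeap`
// refinement defined above, e.g.
//   object IntBinomialHeap extends BinomialHeap with IntHeap
//   import IntBinomialHeap._
//   val h = insert(3, insert(1, insert(2, empty)))
//   findMin(h)            // 1
//   findMin(deleteMin(h)) // 2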
trait Bogus1BinomialHeap extends BinomialHeap {
override def findMin(ts: H) = ts match {
case Nil => throw new NoSuchElementException("min of empty heap")
case t :: ts => root(t)
}
}
trait Bogus2BinomialHeap extends BinomialHeap {
override protected def link(t1: Node, t2: Node): Node = // t1.r == t2.r
if (!ord.lteq(t1.x, t2.x)) Node(t1.x, t1.r + 1, t2 :: t1.c) else Node(t2.x, t2.r + 1, t1 :: t2.c)
}
trait Bogus3BinomialHeap extends BinomialHeap {
override protected def link(t1: Node, t2: Node): Node = // t1.r == t2.r
if (ord.lteq(t1.x, t2.x)) Node(t1.x, t1.r + 1, t1 :: t1.c) else Node(t2.x, t2.r + 1, t2 :: t2.c)
}
trait Bogus4BinomialHeap extends BinomialHeap {
override def deleteMin(ts: H) = ts match {
case Nil => throw new NoSuchElementException("delete min of empty heap")
case t :: ts => meld(t.c.reverse, ts)
}
}
trait Bogus5BinomialHeap extends BinomialHeap {
override def meld(ts1: H, ts2: H) = ts1 match {
case Nil => ts2
case t1 :: ts1 => List(Node(t1.x, t1.r, ts1 ++ ts2))
}
}
|
devership16/INF553-YelpProject | src/data_preparation/Gov_Data_Las_Vegas_Prep.scala | <filename>src/data_preparation/Gov_Data_Las_Vegas_Prep.scala<gh_stars>1-10
import java.io.File
import org.apache.hadoop.fs.FileUtil
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/**
* Created by kaurr.
*/
object Gov_Data_Las_Vegas_Prep {
def main(args: Array[String]): Unit = {
//replace x with args(0)
val inputPath = "/Users/apple/Documents/DM/Project/las_vegas/Restaurant_Inspections_LV.csv"
//replace y with args(1)
val outputPath = "/Users/apple/Documents/DM/Project/las_vegas/Restaurant_Inspections_LV_Final.csv"
val conf = new SparkConf()
conf.setAppName("Datasets Test")
conf.setMaster("local[2]")
val spark_context = new SparkContext(conf)
val sparkSession = SparkSession
.builder()
.config(conf)
.getOrCreate()
val dfTags = sparkSession
.read
.option("header", "true")
.option("inferSchema", "false")
.csv(inputPath)
.toDF()
val myCols = dfTags.select("Restaurant Name","Location Name" ,"Category Name", "Address", "City", "State", "Zip", "Current Demerits","Current Grade",
"Inspection Date","Inspection Demerits","Inspection Grade","Permit Status","Inspection Result","Violations","Date Current")
//count number of violations
val newDf = myCols.columns.foldLeft(myCols)((curr, n) => curr.withColumnRenamed(n, n.replaceAll("\\s", "")))
def range = udf((violations: String) => {
violations.split(",").length
})
val violationsCount = newDf.withColumn("ViolationsCount",range(col("Violations")))
//replace zip with only first five digits
def zipFormat = udf((zip: String) => {
val lastindex = zip.indexOf('-')
var result = zip
if(lastindex != -1) result = zip.substring(0,lastindex)
result
})
val zipFormatted = violationsCount.withColumn("ZipCode",zipFormat(col("Zip"))).drop("Zip").withColumnRenamed("ZipCode","Zip")
zipFormatted.take(4).foreach(println)
def addScoreFromGrade = udf((grade: String) => {
var score = 0
if (grade == "A") score = 95
else if (grade == "B") score = 85
else if (grade == "C") score = 75
else if (grade == "N") score = 35
else if (grade == "O") score = 35
else if (grade == "X") score = 65
else if (grade == "P") score = 35
else if (grade == "S") score = 35
score
})
val scoresFromGradesCurrent = zipFormatted.withColumn("CurrentScore",addScoreFromGrade(col("CurrentGrade")))
val scoresFromGradesInspection = scoresFromGradesCurrent.withColumn("InspectionScore",addScoreFromGrade(col("InspectionGrade")))
val uniqueDf = scoresFromGradesInspection.groupBy("RestaurantName", "LocationName", "CategoryName", "Address","City","State","Zip")
.agg(sum("ViolationsCount") as "Violations",sum("CurrentDemerits") as "CurrentDemerits",
sum("InspectionDemerits") as "InspectionDemerits",avg("CurrentScore")
as "CurrentScore",avg("InspectionScore") as "InspectionScore"
)
def addGradeFromScore = udf((score_number: String) => {
val score = score_number.toDouble
var grade = ""
if (score >= 90) grade = "A"
else if (score >= 80 && score < 90) grade = "B"
else if (score >= 70 && score < 80) grade = "C"
else if (score >= 60 && score < 70) grade = "D"
else if (score >0 && score <60) grade = "E"
grade
})
val finalDf1 = uniqueDf.withColumn("CurrentGrade",addGradeFromScore(col("CurrentScore")))
val finalDf2 = finalDf1.withColumn("InspectionGrade",addGradeFromScore(col("InspectionScore")))
val file = outputPath.substring(0,outputPath.lastIndexOf('/'))+"/temp.csv"
FileUtil.fullyDelete(new File(file))
val destinationFile = outputPath
FileUtil.fullyDelete(new File(destinationFile))
finalDf2.coalesce(1).write.option("header", "true").csv(file)
merge(file, destinationFile)
val currentGradeCount = finalDf2.groupBy("CurrentGrade").count().show();
val inspectionGradeCount = finalDf2.groupBy("InspectionGrade").count().show();
}
def merge(srcPath: String, dstPath: String): Unit = {
val hadoopConfig = new Configuration()
val hdfs = FileSystem.get(hadoopConfig)
FileUtil.copyMerge(hdfs, new Path(srcPath), hdfs, new Path(dstPath), true, hadoopConfig, null)
}
}
|
seglo/akka-projection | build.sbt | import akka.projections.Dependencies
scalaVersion := "2.13.1"
val commonSettings = Seq(
organization := "com.lightbend.akka",
scalacOptions ++= List(
"-unchecked",
"-deprecation",
"-language:_",
"-Xfatal-warnings",
"-Ywarn-unused",
"-encoding",
"UTF-8"),
javacOptions ++= List("-Xlint:unchecked", "-Xlint:deprecation"))
lazy val akkaProjectionCore =
Project(id = "akka-projection-core", base = file("akka-projection-core")).settings(Dependencies.core)
lazy val akkaProjectionTestkit = Project(id = "akka-projection-testkit", base = file("akka-projection-testkit"))
.settings(libraryDependencies ++= Seq(Dependencies.Test.scalaTest))
.dependsOn(akkaProjectionCore)
lazy val root = Project(id = "akka-projection", base = file(".")).aggregate(akkaProjectionCore, akkaProjectionTestkit)
// check format and headers
TaskKey[Unit]("verifyCodeFmt") := {
scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
"Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code")
}
(Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
"Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code")
}
}
addCommandAlias("verifyCodeStyle", "headerCheckAll; verifyCodeFmt")
|
seglo/akka-projection | akka-projection-core/src/main/scala/akka/projection/Projection.scala | <reponame>seglo/akka-projection<gh_stars>1-10
/*
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.projection
import akka.Done
import akka.actor.ClassicActorSystemProvider
import akka.annotation.ApiMayChange
import scala.concurrent.{ ExecutionContext, Future }
@ApiMayChange
trait Projection {
def start()(implicit systemProvider: ClassicActorSystemProvider): Unit
def stop(): Future[Done]
}
|
seglo/akka-projection | project/Dependencies.scala | package akka.projections
import sbt._
import sbt.Keys._
object Dependencies {
object Versions {
val akka = "2.6.4"
val scalaTest = "3.1.1"
}
object Compile {
val akkaStream = "com.typesafe.akka" %% "akka-stream" % Versions.akka
}
object Test {
val scalaTest = "org.scalatest" %% "scalatest" % Versions.scalaTest % sbt.Test
}
private val deps = libraryDependencies
val core = deps ++= Seq(Compile.akkaStream)
}
|
seglo/akka-projection | akka-projection-testkit/src/main/scala/akka/projection/testkit/ProjectionTestRunner.scala | <reponame>seglo/akka-projection<gh_stars>1-10
/*
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.projection.testkit
import akka.actor.ClassicActorSystemProvider
import akka.annotation.ApiMayChange
import akka.projection.Projection
import scala.concurrent.duration._
import scala.concurrent.{ Await, ExecutionContext }
@ApiMayChange
trait ProjectionTestRunner {
def runProjection(proj: Projection)(testFunc: => Unit)(implicit systemProvider: ClassicActorSystemProvider): Unit =
runProjection(proj, 5.seconds)(testFunc)
def runProjection(proj: Projection, timeout: FiniteDuration)(testFunc: => Unit)(
implicit systemProvider: ClassicActorSystemProvider): Unit = {
try {
proj.start()
testFunc
} finally {
Await.ready(proj.stop(), timeout)
}
}
}
|
seglo/akka-projection | project/plugins.sbt | <reponame>seglo/akka-projection
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.3.2")
addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.4.0")
addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.0.0")
addSbtPlugin("com.lightbend.akka" % "sbt-paradox-akka" % "0.31")
addSbtPlugin("com.lightbend" % "sbt-whitesource" % "0.1.18")
addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "0.9.3")
addSbtPlugin("org.foundweekends" % "sbt-bintray" % "0.5.4")
|
Activiti/activiti-cloud-performance-tests | src/test/scala/simulation/BasicSimulation.scala | <reponame>Activiti/activiti-cloud-performance-tests
package simulation
import java.util.concurrent.TimeUnit
import io.gatling.core.Predef._
import io.gatling.http.Predef._
class BasicSimulation extends Simulation {
val httpConf = http
.baseURL("http://" + System.getenv("GATEWAY_HOST"))
val scn = scenario("Basic interaction")
.exec(http("getAuthentication")
.post("http://" + System.getenv("AUTH_HOST") + "/auth/realms/springboot/protocol/openid-connect/token")
.formParam("client_id", "activiti")
.formParam("grant_type", "password")
.formParam("username", "hruser")
.formParam("password", "password")
.check(jsonPath("$..access_token").ofType[String].saveAs("token"))
)
.exec(http("getProcessDefinitions")
.get("/rb-my-app/v1/process-definitions")
.header("Authorization", "Bearer ${token}")
.check(jsonPath("$..id").ofType[String].saveAs("processDefinitionId"))
)
.exec(http("startProcess")
.post("/rb-my-app/v1/process-instances")
.header("Authorization", "Bearer ${token}")
.header("Content-Type", "application/json")
.body(StringBody(
"""
{
"processDefinitionId": "${processDefinitionId}",
"commandType":"StartProcessInstanceCmd"
}
"""))
.check(jsonPath("$..id").ofType[String].saveAs("processInstanceId"))
)
.exec(http("getProcessTask")
.get("/rb-my-app/v1/process-instances/${processInstanceId}/tasks")
.header("Authorization", "Bearer ${token}")
.check(jsonPath("$..id").ofType[String].saveAs("taskId"))
)
.exec(http("claimTask")
.post("/rb-my-app/v1/tasks/${taskId}/claim")
.queryParam("assignee", "hruser")
.header("Authorization", "Bearer ${token}")
)
.exec(http("completeTask")
.post("/rb-my-app/v1/tasks/${taskId}/complete")
.header("Authorization", "Bearer ${token}")
).pause(1)
// .pause("500" ,TimeUnit.MILLISECONDS)
.exec(http("taskCompletedEvent")
.get("/audit/v1/events")
.queryParam("processInstanceId", "${processInstanceId}")
.queryParam("eventType", "TaskCompletedEvent")
.header("Authorization", "Bearer ${token}")
.check(jsonPath("$..id").ofType[String])
)
.exec(http("taskCompletedQuery")
.get("/query/v1/tasks")
.queryParam("id", "${taskId}")
.queryParam("status", "COMPLETED")
.header("Authorization", "Bearer ${token}")
.check(jsonPath("$..id").ofType[String])
)
.exec(session => {
println("Variables logging(this is optional of course):")
println(session("token").as[String])
println(session("processDefinitionId").as[String])
println(session("processInstanceId").as[String])
println(session("taskId").as[String])
session
})
setUp(scn.inject(constantUsersPerSec(5) during 60).protocols(httpConf))
} |
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_Exc03_GroupByExamples.scala | package org.pengfei.Lesson04_Spark_SQL
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.collect_list
import org.apache.spark.sql.functions.collect_set
object Lesson04_Exc03_GroupByExamples {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Test").master("local[2]").getOrCreate()
import spark.implicits._
val clusterLabels=spark.sparkContext.parallelize(Array((0,("normal",1)),(0,("normal",2)),(0,("back",1)),(1,("neptune",1)),(1,("satan",0)),(1,("nep",3)))).toDF("cluster","label").as[(Int,(String,Int))]
/* First example: we group by "cluster" and concatenate all elements of the "label" column into a new
* column "labels". Two pre-defined aggregation functions can be used here. */
// collect_list concatenates all elements without removing any
val groupedClusterLabelsWithList=clusterLabels.groupBy("cluster").agg(collect_list("label").as("labels"))
// collect_set concatenates all elements but removes duplicates, so only distinct values remain in the result array.
val groupedClusterLabelsWithSet=clusterLabels.groupBy("cluster").agg(collect_set("label").as("labels"))
groupedClusterLabelsWithList.show(false)
groupedClusterLabelsWithSet.show(false)
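// Expected shape of the result (a sketch, not captured output): for cluster 0 the `labels` column
// holds the structs [normal, 1], [normal, 2], [back, 1]; since this sample has no exact duplicate
// (label, count) pairs, collect_set keeps the same elements as collect_list and only the ordering
// may differ.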
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson17_Analyze_Clinical_Data/Lesson17_Compare_Two_Columns.scala | package org.pengfei.Lesson17_Analyze_Clinical_Data
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.collection.mutable.Map
object Lesson17_Compare_Two_Columns {
def main(args:Array[String]):Unit= {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson17_Compare_Two_Columns").master("local[2]").getOrCreate()
import spark.implicits._
val df = spark.sparkContext.parallelize(Seq(
("null","1"),
("cat13","cat13"),
("cat95","cat95"),
("0","0"),
("1","1"),
("0","1"),
("1","0"),
("1","null"),
("cat56","null"),
("null","cat40"),
("null","null")
)).toDF("Val1", "Val2")
val colMap=Map("Val1"->"Val2")
/*val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA",
"CBD_Dobut_SOFA","CBD_Hepat_SOFA",
"CBD_Neuro_SOFA","CBD_Renal_SOFA",
"CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA")
val colMap=Map[String,String]()
for(colName<-sofaValueColumns){
colMap(colName+"_D01")=(colName+"_D01-D02")
}
println(colMap.toString())*/
CompareRowValueOfTwoCols(df,colMap)
}
/** This function takes a data frame and a map of (columnName -> columnName) pairs. For each pair it
* prints the rows where the values of the two columns differ, first including and then excluding
* rows that contain the string "null".
*
* @author <NAME>
* @version 1.0
* @since 2018-12-20
* @param df The source data frame.
* @param colMap The pairs of column names whose row values should be compared.*/
def CompareRowValueOfTwoCols(df:DataFrame, colMap: Map[String,String]): Unit ={
for ((k,v)<-colMap){
val res1=df.filter(!(df(k)===df(v))).select(k,v)
println(s"List of possible colision value row with null")
res1.show(10,false)
val res2=res1.filter((!(res1(k)==="null"))&&(!(res1(v)==="null"))).select(k,v)
println(s"List of possible colision value row without null")
res2.show()
}
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_5_3_3_Spark_ML_Exo1.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_5_3_3_Spark_ML_Exo1.scala<gh_stars>0
package org.pengfei.Lesson05_Spark_ML
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
object Lesson05_5_3_3_Spark_ML_Exo1 {
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson5_5_3_3_Spark_ML_Exo1").getOrCreate()
/****************************************************************************************************************
* **********************************5.5.3.3 Spark ML Exo1*****************************************************
* ***********************************************************************************************************/
/************************************5.5.3.3.1 data set *********************************************************/
/* We will use the sentiment labeled sentences dataset available at https://archive.ics.uci.edu/ml/
* datasets/Sentiment+Labelled+Sentences. It was created for the paper, “From Group to Individual Labels
* Using Deep Features” published by <NAME>, <NAME>, <NAME>, and <NAME> at
* KDD 2015.
*
* This dataset contains a sample of reviews from three websites, imdb.com, amazon.com, and yelp.com.
* It includes randomly selected 500 positive and 500 negative reviews from each website. A review with
* negative sentiment is labeled 0 and a positive review is labeled 1. A review is separated from its label
* by the tab character.
*
* For simplicity's sake, we will use only the reviews from imdb.com. These reviews are in the
* imdb_labelled.txt file.*/
/************************************5.5.3.3.2 Goal of the exo *************************************************/
/* Our goal is to train a predictive model that predicts whether a sentence has a positive or negative sentiment.
* To be more specific, we will train and evaluate a binary classifier using the dataset in the
* imdb_labelled.txt file.*/
import spark.implicits._
/*******************Step 1 read data and transform data into dataframe********************/
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sentiment_labelled_sentences/imdb_labelled.txt"
val lines=spark.sparkContext.textFile(filePath)
val totalLineNum=lines.count()
println(s"Total line number : $totalLineNum")
val word=lines.map(line=>line.split("\\t"))
//println(s"word value: ${word.first().toArray.mkString(";")} ")
val df=word.map{a=>ImdbDS(a(0),a(1).toDouble)}.toDF()
// understand data
// get data size
val rowNum=df.count()
val columnNum= df.columns.length
println(s"data set has $rowNum rows, and $columnNum columns" )
// get data schema
df.printSchema()
// get all possible label value (for classification problem)
df.select($"label").distinct().show()
// get observation number for each label
df.groupBy("label").agg(count($"text").alias("textNumCount")).show()
// test if text column has null value
val nullText=df.filter($"text".isNull||$"text".isNaN).count()
val nullLabel=df.filter($"label".isNull||$"label".isNaN).count()
println(s"null text number is $nullText , null label number is $nullLabel")
// get a dataset sample
df.show(5)
/********************************Step 2 prepare test, training data***********************************/
val Array(trainingData,testData)=df.randomSplit(Array(0.8,0.2))
println(s"test data size ${testData.count()}, training data size ${trainingData.count()}")
/*******************************Step 3 Transform data to a format which ml can use **********************/
// tokenize training data
val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
val tokenizedData= tokenizer.transform(trainingData)
// create a feature vector to represent a sentence
/* HashingTF is a Transformer that converts a sequence of words into a fixed-length feature Vector. It maps
* a sequence of terms to their term frequencies using a hashing function.
*
* The preceding code first creates an instance of the HashingTF class. Next, it sets the number of features,
* the name of the input DataFrame column that should be used for generating feature Vectors, and the name
* for the new column that will contain the feature Vectors generated by HashingTF.*/
val hashingTF=new HashingTF()
.setNumFeatures(1000)
.setInputCol(tokenizer.getOutputCol)
.setOutputCol("features")
val hashedData=hashingTF.transform(tokenizedData)
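// Illustrative sketch (indices are made up, not actual output): a tokenized review such as
// ["a", "great", "movie"] is hashed by HashingTF into a 1000-dimensional sparse term-frequency
// vector, e.g. (1000, [97, 411, 803], [1.0, 1.0, 1.0]), where each index is a term's hash modulo
// numFeatures and each value is that term's frequency in the sentence.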
/* Now we have the Transformers required to transform our raw data into a format that can be used with a
* machine learning algorithm. The DataFrame that will be generated by the transform method of hashingTF
* will have a column named "label", which stores each label as a Double, and a column named "features",
* which stores the features for each observation as a Vector.
*
* Next, we need an Estimator to fit a model on the training dataset. For this example, we will use the
* LogisticRegression class provided by the Spark ML library*/
/*******************************Step 4 build a ml model ****************************************/
val lrModel= new LogisticRegression()
.setMaxIter(10)
.setRegParam(0.01)
/******************************Step 5 build a pipeline *****************************************/
/* We can build a pipeline that chains the transformers and the ML model together; this avoids keeping track of
 * many intermediate DataFrames by hand*/
val pipeline=new Pipeline().setStages(Array(tokenizer,hashingTF,lrModel))
/* The above pipeline creates an instance of the Pipeline class with three stages. The first two stages are
 * Transformers and the third stage is an Estimator. The pipeline object will first use the specified Transformers to
* transform a DataFrame containing raw data into a DataFrame with the feature Vectors. Finally, it will use the
* specified Estimator to train or fit a model on the training dataset.*/
/*****************************Step 6 train the pipeline model ***********************************/
val pipelineModel=pipeline.fit(trainingData)
/******************************Step 7 predict on the test and training data*************************************/
val testPredictions=pipelineModel.transform(testData)
val trainingPredictions=pipelineModel.transform(trainingData)
/*****************************Step 8 evaluate the model *****************************************/
val evaluator = new BinaryClassificationEvaluator()
val evaluatorParamMap=ParamMap(evaluator.metricName->"areaUnderROC")
val aucTraining=evaluator.evaluate(trainingPredictions,evaluatorParamMap)
val aucTest=evaluator.evaluate(testPredictions,evaluatorParamMap)
println(s"auc for training is $aucTraining, for test is $aucTest")
/* We can notice that the AUC score for the training data is 1.0, which means the model fits the training set
 * perfectly (it overfits), but for the test data it is only about 0.71, which is not far above 0.5 (a worthless model)*/
/***************************Step 9 tuning the model**********************************/
/*To improve a model's performance, we often tune its hyperparameters first. Spark ML provides
* a CrossValidator class that can help with this task. It requires a parameter grid over which it conducts
* a grid search to find the best hyperparameters using k-fold cross validation*/
val paramGrid = new ParamGridBuilder()
.addGrid(hashingTF.numFeatures,Array(10000,100000))
.addGrid(lrModel.regParam,Array(0.01,0.1,1.0))
.addGrid(lrModel.maxIter,Array(20,30))
.build()
/* This code creates a parameter grid consisting of two values for the number of features, three values for
* the regularization parameters, and two values for the maximum number of iterations. It can be used to do
* a grid search over 12 different combinations of the hyperparameter values. You can specify more options,
* but training a model will take longer since grid search is a brute-force method that tries all the different
* combinations in a parameter grid. As mentioned earlier, using a CrossValidator to do a grid search can be
* expensive in terms of CPU time.*/
val crossValidator=new CrossValidator()
.setEstimator(pipeline)
.setEstimatorParamMaps(paramGrid)
.setNumFolds(10)
.setEvaluator(evaluator)
val crossValidatorModel=crossValidator.fit(trainingData)
/* The fit method in the CrossValidator class returns an instance of the CrossValidatorModel class.
* Similar to other model classes, it can be used as a Transformer that predicts a label for a given feature Vector*/
val newTestPredictions = crossValidatorModel.transform(testData)
val newAucTest = evaluator.evaluate(newTestPredictions, evaluatorParamMap)
println(s"auc value with cross validation tuning is ${newAucTest}")
/* As you can see, the performance of the cross-validated model is much better, with an AUC score of about 0.825*/
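/* A hedged follow-up sketch (not part of the original lesson): the model selected by the grid search can be
 * retrieved from the CrossValidatorModel and inspected or reused directly. The exact winning hyperparameters
 * depend on the random training/test split above. */
val bestPipelineModel = crossValidatorModel.bestModel.asInstanceOf[org.apache.spark.ml.PipelineModel]
println(s"best pipeline model stages: ${bestPipelineModel.stages.mkString(",")}")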
}
case class ImdbDS(text:String,label:Double)
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson17_Analyze_Clinical_Data/Lesson17_WithColumn_When_Otherwise.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson17_Analyze_Clinical_Data
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType // for casting columns to StringType
object Lesson17_WithColumn_When_Otherwise {
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson17_WithColumn_").getOrCreate()
import spark.implicits._
val df = spark.sparkContext.parallelize(Seq((4, "blah", 2), (2, "", 3), (56, "foo", 3), (100, null, 5)))
.toDF("A", "B", "C")
df.show()
/*val newDf = df.withColumn("D", when($"B" === "", 0))
newDf.show()*/
/*val newDf=replaceSpecValue(df,Array("B"),"foo","ok")
newDf.show()*/
//remove a row which column B = ""
val afterDelete=removeRowsWithSpecValue(df,"B","")
afterDelete.show()
val afterDeleteR=removeRowsWithSpecValues(df,"B",Array("","blah"))
afterDeleteR.show()
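    /* A hedged usage sketch of replaceSpecValue defined below: replace the value "blah" in column B with "ok"
     * while keeping every other row untouched thanks to the .otherwise clause. */
    val afterReplace = replaceSpecValue(df, Array("B"), "blah", "ok")
    afterReplace.show()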
}
def replaceSpecValue(rawDf:DataFrame,colNames:Array[String],specValue:String,newValue:String):DataFrame={
/*Step 0 : cast all column to string*/
val spark=rawDf.sparkSession
import spark.implicits._
val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*)
/*Step 1 : transform spec value to null*/
var result=df
for(colName<-colNames){
val newColName=colName+"_tmp"
      /* We must complete the when clause with .otherwise(value); otherwise every row that does not satisfy
       * the when condition is set to null. In our case we keep the original value of the row with .otherwise(result(colName)).
       */
      result=result.withColumn(newColName, when(result(colName) === specValue, newValue).otherwise(result(colName))) //create a tmp column holding the replaced value
.drop(colName) //drop the old column
.withColumnRenamed(newColName,colName) // rename the tmp to colName
}
result
}
def removeRowsWithSpecValue(df:DataFrame,colName:String,specValue:String):DataFrame={
val result=df.filter(!(df(colName)===specValue))
result
}
def removeRowsWithSpecValues(df:DataFrame,colName:String,specValues:Array[String]):DataFrame={
var result=df
for(specValue<-specValues){
result=result.filter(!(result(colName)===specValue))
}
result
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson01_RDD/Lesson01_RDD_Basics.scala | <gh_stars>0
package org.pengfei.Lesson01_RDD
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
/** **************************************************************************************
* ************************************Introduction ***********************************
* ************************************************************************************
*
* An RDD in spark is simply an immutable distributed collection of objects. Each RDD
* is split into multiple partitions, which may be computed on different nodes of the cluster.
* RDDs can contain any type of Python, Java or Scala objects, including user-defined
* classes.
*
* Users create RDDs in two ways: by loading an external dataset, or by distributing a
* collection of objects (e.g. a list or set) in their driver program.
* https://spark.apache.org/docs/2.2.0/rdd-programming-guide.html
*
* **********************************************************************************/
object Lesson01_RDD_Basics {
def main(args: Array[String]) = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[4]"). //spark://10.70.3.48:7077 remote
appName("Lesson1_RDD").
getOrCreate()
// import sparkSession.implicits._ for all schema conversion magic.
//get dynamic data path
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path = sparkConfig.getString("sourceDataPath")
RddCreationOperation(spark, path)
// RddTransformationOperation(spark)
// RddActionOperations(spark)
/* The special transformations and actions for RDD with Key value pair and num types and be found
* in the file Lesson1_RDDWithKeyValuePair.scala and Lesson1_RDDWithNumTypes.scala */
//RddSaveOperation(spark)
//RddCachingOperations(spark)
// RddUdfOperations(spark)
}
def RddCreationOperation(spark: SparkSession, path: String): Unit = {
// get the spark context instance
val sc = spark.sparkContext
/** ************************************************************************************************
* ********************* 1.1 Creating RDDs ***********************************************************
* ************************************************************************************************/
/*
* Since RDD is an abstract class, you cannot create an instance of the RDD class directly. The SparkContext
* class provides factory methods to create instances of concrete implementation classes. An RDD can also be
* created from another RDD by applying a transformation to it. As discussed earlier, RDDs are immutable. Any
* operation that modifies an RDD returns a new RDD with the modified data.
*
     * There are three main ways to create an RDD:
* 1. RDDs creation with in memory collection of objects
* 2. RDDs creation with files
* 3. RDDs creation with RDD transformation operations
*
     * The Spark driver submits a new job to the executors whenever it encounters an action. Spark's RDDs are by default
     * recomputed each time you run an action on them. If you would like to reuse an RDD in multiple actions, you can ask
     * Spark to persist it using RDD.persist(). After computing it the first time, Spark will store the RDD contents in
     * memory (partitioned across the machines in your cluster). Persisting RDDs on disk is also possible.
*
* The behavior of not persisting by default may again seem unusual, but it makes a lot of sense for big datasets:
* if you will not reuse the RDD, there’s no reason to waste storage space when Spark could instead stream
* through the data once and just compute the result.
* */
// print(linesHasPython.count())
// print(linesHasPython.first())
/*
* The ability to always recompute an RDD is actually why RDDs are called "resilient". When a machine holding
* RDD data fails, Spark uses this ability to recompute the missing partitions, transparent to the user.
* */
/** 1.1.1 RDDs creation with in memory collection of objects
*
* The first way to create an RDD is to parallelize an object collection, meaning converting it to a
* distributed dataset that can be operated in parallel. This is a great way to get started in learning
* Spark because it is simple and doesn’t require any data files.
*
* Spark context provide two methods:
* - parallelize() : It takes a in memory collection and returns an RDD
     * - makeRDD(): It's a wrapper around parallelize(); makeRDD calls parallelize() to create RDDs
*
* This works for all scala collection types: List, Array, Seq, etc.
*/
val strCollection1 = List("pandas", "i like pandas")
val rddFromMem1 = spark.sparkContext.parallelize(strCollection1)
val linesHasPandas = rddFromMem1.filter(line => line.contains("pandas"))
print(linesHasPandas.count())
print(linesHasPandas.first())
val strCollection2 = Array("spark", "i like spark")
//numSlices defines the partition number
val rddFromMem2 = sc.makeRDD(strCollection2,4)
// persist an rdd in memory
rddFromMem2.persist(StorageLevel.MEMORY_ONLY_SER)
/** 1.1.2 RDD creation with external datasets
* Spark can create RDDs from any storage source supported by Hadoop, including your local file system, HDFS,
* Cassandra, HBase, Amazon S3, etc. Spark supports text files, SequenceFiles, and any other Hadoop InputFormat.
*
* Spark context provide several methods to read file and convert it to RDDs.
     * - textFile(path:String,minPartitions:int): The minPartitions argument specifies the minimum number of partitions
     *              of the RDD; based on the context configuration, Spark may create more. By default, Spark creates one
     *              partition for each HDFS file block
* - wholeTextFiles(): This method reads all text files in a directory and returns an RDD of key-value pairs.
* Each key-value pair in the returned RDD corresponds to a single file. The key stores the path
* of a file and the value part stores the content of a file.
* - sequenceFile(): The sequenceFile method reads key-value pairs from a sequence file. It returns an RDD of
* key-value pairs. In addition to providing the name of an input file, you have to specify the data
* types for the keys and values as type parameters when you call this method
*
     * textFile vs wholeTextFiles
     * If we use a wildcard path argument in these two methods, they will both read multiple files and return an RDD.
     * In the RDD returned by textFile, each line of each file becomes an element of the RDD, so you can't determine
     * which line comes from which file. wholeTextFiles() returns an RDD of key-value pairs, where the key is the file
     * path and the value is the whole content of the file.
*
* */
/** 1.1.2.1 textFile code example */
val readMeFilePath = s"${path}/spark_lessons/Lesson01_RDD/README.md"
val readMeLines = sc.textFile(readMeFilePath,4)
val linesHasPython = readMeLines.filter(line => line.contains("python"))
print(linesHasPython.first())
/** 1.1.2.2 wholeTextFiles code example */
val allRdd = sc.textFile(s"${path}/spark_lessons/Lesson01_RDD/*.txt")
    val allRddFromLesson1 = sc.wholeTextFiles("/home/pliu/data_set/spark_data_set/spark_lessons/Lesson01_RDD/*.txt")
println(s"rdd with textFile : ${allRdd.collect().toList.toString()}")
println(s"rdd with wholeTextFile : ${allRddFromLessson1.collect().toMap.toString()}")
/** 1.1.2.3 sequenceFile code example */
//val rdd = sc.sequenceFile[String, String]("SequenceFilePath")
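    /* A hedged sketch of reading a sequence file (the path below is hypothetical; it must point to an existing
     * sequence file with String keys and String values, e.g. one written by saveAsSequenceFile):
     * val seqPairs = sc.sequenceFile[String, String](s"${path}/spark_lessons/Lesson01_RDD/someSequenceFile")
     * println(s"sequence file content: ${seqPairs.collect().mkString(",")}")
     */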
/** 1.1.3 RDD creation with transformation
* There are many RDD transformation operations which can create new RDDs. I only give two example:
* - Exp1: We convert a dataFrame to RDD.
* - Exp2: We use a map() operation, which transform an RDD of lines of a file (RDD[String]) to an RDD of class
     * clientSatisfied(RDD[clientSatisfied]). Note that the class clientSatisfied is opaque to the RDD.
*/
/** 1.1.3.1 Exp1: RDD creation from dataframe */
val satisfiedClientFilePath=s"${path}/spark_lessons/Lesson01_RDD/satisfait_client.data"
val df = spark.read.format("csv").option("header", "true").load(satisfiedClientFilePath)
df.show()
val dfToRdd = df.rdd
println(s"rdd value : ${dfToRdd.collect().toList.toString}")
/** 1.1.3.2 Exp2: Transform RDD with map */
case class clientSatisfied(manager_name: String, client_name: String, client_gender: String,
client_age: Int, response_time: Double, statisfaction_level: Double)
val client_sat_lines = sc.textFile(satisfiedClientFilePath)
val satisfait_client = client_sat_lines.map { l => {
      // isEmpty checks for an empty line; it is a String method, not a null check
if (l.isEmpty) {
println("This is an empty line")
}
else {
val s = l.split(",")
// s is not an rdd , l is a string, l.split returns an array of String
// println(s"type of line after split: ${s.getClass.getName}")
clientSatisfied(s(0), s(1), s(2), s(3).toInt, s(4).toDouble, s(5).toDouble)
}
}
}
// Below code is illegal, because class clientSatisfied is opaque for RDD.
// satisfait_client.first().manager_name
println(s" satisfait_client_rdd value is ${satisfait_client.collect().mkString(",")}")
}
/** **********************************************************************************************
* ************************************* 1.2 RDDs operations : transformation******************
* ***************************************************************************************************/
def RddTransformationOperation(spark: SparkSession): Unit = {
/*
     * RDD transformations are all lazy in Spark; a transformation does not mutate the existing input RDD. Instead,
     * it returns a pointer to an entirely new RDD.
*
* RDD transformations are conceptually similar to Scala collection methods. The key difference is that
* the Scala collection methods operate on data that can fit in the memory of a single machine, whereas RDD
* methods can operate on data distributed across a cluster of nodes.
*
     * In Index 1 you can find the full list of RDD transformations; here we just illustrate some important ones.
* */
/** *************************************** Map and MapPartitions *****************************************/
    /* The map method is a higher-order method that takes a function as input and applies it to each element
     * in the source RDD to create a new RDD. The input function must take a single argument and return a
     * single value */
val numRDD = spark.sparkContext.parallelize(List(1, 2, 3, 4))
val square = numRDD.map(x => x * x)
println("Square of each element of RDD done by Map: " + square.collect().mkString(","))
/* mapPartitions method allows you to process data at a partition level, Instead of passing one
* element at a time to its input function, mapPartitions passes a partition in the form of an
* iterator. The input function to the mapPartitions method takes an iterator as input and returns
* another iterator as output. The mapPartitions method returns new RDD formed by applying a
* user-specified function to each partition of the source RDD.*/
val squarePartMap = numRDD.mapPartitions(iter => iter.map { x => x * x })
println("Square of each element of RDD done by MapPartitions: " + squarePartMap.collect().mkString(","))
/** ************************************* Flatmap *****************************************/
    /* flatMap is similar to map; the difference is that flatMap flattens the result.
     * For example, map(line=>line.split(" ")) generates a collection of collections: one inner collection of
     * words per input line.
     *
     * flatMap(line=>line.split(" ")) generates a single flat collection containing all the words */
val textRDD = spark.sparkContext.parallelize(List("hello world", "hi"))
val words = textRDD.flatMap(line => line.split(" "))
println(s"Flat map of words value : ${words.collect().mkString(",")}")
/** ****************************************Filter *******************************************/
/* The filter method takes a Boolean function as input and applies it to each element in the source
* RDD to create a new RDD. The filter method returns a new RDD formed by selecting only those elements
* for which the input Boolean function returned true. Thus, the new RDD contains a subset of the elements
* in the original RDD*/
val logRDD = spark.sparkContext.textFile("/home/pliu/data_set/spark_data_set/spark_lessons/Lesson01_RDD/log.log")
val errorRDD = logRDD.filter(line => line.contains("ERROR"))
val warnRDD = logRDD.filter(line => line.contains("WARN"))
println(s"error lines value: ${errorRDD.first()}")
println(s"warn lines value: ${warnRDD.first()}")
/** ****************************** Union, intersection and subtract *******************************/
val badLineRDD = errorRDD.union(warnRDD)
val num1 = spark.sparkContext.parallelize(List(1, 2, 3))
val num2 = spark.sparkContext.parallelize(List(3, 4, 5))
/* The union method takes an RDD as input and returns a new RDD that contains the union of the elements in
* the source RDD and the RDD passed to it as an input. C=A.union(B), c in C means c in A or in B*/
val numUnion = num1.union(num2)
println(s" The union of two num set is : ${numUnion.collect().mkString(",")}")
/* The intersection C=A.intersection(B), c in C means c in A and c in B*/
val numIntersection = num1.intersection(num2)
println(s" The intersection of two num set is : ${numIntersection.collect().mkString(",")}")
/* The subtract method takes an RDD as input and returns a new RDD that contains elements in the source
* RDD but not in the input RDD. C=A.subtract(B), c in C means c in A and c not in B.
* */
val numSubtract = num1.subtract(num2)
println(s" The subtract of two num set is : ${numSubtract.collect().mkString(",")}")
/** ********************************* Cartesian **********************************/
/* The cartesian method of an RDD takes an RDD as input and returns an RDD containing the cartesian
* product of all the elements in both RDDs. It returns an RDD of ordered pairs, in which the first element
* comes from the source RDD and the second element is from the input RDD. The number of elements in the
* returned RDD is equal to the product of the source and input RDD lengths.*/
val numCartesian = num1.cartesian(num2)
/** ***************************** zip and zipWithIndex **************************/
/* The zip method takes an RDD as input and returns an RDD of pairs, where the first element in a pair is
* from the source RDD and second element is from the input RDD. Unlike the cartesian method, the RDD
* returned by zip has the same number of elements as the source RDD. Both the source RDD and the input
* RDD must have the same length. In addition, both RDDs are assumed to have same number of partitions
* and same number of elements in each partition.*/
val numbers = spark.sparkContext.parallelize(List(1, 2, 3, 4))
val alphabets = spark.sparkContext.parallelize(List("a", "b", "c", "d"))
val zipPaires = numbers.zip(alphabets)
println(s"zipPaires value is : ${zipPaires.collect().mkString(",")}")
/* The zipWithIndex method zips the elements of the source RDD with their indices and returns an RDD of pairs.*/
val alphabetsWithIndex = alphabets.zipWithIndex()
println(s"zipPairesWithIndex value is : ${alphabetsWithIndex.collect().mkString(",")}")
/** ***************************** groupBy ***********************************/
/* groupBy method groups the elements of an RDD according to a user specified criteria. It takes as input a function
* that generates a key for each element in the source RDD. It applies this function to all the elements in the source
* RDD and returns an RDD of pairs. In each returned pair, the first item is a key and the second item is a collection
* of the elements mapped to that key by the input function to the groupBy method.
*
* Note that the groupBy method is an expensive operation since it may shuffle data.*/
    /* The following RDD is a report of client satisfaction with a company's service */
case class client_satisfait(manager_name: String, client_name: String, client_gender: String, client_age: Int, response_time: Double, statisfaction_level: Double)
val client_sat_lines = spark.sparkContext.textFile("/home/pliu/data_set/spark_data_set/spark_lessons/Lesson01_RDD/satisfait_client.data")
val client_rdd = client_sat_lines.map { l => {
if (l.isEmpty) {
println("This is an empty line")
}
else {
val s = l.split(",")
client_satisfait(s(0), s(1), s(2), s(3).toInt, s(4).toDouble, s(5).toDouble)
}
}
}
// println(s"client_rdd type: ${client_rdd.getClass.getName}")
// println(s"Sample of client_rdd : ${client_rdd.first()} ")
val groupByAge = client_rdd.groupBy(c => {
c match {
case client_satisfait(manager_name, client_name, client_gender, client_age, response_time, statisfaction_level) => client_age
case _ => 0
}
})
//println(s"groupByAge value is : ${groupByAge.collect().mkString(",")}")
/** ****************************KeyBy *************************************/
    /* The keyBy method is similar to the groupBy method. It is a higher-order method that takes as input a function
* that returns a key for any given element in the source RDD. The keyBy method applies this function to all the
* elements in the source RDD and returns an RDD of pairs. In each returned pair, the first item is a key which is
* calculated by using the input function and the second item is an element that was mapped to that key by the
* input function to the keyBy method. The RDD returned by keyBy will have the same number of elements as the
* source RDD*/
val keyByAge = client_rdd.keyBy(c => {
c match {
case client_satisfait(manager_name, client_name, client_gender, client_age, response_time, statisfaction_level) => client_age
case _ => 0
}
})
// println(s"KeyByAge value is : ${keyByAge.collect().mkString(",")}")
/** **************************************sortBy ********************************/
/* The sortBy method returns an RDD with sorted elements from the source RDD. It takes two input parameters.
* The first input is a function that generates a key for each element in the source RDD. The second argument
* allows you to specify ascending or descending order for sort.*/
val unSortedNums = spark.sparkContext.parallelize(List(4, 9, 1, 5, 8, 3, 2))
val sortedNums = unSortedNums.sortBy(x => x, true)
//println(s"sortedNums value is : ${sortedNums.collect.mkString(",")}")
val sortByAge = client_rdd.sortBy(c => {
c match {
case client_satisfait(manager_name, client_name, client_gender, client_age, response_time, statisfaction_level) => client_age
case _ => 0
}
})
//println(s"client satisfait rdd sortByAge value is : ${sortByAge.collect.mkString("|")} ")
/** ******************************** pipe *****************************************************/
/* The pipe method allows you to execute an external program in a forked process. It captures the output of the
* external program as a String and returns an RDD of Strings.*/
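    /* A minimal hedged sketch (it assumes the "cat" command is available on every worker node): each RDD element
     * is written to the command's stdin, and every line the command prints becomes an element of the resulting
     * RDD[String]. */
    val pipedNums = unSortedNums.pipe("cat")
    println(s"pipedNums value is : ${pipedNums.collect().mkString(",")}")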
/** ****************************** randomSplit ****************************************************/
/* The randomSplit method splits the source RDD into an array of RDDs. It takes the weights of the splits as input.*/
val numbersTobeSplited = spark.sparkContext.parallelize((1 to 100).toList)
val splittedNumbers = numbersTobeSplited.randomSplit(Array(0.8, 0.2))
val firstSet = splittedNumbers(0)
val secondSet = splittedNumbers(1)
println(s"FirstSet has ${firstSet.count()}, Second set has ${secondSet.count()}")
/** *******************************coalesce ****************************************/
/* The coalesce method reduces the number of partitions in an RDD. It takes an integer input and returns a new RDD
* with the specified number of partitions. This function is very useful when you want to export your result to
* normal file system (not HDFS). So you can have all your result in one file not in 100 files. */
val allNumsInOnePartitions = numbersTobeSplited.coalesce(1, true)
/** ******************************repartition *************************************/
/* The repartition method takes an integer as input and returns an RDD with specified number of partitions.
* It is useful for increasing parallelism. It redistributes data, so it is an expensive operation.
* The coalesce and repartition methods look similar, but the first one is used for reducing the number
* of partitions in an RDD, while the second one is used to increase the number of partitions in an RDD.
*
     * Note that repartitioning has practical limits: repartitioning an RDD with only 3 elements into 4 partitions
     * simply leaves at least one partition empty, and the size of a single partition must be smaller than 2GB*/
val numsWithSixPartitions = numbersTobeSplited.repartition(6)
/** ***************************** sample ****************************************/
/* The sample method returns a sampled subset of the source RDD. It takes three input parameters. The first
* parameter specifies the replacement strategy. The second parameter specifies the ratio of the sample size to
* source RDD size. The third parameter, which is optional, specifies a random seed for sampling. if the seed is a fixed
* value, the sample can be reproduced */
val seed: Long = 0
val sampleOfNums = numbersTobeSplited.sample(true, 0.2, seed)
}
/** **********************************************************************************************
* ************************************* 1.3 RDDs operations : Actions **********************
* ***************************************************************************************************/
  /* Actions are RDD methods that return a value to the driver program. Unlike transformations, which are lazy,
   * actions are executed as soon as they are called */
def RddActionOperations(spark: SparkSession): Unit = {
/** ********************** RDDs Actions examples **********************/
val num1 = spark.sparkContext.parallelize(List(1, 2, 3, 3, 2, 1, 5, 6, 7))
val num2 = spark.sparkContext.parallelize(List(3, 4, 5))
/** ****************************** Collect ****************************************/
/* The collect method returns the elements in the source RDD as an array. This method should be used with
* caution since it moves data from all the worker nodes to the driver program. It can crash the driver program
* if called on a very large RDD.*/
val squareNumRdd = num1.map(x => x * x)
val squareNumRes = squareNumRdd.collect()
println(s"squareNumRes has type : ${squareNumRes.getClass().getName()}")
println(s"squareNumRes has value: ${squareNumRes.mkString(",")}")
/** ***********************count and countByValue **********************************/
/* The count method returns a count(long type) of the elements in the source RDD.*/
val countNum1 = num1.count()
println(s"countNum1 has type: ${countNum1.getClass.getName}")
println(s"countNum1 has value: ${countNum1}")
/* The countByValue method returns a count of each unique element in the source RDD. It returns an instance
* of the Map class containing each unique element and its count as a key-value pair.*/
val countByValueNum1 = num1.countByValue()
println(s"countByValueNum1 has value: ${countByValueNum1.mkString(",")}")
/** ******************first, take, takeOrdered, top *******************************/
/* The first method returns the first element in the source RDD. */
val firstElementOfNum1 = num1.first()
println(s"firstElementOfNum1 is : ${firstElementOfNum1}")
/* The top method takes an integer N as input and returns an array containing the N largest elements
* in the source RDD.*/
val biggestThreeElementOfNum1 = num1.top(3)
println(s"BiggestThreeElementOfNum1 has value: ${biggestThreeElementOfNum1.mkString(",")}")
/* The take method takes an integer N as input and returns an array containing the first N element in the
* source RDD.*/
val firstThreeElementOfNum1 = num1.take(3)
println(s"firstThreeElementOfNum1 has value: ${firstThreeElementOfNum1.mkString(",")}")
    /* The takeOrdered method takes an integer N as input and returns an array containing the N smallest elements
     * (according to their natural ordering) in the source RDD.*/
val smallestThreeElementOfNum1 = num1.takeOrdered(3)
println(s"smallestThreeElementOfNum1 has value: ${smallestThreeElementOfNum1.mkString(",")}")
/** *************************Min, Max **********************************************/
    /* The min method returns the smallest element in an RDD and max returns the largest. For an RDD of strings
     * they compare elements lexicographically, which may not be the ordering you expect. */
val minNum = num1.min()
val maxNum = num1.max()
println(s"minNum has value ${minNum}, maxNum has value ${maxNum}")
/** *********************************************** Reduce ******************************/
/* The higher-order reduce method aggregates the elements of the source RDD using an associative and
* commutative binary operator provided to it. It is similar to the fold method; however, it does not require a
* neutral zero value.
*
* associative means (a op b) op c = a op (b op c)
* commutative means a op b = b op a
*
* + is associative and commutative
* / is neither associative nor commutative
* */
val sumReduce = num1.reduce((x, y) => x + y)
println(s"Sum of num1 done by reduce: ${sumReduce}")
/** ******************************************* fold ***********************************/
/* The higher-order fold method aggregates the elements in the source RDD using the specified neutral zero
* value and an associative binary operator. It first aggregates the elements in each RDD partition and then
* aggregates the results from each partition.
*
* The neutral zero value depends on the RDD type and the aggregation operation. For example, if you
* want to sum all the elements in an RDD of Integers, the neutral zero value should be 0. Instead, if you want
* to calculate the products of all the elements in an RDD of Integers, the neutral zero value should be 1.*/
/*
     * fold(zeroValue: T)((acc, value) => acc)
     *  - T is the element type of the RDD
     *  - acc is the accumulator
     *  - value is an element of the RDD
     * To start fold we must give an initial accumulator value which has the same data type as the RDD elements.
* */
val maxByFold = num1.fold(0)((acc, num) => {
if (acc < num) num else acc
})
println(s"Max value done by fold ${maxByFold}")
val productByFold = num1.fold(1)((acc, num) => {
acc * num
})
println(s"Product value done by fold ${productByFold}")
/** *************************************Aggregate **********************************************/
/*
* The aggregate function frees us from the constraint of having the return be the same type as the RDD which
* we are working on. With aggregate, like fold, we supply an initial zero value of the type we want to return.
* We then supply a function to combine the elements from our RDD with the accumulator. Finally, we need to
* supply a second function to merge two accumulators, given that each node accumulates its own results locally.
* */
/*The last argument 4 means the numbers of partitions of the rdd */
val flowers = spark.sparkContext.parallelize(List(11, 12, 13, 24, 25, 26, 35, 36, 37, 24, 15, 16), 4)
//println("Flowers rdd partions number "+flowers.partitions.size)
/*_+_ means sum of the args (x,y)=>x+y */
    /*The first function aggregates the elements inside each partition (intra-partition)
     * The second function combines the results of the first function across partitions (inter-partition) */
val flowerSum = flowers.aggregate(0)((x, y) => x + y, (x, y) => x + y)
    /*If the initial accumulator is not 0, it is added once per partition and once more when the partition results are merged*/
val simpleFlowerSum = flowers.aggregate(2)(_ + _, _ + _)
println("Sum done by aggregate " + flowerSum)
println("Sum done by aggregate " + simpleFlowerSum)
}
/** **********************************************************************************************
* ************************************* 1.4 Saving RDDs **********************
* ***************************************************************************************************/
def RddSaveOperation(spark: SparkSession): Unit = {
/* Generally, after data is processed, results are saved on disk. Spark allows an application developer to save
* an RDD to any Hadoop-supported storage system. An RDD saved to disk can be used by another Spark or
* MapReduce application.*/
/** ********************************Save as textFile *****************************************/
/* The saveAsTextFile method saves the elements of the source RDD in the specified directory on any
* Hadoop-supported file system. Each RDD element is converted to its string representation and stored as a
* line of text*/
val saveRdd = spark.sparkContext.parallelize(List("orange", "apple", "banana", "peach", "kiwi"))
    /* Here I save it on the local file system. If you want to write to HDFS, you need to change file:// to hdfs://.
     * And if you want to write all results in a single part file, you need to change the partitioning of the RDD,
     * because Spark writes one part file per partition. */
saveRdd.saveAsTextFile("file:///tmp/saveRDDTest")
//change rdd partions to 1 then save it to hdfs
//saveRdd.coalesce(1).saveAsTextFile("hdfs://path")
/** ************************************Save as objectFile *****************************************/
/* The saveAsObjectFile method saves the elements of the source RDD as serialized Java objects in
* the specified directory. */
saveRdd.saveAsObjectFile("file:///tmp/saveRDDObjectTest")
/** ***********************************Save as sequence file ****************************************/
    /* The saveAsSequenceFile method saves an RDD of key-value pairs in SequenceFile format. An RDD of key-value
     * pairs can also be saved in text format using the saveAsTextFile */
val pairRdd = saveRdd.map(x => (x, x.contains("a")))
//saveRdd.saveAsSequenceFile("Path") does not work, because it's not a pair rdd
pairRdd.saveAsSequenceFile("file:///tmp/saveRDDSeqTest")
/* Note that all of the preceding methods take a directory name as an input parameter and create one file
* for each RDD partition in the specified directory. This design is both efficient and fault tolerant. Since each
* partition is stored in a separate file, Spark launches multiple tasks and runs them in parallel to write an RDD
     * to a file system. It also helps make the file writing process fault tolerant. If a task writing a partition
* to a file fails, Spark creates another task, which rewrites the file that was created by the failed task.*/
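    /* A hedged sketch of the single-file pattern mentioned above: reduce the RDD to one partition before saving,
     * so only one part file is written (the output path below is just an example). */
    saveRdd.coalesce(1).saveAsTextFile("file:///tmp/saveRDDSingleFileTest")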
}
/** **********************************************************************************************
* ************************************* 1.5 Caching RDDs (Persistence) **********************
* *************************************************************************************************/
/*
* One of the most important capabilities in Spark is persisting (or caching) a dataset in memory across operations.
* By default, when an action method of an RDD is called, Spark creates that RDD from its parents, which may
* require creation of the parent RDDs, and so on. This process continues until Spark gets to the root RDD,
* which Spark creates by reading data from a storage system. This happens every time an action method is
* called. Thus, by default, every time an action method is called, Spark traverses the lineage tree of an RDD
* and computes all the transformations to obtain the RDD whose action method was called.
*
   * For example, the following code will read the text file from disk twice, even though textFile is called only once:
*
* val logs=sc.textFile("path/to/log-files")
* val warningLogs= logs.filter{l=>l.contains("WARN")}
* val errorLogs=logs.filter{l=>l.contains("ERROR")}
* val warnCount=warningLogs.count
* val errorCount=errorLogs.count
*
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!caching is lazy!!!!!!!!!!!!!!!!!!!!!!
   * Caching an RDD takes effect only the first time an action is called on it. After that first action,
   * Spark stores the RDD in executor memory on each worker node. Each executor stores in memory the RDD partitions
   * that it computes.
*
   * So if an RDD is used many times in a Spark application (e.g. iterative algorithms), caching it can improve
   * performance.
*
* When you persist an RDD, each node stores any partitions of it that it computes in memory and reuses them in other
* actions on that dataset (or datasets derived from it). This allows future actions to be much faster
* (often by more than 10x).
*
* You can mark an RDD to be persisted using the persist() or cache() methods on it. The persist method is a generic
* version of the cache method. It allows an RDD to be stored in memory, disk, or both. Cache method can only store in
* memory.
*
* Spark’s cache is fault-tolerant – if any partition of an RDD is lost, it will automatically be recomputed using
* the transformations that originally created it.
*
*
* In addition, each persisted RDD can be stored using a different storage level, allowing you, for example,
* to persist the dataset on disk, persist it in memory but as serialized Java objects (to save space), replicate it
* across nodes. These levels are set by passing a StorageLevel object (Scala, Java, Python) to persist().
* The cache() method is a shorthand for using the default storage level, which is StorageLevel.MEMORY_ONLY
* (store deserialized objects in memory).
* */
def RddCachingOperations(spark: SparkSession): Unit = {
val logPath = "/home/pliu/data_set/spark_data_set/spark_lessons/Lesson01_RDD/log.log"
val logs = spark.sparkContext.textFile(logPath)
val errorsAndWarnings = logs filter { l => l.contains("ERROR") || l.contains("WARN") }
//cache in memory
errorsAndWarnings.cache()
// The following code are persist code example
//errorsAndWarnings.persist(StorageLevel.MEMORY_ONLY)
//errorsAndWarnings.persist(StorageLevel.MEMORY_AND_DISK)
    //Store the RDD as serialized Java objects; this uses less memory, but is more CPU intensive to read.
//errorsAndWarnings.persist(StorageLevel.MEMORY_AND_DISK_SER_2)
val errorLogs = errorsAndWarnings.filter(l => l.contains("ERROR"))
val warnLogs = errorsAndWarnings.filter(l => l.contains("WARN"))
val errorCount = errorLogs.count()
val warnCount = warnLogs.count()
/* Cache Memory Management
* Spark automatically manages cache memory using LRU (least recently used) algorithm. It removes old
* RDD partitions from cache memory when needed. In addition, the RDD API includes a method called
* unpersist(). An application can call this method to manually remove RDD partitions from memory.*/
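    /* A minimal sketch: once the counts above are computed, the cached partitions can be evicted manually. */
    errorsAndWarnings.unpersist()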
}
/** **********************************************************************************************
* ****************** 1.6 Use user define function(udf) in transformation and action******************
* *************************************************************************************************/
def RddUdfOperations(spark: SparkSession): Unit = {
/*
*
* Spark’s API relies heavily on passing functions in the driver program to run on the cluster. There are two
* recommended ways to do this:
     * 1. Anonymous function syntax, which can be used for short pieces of code (lambda expressions).
* 2. Static methods in a global singleton object.
*
* For example, you can define object addTextToLines and method addEat and then use them in a map, as follows: */
val fruits = spark.sparkContext.parallelize(List("orange", "apple", "banana", "wiki"))
val eatFruits = fruits.map(line => addTextToLines.addEat(line))
println(s"eatFruits value: ${eatFruits.collect.mkString(",")}")
/* You can also call directly a function define inside the same object*/
val throwF = fruits.map(line => throwFruits(line))
println(s"throwF value: ${throwF.collect.mkString(",")}")
    /*All transformations and actions that take a function as an argument can also use a user-defined function*/
}
object addTextToLines {
def addEat(line: String): String = {
return "Eat " + line
}
}
def throwFruits(line: String): String = {
return "Throw " + line
}
/** *****************************************************************************************************************
* ************************************** Index ******************************************************************
* ***********************************************************************************************************/
/* Index 1
* RDDs transformation List:
* - map(func): Return a new distributed dataset formed by passing each element of the source through a function func.
*
* - flatMap(func): Similar to map, but each input item can be mapped to 0 or more output items (so func should
* return a Seq rather than a single item).
*
* - mapPartitions(func): Similar to map, but runs separately on each partition (block) of the RDD, so func
* must be of type Iterator<T> => Iterator<U> when running on an RDD of type T.
*
* - filter(func): Return a new dataset formed by selecting those elements of the source on which func returns true.
*
* - sample(withReplacement, fraction, seed): Sample a fraction fraction of the data, with or without
* replacement, using a given random number generator seed.
*
* - mapPartitionsWithIndex(func): Similar to mapPartitions, but also provides func with an integer value
* representing the index of the partition, so func must be of type
* (Int, Iterator<T>) => Iterator<U> when running on an RDD of type T.
*
* - union(otherDataset): Return a new dataset that contains the union of the elements in the source dataset
* and the argument.
*
* - intersection(otherDataset): Return a new RDD that contains the intersection of elements
* in the source dataset and the argument.
*
* - distinct([numTasks])): Return a new dataset that contains the distinct elements of the source dataset.
*
* - groupByKey([numTasks]): When called on a dataset of (K, V) pairs, returns a dataset of
* (K, Iterable<V>) pairs.
* Note: If you are grouping in order to perform an aggregation
* (such as a sum or average) over each key, using reduceByKey or aggregateByKey
* will yield much better performance.
* Note: By default, the level of parallelism in the output depends on the number
* of partitions of the parent RDD. You can pass an optional numTasks argument to
* set a different number of tasks.
*
* - reduceByKey(func, [numTasks]) When called on a dataset of (K, V) pairs, returns a dataset of (K, V) pairs
* where the values for each key are aggregated using the given reduce function
* func, which must be of type (V,V) => V. Like in groupByKey, the number of
* reduce tasks is configurable through an optional second argument.
*
* - aggregateByKey(zeroValue)(seqOp, combOp, [numTasks]) When called on a dataset of (K, V) pairs, returns a
* dataset of (K, U) pairs where the values for each key
* are aggregated using the given combine functions and a
* neutral "zero" value. Allows an aggregated value type
* that is different than the input value type, while
* avoiding unnecessary allocations. Like in groupByKey,
* the number of reduce tasks is configurable through
* an optional second argument.
*
* - sortByKey([ascending], [numTasks]) When called on a dataset of (K, V) pairs where K implements Ordered,
* returns a dataset of (K, V) pairs sorted by keys in ascending or
* descending order, as specified in the boolean ascending argument.
*
* -join(otherDataset, [numTasks]) When called on datasets of type (K, V) and (K, W), returns a dataset of
* (K, (V, W)) pairs with all pairs of elements for each key. Outer joins are
* supported through leftOuterJoin, rightOuterJoin, and fullOuterJoin.
*
* - cogroup(otherDataset, [numTasks]) When called on datasets of type (K, V) and (K, W), returns a dataset of
* (K, (Iterable<V>, Iterable<W>)) tuples. This operation is also called groupWith.
*
* - cartesian(otherDataset) When called on datasets of types T and U, returns a dataset of (T, U) pairs (all pairs of elements).
*
* - pipe(command, [envVars]) Pipe each partition of the RDD through a shell command, e.g. a Perl or bash script.
* RDD elements are written to the process's stdin and lines output to its stdout are
* returned as an RDD of strings.
*
* - coalesce(numPartitions) Decrease the number of partitions in the RDD to numPartitions. Useful for running
* operations more efficiently after filtering down a large dataset.
*
* - repartition(numPartitions) Reshuffle the data in the RDD randomly to create either more or fewer partitions
* and balance it across them. This always shuffles all data over the network.
*
* - repartitionAndSortWithinPartitions(partitioner) Repartition the RDD according to the given partitioner and,
* within each resulting partition, sort records by their keys.
* This is more efficient than calling repartition and then
* sorting within each partition because it can push the sorting
* down into the shuffle machinery.
*
* */
/* Index 2.
* RDDs actions list:
* - reduce(func): Aggregate the elements of the dataset using a function func (which takes two arguments and
* returns one). The function should be commutative and associative so that it can be computed
* correctly in parallel.
*
* - top(n): return the top n element of the dataset
*
* - collect(): Return all the elements of the dataset as an array at the driver program. This is usually useful
* after a filter or other operation that returns a sufficiently small subset of the data.
*
* - count() Return the number of elements in the dataset.
*
* - first() Return the first element of the dataset (similar to take(1)).
*
* - take(n) Return an array with the first n elements of the dataset.
*
* - takeSample(withReplacement, num, [seed]) Return an array with a random sample of num elements of the dataset,
* with or without replacement, optionally pre-specifying a random number
* generator seed.
*
* - takeOrdered(n, [ordering]) Return the first n elements of the RDD using either their natural order or a
* custom comparator.
*
* - saveAsTextFile(path) Write the elements of the dataset as a text file (or set of text files) in a given
* directory in the local filesystem, HDFS or any other Hadoop-supported file system.
* Spark will call toString on each element to convert it to a line of text in the file.
*
* - saveAsSequenceFile(path) (Java and Scala) Write the elements of the dataset as a Hadoop SequenceFile in a
* given path in the local filesystem, HDFS or any other Hadoop-supported file system.
* This is available on RDDs of key-value pairs that implement Hadoop's Writable interface.
* In Scala, it is also available on types that are implicitly convertible to Writable
* (Spark includes conversions for basic types like Int, Double, String, etc).
*
* - saveAsObjectFile(path) (Java and Scala) Write the elements of the dataset in a simple format using Java
* serialization, which can then be loaded using SparkContext.objectFile().
*
* - countByKey() Only available on RDDs of type (K, V). Returns a hashmap of (K, Int) pairs with the count of each key.
*
* - foreach(func) Run a function func on each element of the dataset. This is usually done for side effects such
* as updating an Accumulator or interacting with external storage systems. Note: modifying variables
* other than Accumulators outside of the foreach() may result in undefined behavior. See
* Understanding closures for more details.
*
* */
/* Index 3.
* The full set of storage levels is:
* MEMORY_ONLY -> Store RDD as deserialized Java objects in the JVM. If the RDD does not fit in memory,
* some partitions will not be cached and will be recomputed on the fly each time they're needed.
* This is the default level.
* MEMORY_AND_DISK -> Store RDD as deserialized Java objects in the JVM. If the RDD does not fit in memory,
* store the partitions that don't fit on disk, and read them from there when they're needed.
* MEMORY_ONLY_SER -> (Java and Scala) Store RDD as serialized Java objects (one byte array per partition).
* This is generally more space-efficient than deserialized objects, especially when using a
* fast serializer, but more CPU-intensive to read.
* MEMORY_AND_DISK_SER -> (Java and Scala) Similar to MEMORY_ONLY_SER, but spill partitions that don't fit in
* memory to disk instead of recomputing them on the fly each time they're needed.
* DISK_ONLY -> Store the RDD partitions only on disk.
* MEMORY_ONLY_2, MEMORY_AND_DISK_2, etc. -> Same as the levels above, but replicate each partition on
* two cluster nodes.
* OFF_HEAP (experimental) -> Similar to MEMORY_ONLY_SER, but store the data in off-heap memory.
* This requires off-heap memory to be enabled.
* */
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/TaobaoSales.scala | package org.pengfei.spark.application.example
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}
object TaobaoSales {
def main(args:Array[String]): Unit ={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local").
appName("TaobaoSales").
getOrCreate()
val userLogDF=getDFFromDB(spark)
/*val filePath="file:///DATA/data_set/spark/taobao_data_set/small_user_log.csv"
val userLogDF=getDFFromCSV(spark,filePath)*/
//userLogDF.show(5)
userLogDF.write.format("parquet").save("file:///tmp/taobao.parquet")
}
def getDFFromDB(spark : SparkSession): DataFrame ={
val userLogDF=spark.read.format("jdbc").option("url", "jdbc:postgresql://127.0.0.1:5432/dbtaobao").option("driver","org.postgresql.Driver").option("dbtable", "user_log").option("user", "pliu").option("password", "<PASSWORD>").load()
return userLogDF
}
def getDFFromCSV(spark:SparkSession,filePath:String):DataFrame ={
val userLogSchema = StructType(Array(
StructField("user_id",IntegerType,true),
StructField("item_id",IntegerType,true),
StructField("cat_id",IntegerType,true),
StructField("merchant_id",IntegerType,true),
StructField("brand_id",IntegerType,true),
StructField("month",StringType,true),
StructField("day",StringType,true),
StructField("action",IntegerType,true),
StructField("age_range",IntegerType,true),
StructField("gender",IntegerType,true),
StructField("province",StringType,true)
))
val userLogDF= spark.read.format("csv").option("header","false").schema(userLogSchema).load(filePath)
return userLogDF
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson00_Spark_Core/Lesson00_Spark_Basic_Concept.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson00_Spark_Core/Lesson00_Spark_Basic_Concept.scala
package org.pengfei.Lesson00_Spark_Core
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson00_Spark_Basic_Concept {
/*********************************************************************************************
* *****************************0. Spark introduction ***************************************
* ******************************************************************************************/
  /* Spark is an in-memory cluster computing framework for processing and analyzing large amounts of
   * data. It provides a simple programming interface, which enables an application developer to easily
   * use the CPU, memory, and storage resources across a cluster of servers for processing large datasets */
/*Key components of a spark cluster
* 1.Master Node
* 1.1 Driver Program : A driver program is an spark application that uses Spark as a library. It provides the data
* processing code that Spark executes on the worker nodes. A driver program can launch one or
* more jobs on a Spark cluster.
   * 1.2 Cluster Manager : (standalone (Spark's default resource manager), YARN, Mesos, EC2, Kubernetes). The cluster
   *                      manager (resource manager) acquires cluster resources for executing a job. It provides
   *                      low-level scheduling of cluster
* resources across applications. It enables multiple applications to share cluster resources
* and run on the same worker nodes.
*
* 2. Worker Node : worker node provides CPU, memory, and storage to a spark application.
* 2.1 Executor : spark executors runs on worker node as distributed process of a Spark application(aka. driver
* program). An executor is a JVM process that Spark creates on each worker for an application.
* It executes application code concurrently in multiple threads. It can also cache data in memory
* or disk. An executor has the same lifespan as the application for which it is created. When a Spark
* application terminates, all executors created for it also terminate.
*
* 2.2 Tasks : A task is the smallest unit of work that Spark sends to an executor. It is executed by a thread in an
* executor on a worker node. Each task performs some computations to either return a result to a driver
* program or partition its output for shuffle. Spark creates a task per data partition. An executor runs
* one or more tasks concurrently. The amount of parallelism is determined by the number of partitions.
* More partitions mean more tasks processing data in parallel.
*
   * 2.3 Executor number on a worker : If you specify the number of executors when invoking spark-submit
   *                                   (--num-executors X), you should get the number you ask for. If you do not
   *                                   specify it, Spark by default uses dynamic allocation, which starts more
   *                                   executors if needed. In this case you can configure the behaviour, e.g. the
   *                                   max number of executors,
* see http://spark.apache.org/docs/latest/configuration.html#dynamic-allocation
* */
/*
* Other important terminology
* Shuffle: A shuffle redistributes data among a cluster of nodes. It is an expensive operation because it
* involves moving data across a network. Note that a shuffle does not randomly redistribute data;
* it groups data elements into buckets based on some criteria. Each bucket forms a new partition.
* Job: A job is a set of computations that Spark performs to return results to a driver program. Essentially,
* it is an execution of a data processing algorithm on a Spark cluster. An application can launch multiple jobs.
* Stage: A stage is a collection of tasks. Spark splits a job into a DAG of stages. A stage may depend on another
* stage. For example, a job may be split into two stages, stage 0 and stage 1, where stage 1 cannot begin
* until stage 0 is completed. Spark groups tasks into stages using shuffle boundaries. Tasks that do not
* require a shuffle are grouped into the same stage. A task that requires its input data to be shuffled begins
* a new stage.
*/
/* For more details of spark introduction, please visit my wiki page
employes:pengfei.liu:big_data:spark:l01_spark_introduction*/
def main(args:Array[String]): Unit ={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("LessonO_Spark_Core").
getOrCreate()
// import sparkSession.implicits._ for all schema conversion magic.
//you can get sparkContext with sparkSession
val sc=spark.sparkContext
val sqlc=spark.sqlContext
//Create a rdd with a list
val rddExample=sc.parallelize(List("I love meat","apple","orange","it's ok"))
println(s"RDD contains ${rddExample.count()} element" )
println(s"RDD content values : ${rddExample.collect().toList.toString()}")
//We will see how to play with rdd in the next lesson
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/classification/Word2Vec.scala | <gh_stars>0
package org.pengfei.spark.ml.classification
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.sql.SparkSession
object Word2Vec {
def main(args:Array[String]): Unit ={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("Word2Vec").
getOrCreate()
import spark.implicits._
val documentDF = spark.createDataFrame(Seq(
"Hi I heard about Spark".split(" "),
"I wish Java could use case classes".split(" "),
"Logistic regression models are neat".split(" ")
).map(Tuple1.apply)).toDF("text")
//documentDF.printSchema()
/*
* root
|-- text: array (nullable = true)
| |-- element: string (containsNull = true)
*
* */
//documentDF.show()
/*+--------------------+
| text|
+--------------------+
|[Hi, I, heard, ab...|
|[I, wish, Java, c...|
|[Logistic, regres...|
+--------------------+*/
/*
* Create an instance of the Word2Vec model; it's an estimator.
* */
//setInputCol sets the input column of the input dataset
//setOutputCol sets the output column of the result dataset
//setVectorSize sets the size of the generated word vector, in our case 6
val word2Vec = new Word2Vec().
setInputCol("text").
setOutputCol("result").
setVectorSize(6).
setMinCount(0)
/*
* Train the model with the dataset
* */
val model = word2Vec.fit(documentDF)
/*println(model.getClass().getName)
org.apache.spark.ml.feature.Word2VecModel*/
/*
* transform the data set to word vector
* */
val result = model.transform(documentDF)
/*println(result.getClass().getName)
org.apache.spark.sql.Dataset*/
result.show()
/*
* +--------------------+--------------------+
| text| result|
+--------------------+--------------------+
|[Hi, I, heard, ab...|[0.01390241272747...|
|[I, wish, Java, c...|[0.01954013934092...|
|[Logistic, regres...|[-2.5894027203321...|
+--------------------+--------------------+
* */
result.select("result").take(3).foreach(println)
/*
* [[0.013902412727475166,0.00704740546643734,0.00576745766447857,-0.03196578547358513,0.0022785402019508184,0.030404809676110745]]
[[0.01954013934092862,0.010227076576224394,0.008941462795649256,0.01654639121677194,-0.03726007044315338,-0.00852930758680616]]
[[-2.5894027203321457E-4,0.025160790234804154,-0.001287880726158619,-0.024124881252646446,0.0072902611456811435,-0.008568133413791658]]
*
* */
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/tmp/Test.scala | <reponame>pengfei99/Spark<gh_stars>0
package org.pengfei.tmp
import java.util
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{BooleanType, IntegerType, StringType, StructField, StructType}
import org.pengfei.Lesson04_Spark_SQL.MergeListsUDAF
import scala.collection.mutable
object Test {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Test").master("local[2]").getOrCreate()
import spark.implicits._
val chantier_201_filePath="/home/pliu/Documents/Projects/Hyperthesaux/Sample_Data/Bibracte/Extraction_BDB/export_bdB_10_09_2019/201chantier.csv"
val chantier_201_Schema = StructType(Array(
StructField("annees_de_fonctionne", StringType, true),
StructField("auteur_saisie", StringType, true),
StructField("chantier", IntegerType, true),
StructField("code_OA", IntegerType, true),
StructField("commentaire", StringType, true),
StructField("commentaire_chantier", StringType, true),
StructField("compteur", StringType, true),
StructField("date_derniere_modif", StringType, true),
StructField("date_saisie", StringType, true),
StructField("lieu_dit_adresse", StringType, true),
StructField("localisation_cadastral", IntegerType, true),
StructField("nom_chantier", StringType, true),
StructField("nom_commune", StringType, true),
StructField("nom_departement", StringType, true),
StructField("numero_INSEE_comm", IntegerType, true),
StructField("proprietaire", StringType, true),
StructField("proprietaire unique", StringType, true),
StructField("tampon_1", StringType, true),
StructField("total_fiche_trouvee", IntegerType, true),
StructField("xcentroide", IntegerType, true),
StructField("xmax", IntegerType, true),
StructField("xmin", IntegerType, true),
StructField("ycentroide", IntegerType, true),
StructField("ymax", IntegerType, true),
StructField("ymin", IntegerType, true)))
val chantierDF = spark.read.format("com.databricks.spark.csv").option("header", "false").schema(chantier_201_Schema).load(chantier_201_filePath)
chantierDF.show(5,false)
val beuvrayDF=chantierDF.filter(col("nom_chantier").contains("<NAME>"))
beuvrayDF.show(15)
chantierDF.select("auteur_saisie").distinct().show(10);
val rapDf=chantierDF.filter(col("auteur_saisie")===("<NAME>"))
rapDf.show(5)
val userEnterCount=chantierDF.groupBy("auteur_saisie").count().orderBy(col("count").desc).show()
val countNull=chantierDF.select("nom_chantier").filter(col("nom_chantier").isNull).count()
println(s"Number of rows with a null nom_chantier: $countNull")
}
def myConcat(text1:String,text2:String):String={
text1.concat(text2)
}
def combineTwoColumn(cluster:Int,label:String):(Int,String)={
return (cluster,label)
}
def labelCount(label:String):(String,Int)={
return (label,1)
}
def entropy(counts: Iterable[Int]):Double={
// get all positive values in the counts collection
val values=counts.filter(_ >0)
// cast all values to double and do sum
val n = values.map(_.toDouble).sum
val entropy=values.map{v=>
//calculate p first
val p=v / n
// accumulate the contribution -p * log(p) of this value to the entropy
-p * math.log(p)
}.sum
/* We can use the following code, if you don't want to define a local variable
values.map { v =>
-(v / n) * math.log(v / n)
}.sum
*/
return entropy
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_2_2_Anomaly_Detection_Algo.scala | package org.pengfei.Lesson05_Spark_ML
object Lesson05_2_2_2_Anomaly_Detection_Algo {
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/CustomArthMean.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson04_Spark_SQL
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
/*
 * This class extends UserDefinedAggregateFunction to implement a custom arithmetic mean as an aggregate function.
* You can also specify any constructor arguments. For instance you can have CustomMean(arg1: Int, arg2: String)
* In this example, we just use an empty constructor
* */
class CustomArthMean extends UserDefinedAggregateFunction{
//Define input data type schema, it's the data type of your column, in our case, we choose double for all numbers
override def inputSchema: StructType = StructType(Array(StructField("item", DoubleType)))
// intermediate value schema, it can be different from the input value. in our case, as we calculate mean, we need to
// do sum first, then count the size of all number list. So we need sum:Double, count:Long
override def bufferSchema: StructType = StructType(Array(
StructField("sum",DoubleType),
StructField("count",LongType)
))
//Return type of the function
override def dataType: DataType = DoubleType
// The function is deterministic
override def deterministic: Boolean = true
// This function is called whenever key changes, we need to reset sum and count to 0 for each group in the groupBy
// buffer(0) is for storing intermediate value of sum, buffer(1) is for count
override def initialize(buffer: MutableAggregationBuffer): Unit = {
buffer(0)=0.toDouble
buffer(1)=0L
}
//Iterate over each entry of a group
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
//aggregate the sum with input
buffer(0)=buffer.getDouble(0)+input.getDouble(0)
// increase count to 1
buffer(1)=buffer.getLong(1)+1
}
/*Merge two partial aggregates, buffer1 is the new intermediate aggregator, buffer2 is the result of each group update
* return value. We merge all these return values to buffer1*/
override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
buffer1(0)=buffer1.getDouble(0)+buffer2.getDouble(0)
buffer1(1)=buffer1.getLong(1)+buffer2.getLong(1)
}
// called after all the entries have been visited. It returns the final output of the aggregate function. In our case,
// we use sum/count to return the mean.
override def evaluate(buffer: Row): Any = {
buffer.getDouble(0)/buffer.getLong(1)
}
}
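
/* A minimal, self-contained usage sketch for the UDAF above (object, app name and column names are illustrative,
 * not part of the original lesson): it builds a tiny DataFrame, then applies the custom mean through both the
 * DataFrame API and a plain SQL query. */
object CustomArthMeanUsageSketch {
  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.functions.col

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("CustomArthMeanUsage").getOrCreate()
    import spark.implicits._

    val df = Seq(("dept_a", 10.0), ("dept_a", 20.0), ("dept_b", 30.0)).toDF("dept", "sales")
    val customMean = new CustomArthMean

    // DataFrame API: the aggregator instance is applied to a Column inside agg()
    df.groupBy(col("dept")).agg(customMean(col("sales")).as("mean_sales")).show()

    // SQL API: register the aggregator under a name and call it like a built-in function
    spark.udf.register("custom_mean", customMean)
    df.createOrReplaceTempView("sales_table")
    spark.sql("SELECT dept, custom_mean(sales) AS mean_sales FROM sales_table GROUP BY dept").show()
  }
}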
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson10_Spark_Application_ETL/Lesson10_1_record_deduplication.scala | package org.pengfei.Lesson10_Spark_Application_ETL
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.Dataset
object Lesson10_1_record_deduplication {
/*******************************************************************************************************
*******************************10.1 Record deduplication***********************************************
*******************************************************************************************************/
/********************************10.1.1 Introduction*********************************************/
/* The problem that we’re going to study in this chapter goes by a lot of different names
* in the literature and in practice: record linkage, entity resolution, record deduplication, merge-and-purge,
* and list washing.
*
* The general structure of the problem is something like this: we have a large collection
* of records from one or more source systems, and it is likely that multiple records
* refer to the same underlying entity, such as a customer, a patient, or the location of a
* business or an event. Each entity has a number of attributes, such as a name, an
* address, or a birthday, and we will need to use these attributes to find the records that
* refer to the same entity. Unfortunately, the values of these attributes aren’t perfect:
* values might have different formatting, typos, or missing information that means that
* a simple equality test on the values of the attributes will cause us to miss a significant
* number of duplicate records.
*
* For example
* Name | Address | City | State | Phone
* Josh’s Coffee Shop | 1234 Sunset Boulevard | West Hollywood | CA | (213)-555-1212
* Josh Coffee | 1234 Sunset Blvd West | Hollywood | CA | 555-1212
* Coffee Chain #1234 | 1400 Sunset Blvd #2 | Hollywood | CA | 206-555-1212
* Coffee Chain Regional Office | 1400 Sunset Blvd Suite 2 | Hollywood | California | 206-555-1212
*
* The first two entries in this table refer to the same small coffee shop, even though a
* data entry error makes it look as if they are in two different cities (West Hollywood
* and Hollywood). The second two entries, on the other hand, are actually referring to
* different business locations of the same chain of coffee shops that happen to share a
* common address: one of the entries refers to an actual coffee shop, and the other one
* refers to a local corporate office location. Both of the entries give the official phone
* number of corporate headquarters in Seattle.
* */
/********************************10.1.2 sample Dataset*********************************************/
/* In this lesson, we will use a sample data from UC irvine Machine Learning repository.
* The data set we’ll analyze was curated from a record linkage study performed
* at a German hospital in 2010, and it contains several million pairs of patient
* records that were matched according to several different criteria, such as the patient’s
* name (first and last), address, and birthday. Each matching field was assigned a
* numerical score from 0.0 to 1.0 based on how similar the strings were, and the data
* was then hand-labeled to identify which pairs represented the same person and
* which did not.
*
* Each row represent a match of two patient record, id_1 is the id for patient 1, id_2 is the id for patient 2
*
* The underlying values of the fields that were used to create the data set
* were removed to protect the privacy of the patients. Numerical identifiers, the match
* scores for the fields, and the label for each pair (match versus non-match) were published
* for use in record deduplication research.
*
* $ mkdir linkage
* $ cd linkage/
* $ curl -L -o donation.zip https://bit.ly/1Aoywaq
* $ unzip donation.zip
* $ unzip 'block_*.zip'*/
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().master("local[2]").appName("Lesson10_Spark_Application_ETL").getOrCreate()
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/spark_lessons/Lesson10_Spark_Application_ETL/hospital_data"
val block1Name="/block_1.csv"
    /* One example line of the csv file: 39086,47614,1,?,1,?,1,1,1,1,1,TRUE. The "?" marks a null value; in the
     * following spark read, we declare this with the nullValue option. */
val block1Df=spark.read.option("header","true").option("nullValue","?").option("inferSchema","true").csv(filePath+block1Name)
/********************************10.1.3 understand Dataset*********************************************/
// UnderstandDFOperation(spark,block1Df)
/* With the above command, we know the schema, the size of the dataset, Now we need to understand each column
* - The first two fields (id_1, id_2)are integer IDs that represent the patients that were matched in the record
* - The next nine values are (possibly missing) numeric values (either doubles or ints) that represent match
* scores on different fields of the patient records, such as their names, birthdays, and locations. The fields
* are stored as integers when the only possible values are match (1) or no-match (0), and doubles
* whenever partial matches are possible.
* - The last field is a boolean value (true or false) indicating whether or not the pair of patient records
* represented by the line was a match.
*
 * The goal of this lesson is to come up with a simple classifier that allows us to predict whether a record will
* be a match based on the values of the match scores.
*
* */
/********************************10.1.4 basic statistics of Dataset*********************************************/
// BasicStatsExample(spark,block1Df)
/********************************10.1.5 Pivoting and Reshaping of Dataset*********************************************/
/* 1. For example, we want to transform the summary dataframe
*
* |summary| id_1| id_2| cmp_fname_c1| cmp_fname_c2| cmp_lname_c1| ... |
+-------+------------------+-----------------+------------------+------------------+-------------------+
| count| 574913| 574913| 574811| 10325| 574913|
| mean|33271.962171667714| 66564.6636865056|0.7127592938252765|0.8977586763518972|0.31557245780987964|
| stddev| 23622.66942593358|23642.00230967225|0.3889286452463553|0.2742577520430534| 0.3342494687554251|
| min| 1| 6| 0.0| 0.0| 0.0|
| max| 99894| 100000| 1.0| 1.0| 1.0|
+-------+------------------+-----------------+------------------+------------------+
*
* to the following form
*
* +------+------------+-------------------+
|metric| field| value|
+------+------------+-------------------+
| count| id_1| 5749132.0|
| count| id_2| 5749132.0|
| count|cmp_fname_c1| 5748125.0|
...
| count| cmp_by| 5748337.0|
| count| cmp_plz| 5736289.0|
| mean| id_1| 33324.48559643438|
| mean| id_2| 66587.43558331935|
...
*
     * To perform this kind of transformation, you must start from the input dataframe and ask whether you can
     * build the target by iterating over each row, or over some columns.
     *
     * In our example, row 1 holds the count value of every field, and each value's position corresponds to a
     * field name, e.g. row(1)'s value is for id_1.
     * A row of a dataframe is an array of objects.
* 2. After the transformation to longForm we want to see the count, mean for each field
*/
PivotingDataFrameExample(block1Df)
/**********************10.1.6 Selecting features ****************************************/
/*
* A good feature for classification problem has two properties:
* - The good feature tends to have significantly different values for different label (in our case, it's matches or
* nonmatches). So the difference between the means of the field of different label will be large
*
* - The good features must occur often enough in the data that we can rely on it to be regularly available for
* any pair of records.
*
*/
// SelectingFeaturesOperations(block1Df)
    /* The stats below show, for each field, "total", which is the total count of the field in both the match and
     * unmatch datasets, and "delta", which is the difference between the means of the match and unmatch datasets.
+------------+--------+-------------------+
| field| total| delta|
+------------+--------+-------------------+
| cmp_plz|573618.0| 0.9524975516429005|
|cmp_lname_c2| 239.0| 0.8136949970410104|
| cmp_by|574851.0| 0.7763379425859384|
| cmp_bd|574851.0| 0.7732820129086737|
|cmp_lname_c1|574913.0| 0.6844795197263095|
| cmp_bm|574851.0| 0.510834819548174|
|cmp_fname_c1|574811.0| 0.2853115682852544|
|cmp_fname_c2| 10325.0|0.09900440489032625|
| cmp_sex|574913.0|0.03452211590529575|
+------------+--------+-------------------+
* cmp_fname_c2 isn’t very useful because it’s missing a lot of the time and the difference in the
* mean value for matches and nonmatches is relatively small—0.09, for a score that ranges from 0 to 1.
* The cmp_sex feature also isn’t particularly helpful because even though it’s available for any pair of records, the
* difference in means is just 0.03.
*
* Features cmp_plz and cmp_by, on the other hand, are excellent. They almost always occur for any pair of records,
* and there is a very large difference in the mean values (more than 0.77 for both features.) Features cmp_bd,
 * cmp_lname_c1, and cmp_bm also seem beneficial: they are generally available in the data set and the difference
 * in mean values for matches and nonmatches is substantial.
*
* Features cmp_fname_c1 and cmp_lname_c2 are more of a mixed bag: cmp_fname_c1 doesn’t discriminate all that well
* (the difference in the means is only 0.28) even though it’s usually available for a pair of records, whereas
* cmp_lname_c2 has a large difference in the means but it’s almost always missing. It’s not quite obvious under
* what circumstances we should include these features in our model based on this data.
*
* For now, we’re going to use a simple scoring model that ranks the similarity of pairs of records based on the
* sums of the values of the obviously good features: cmp_plz, cmp_by, cmp_bd, cmp_lname_c1, and cmp_bm. For the
* few records where the values of these features are missing, we’ll use 0 in place of the null value in our sum.
* We can get a rough feel for the performance of our simple model by creating a data frame of the computed scores
* and the value of the is_match column and evaluating how well the score discriminates between matches and
* nonmatches at various thresholds.*/
/********************** 10.1.7 Preparing models for production environments *************************************/
// ProductionExample(block1Df)
}
def UnderstandDFOperation(spark:SparkSession,df:DataFrame):Unit={
df.printSchema()
val rowNum=df.count()
val columnNum=df.columns.length
println(s"Block1 has ${rowNum} rows and ${columnNum} columns")
df.show(5)
}
/********************************10.1.4 basic statistics of Dataset*********************************************/
def BasicStatsExample(spark:SparkSession,df:DataFrame):Unit={
// count match and unmatch number
df.groupBy("is_match").count().orderBy(desc("count")).show()
// get avg and stddev of cmp_sex
df.agg(avg("cmp_sex"), stddev("cmp_sex")).show()
    // summary statistics
val summary= df.describe()
summary.show()
val matched=df.where("is_match=true")
matched.describe()
val unmatched=df.filter(df("is_match")===false)
unmatched.describe()
}
/********************************10.1.5 Pivoting and Reshaping of Dataset*********************************************/
def PivotingDataFrameExample(df:DataFrame):Unit={
    //we can get the sparkSession from the dataframe
import df.sparkSession.implicits._
//get summary stats
val summary= df.describe()
// get the sechma of df which has type StructType(StructField(id_1,IntegerType,true), StructField*)
val schema = df.schema
//println(schema.toString())
summary.show(5)
val longForm:Dataset[(String,String,Double)]= summary.flatMap(row=>{
// row.getString(0) get the first element of the current row which is the metric name (e.g. count, mean)
val metric=row.getString(0)
// we loop over the rest of the elements in the current row
(1 until row.size).map(i=> {
(metric,schema(i).name,row.getString(i).toDouble)
//schema(i).name returns the name of the column, row.getString(i).toDouble returns the value of the column of
// the current row
})
})
val longDF=longForm.toDF("metric","field","value")
longDF.show()
    /* We may also want to reverse this transformation: pivot the long form back into a wide form, as below. */
/****************Now, we want to see the mean, count of each value*********/
val wideDF=longDF.groupBy("field").pivot("metric",Seq("count","mean","stddev","min","max"))
.agg(first("value"))
/*The pivot operator needs to know the distinct set of values of the pivot column that we want to use for
* the columns, and we can specify the value in each cell of the wide table by using an agg(first) operation
* on the values column, which works correctly because there is only a single value for each combination
* of field and metric*/
wideDF.show()
}
def SelectingFeaturesOperations(df:DataFrame):Unit={
val spark=df.sparkSession
val matchSummary=df.filter(df("is_match")===true).describe()
val missSummary=df.where("is_match=false").describe()
/*matchSummary.show(5)
missSummary.show(5)*/
// use the method pivotSummary in Pivot.scala to transform the match and miss dataset
val matchST=Pivot.pivotSummary(matchSummary)
val missST=Pivot.pivotSummary(missSummary)
//matchST.show(5)
//missST.show(5)
    /* We can use the isin function to exclude the fields id_1 and id_2, because we know they are not features.
     * We select only the field, count, and mean columns. */
val execuldeList=List("id_2","id_1")
val cleanMatch=matchST.filter(!col("field").isin(execuldeList:_*)).select("field","count","mean")
//cleanMatch.show()
val cleanUnmatch=missST.filter(!col("field").isin(execuldeList:_*)).select("field","count","mean")
//cleanUnmatch.show()
    /* With the following code, we have a column name problem: the count and mean column names are duplicated
* val innerJoin=cleanMatch.join(cleanUnmatch,cleanMatch("field")===cleanUnmatch("field"),"inner")
* innerJoin.show()
*/
    /* To solve this problem, we need to use aliases for the dataframes and columns */
val df_match=cleanMatch.as("dfmatch")
val df_unmatch=cleanUnmatch.as("dfunmatch")
val joinDF=df_match.join(df_unmatch,col("dfmatch.field")===col("dfunmatch.field"),"inner")
//joinDF.select("dfmatch.count","dfunmatch.count","dfmatch.mean","dfunmatch.mean")show()
val featureDf=joinDF.withColumn("total",col("dfmatch.count")+col("dfunmatch.count"))
.withColumn("delta",col("dfmatch.mean")-col("dfunmatch.mean"))
.select("dfmatch.field","total","delta")
featureDf.show()
// Create temp view for sql query
/* matchST.createOrReplaceTempView("match_desc")
missST.createOrReplaceTempView("miss_desc")*/
/*spark.sql("""select * from match_desc limit 5""").show()*/
/*spark.sql(
"""SELECT a.field, a.count + b.count total, a.mean - b.mean delta FROM match_desc a INNER JOIN miss_desc b ON a.field = b.field WHERE a.field NOT IN ("id_1", "id_2") ORDER BY delta DESC, total DESC""").show()
*/
    /* Conclusion: sometimes SQL is much easier and shorter to write */
}
def ProductionExample(df:DataFrame):Unit={
import df.sparkSession.implicits._
//we can use as function to transform dataframe to a dataset
val matchData = df.as[MatchData]
matchData.show()
/* For our scoring function, we are going to sum up the value of one field of type
* Option[Double] (cmp_lname_c1) and four fields of type Option[Int] (cmp_plz,
* cmp_by, cmp_bd, and cmp_bm). Let’s write a small helper case class to cut down on
* some of the boilerplate code associated with checking for the presence of the Option values
*
**/
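    /* A minimal sketch of that scoring step (it uses the Score helper case class defined below; the 5.0 threshold
     * in the cross-tabulation is an illustrative cut-off, not a value taken from the study): */
    def scoreMatchData(md: MatchData): Double = {
      (Score(md.cmp_lname_c1.getOrElse(0.0)) + md.cmp_plz +
        md.cmp_by + md.cmp_bd + md.cmp_bm).value
    }
    val scored = matchData.map { md => (scoreMatchData(md), md.is_match) }.toDF("score", "is_match")
    // Cross-tabulate scores above the threshold against the hand-made label to get a rough feel for the model
    scored.selectExpr("score >= 5.0 as above", "is_match").groupBy("above", "is_match").count().show()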
}
  /* The Score case class starts with a value of type Double (the running sum) and defines a + method that
   * allows us to merge an Option[Int] value into the running sum by getting the value of the Option or
   * returning 0 if it is missing. Here, we’re taking advantage of the fact that Scala lets you define
   * functions using a much broader set of names than Java to make our scoring function a bit easier to read: */
case class Score(value:Double){
def +(oi:Option[Int])={
Score(value+oi.getOrElse(0))
}
}
case class MatchData(
id_1:Int,
id_2:Int,
cmp_fname_c1:Option[Double],
cmp_fname_c2:Option[Double],
cmp_lname_c1:Option[Double],
cmp_lname_c2:Option[Double],
cmp_sex:Option[Int],
cmp_bd:Option[Int],
cmp_bm:Option[Int],
cmp_by:Option[Int],
cmp_plz:Option[Int],
is_match:Boolean
)
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/formation/TweetsStat.scala | package org.pengfei.spark.formation
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.expr
object TweetsStat {
def main(args:Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("TweetsStat").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val inputFile = "file:////home/pliu/Downloads/tweets.csv"
val jsonInputFile = "file:///tmp/test/output/total"
val tweetDF=spark.read.json(jsonInputFile)
val textFile=spark.read.text(inputFile).as[String]
/* counts words in all text */
//val counts=textFile.flatMap(line=>line.split(" ")).groupByKey(_.toLowerCase).count()
//counts.show()
//counts.filter($"value".contains("trump")).show(10)
//textFile.show(10)
tweetDF.show(10)
print(tweetDF.count())
val trumpTweets= tweetDF.filter($"text".contains("Trump")).select($"text")
//print(trumpTweets.count())
trumpTweets.write.format("csv").save("file:///tmp/test/output/trump")
/* counts words in each line and calculate min max and mean*/
spark.udf.register("lineWordCount", (arg1: String)=>lineWordCount(arg1))
val textLengthDF=textFile.withColumn("value",expr("lineWordCount(value)"))
/*textLengthDF.show(5)
textLengthDF.describe().show()*/
}
def lineWordCount(text: String): Long={
val word=text.split(" ").map(_.toLowerCase).groupBy(identity).mapValues(_.size)
val counts=word.foldLeft(0){case (a,(k,v))=>a+v}
/* print(word)
print(counts)*/
return counts
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson10_Spark_Application_ETL/Pivot.scala | <gh_stars>0
package org.pengfei.Lesson10_Spark_Application_ETL
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions.first
object Pivot {
def pivotSummary(df:DataFrame):DataFrame={
import df.sparkSession.implicits._
val schema= df.schema
val ls:Dataset[(String,String,Double)]=df.flatMap(row=>{
val metric=row.getString(0)
(1 until row.length).map(i=>{
(metric,schema(i).name,row.getString(i).toDouble)
})
})
val lf=ls.toDF("metric","field","value")
//lf.show(5)
val wf= lf.groupBy("field").pivot("metric",Seq("count","mean","stddev","min","max")).agg(first("value"))
// wf.show(5)
return wf
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson21_Testing/Lesson21_1_Session_Creation.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson21_Testing/Lesson21_1_Session_Creation.scala
package org.pengfei.Lesson21_Testing
object Lesson21_1_Session_Creation {
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_5_Spark_DataSet.scala | package org.pengfei.Lesson04_Spark_SQL
import java.util.Properties
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Encoders, Row, SaveMode, SparkSession}
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.pengfei.Lesson04_Spark_SQL.Lesson04_1_Spark_SQL_Intro.Person
import org.apache.spark.sql.functions.explode
import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.functions._
object Lesson04_5_Spark_DataSet {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().master("local[2]").appName("Lesson4_5_Spark_DataFrame").getOrCreate()
val sc=spark.sparkContext
import spark.implicits._
/** ***********************************************************************************************
* ***********************************4.5 Spark sql DataSet/DataFrame ********************************
* **********************************************************************************************/
/**************************************4.5.1 DataSet Introduction******************************/
/* A Spark Dataset is a group of specified heterogeneous columns, akin to a spreadsheet or a
* relational database table. RDDs have always been the basic building blocks of Spark and
* they still are. But RDDs deal with objects; we might know what the objects are but the
* framework doesn't. So things such as type checking and semantic queries are not possible
* with RDDs. Then came DataFrames, which added schemas; we can associate schemas with
* an RDD. DataFrames also added SQL and SQL-like capabilities.
*
*
 * Spark 2.0.0 added Datasets, which have all the original DataFrame APIs as well as compile-time
* type checking, thus making our interfaces richer and more robust. So now we have
* three mechanisms:
* - Our preferred mechanism is the semantic-rich Datasets
* - Our second option is the use of DataFrames as untyped views in a Dataset
* - For low-level operations, we'll use RDDs as the underlying basic distributed objects
*
* RDD uses java serialization or Kryo to serialize the objects
* DataSet uses a specialized Encoder to serialize the objects. Encoders are code generated
* dynamically and use a format that allows Spark to perform many operations like filtering,
* sorting and hashing without deserializing the bytes back into an object.
*
*
* */
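    /* A small illustration of the compile-time benefit (a hedged sketch assuming the implicits of an existing
     * SparkSession are imported and a case class Person(id: Long, name: String, age: Int) is in scope):
     *
     *   val ds: Dataset[Person] = personRdd.toDS()
     *   ds.filter(p => p.age > 25)     // typed lambda: a typo such as p.agee fails at compile time
     *
     *   val df: DataFrame = ds.toDF()
     *   df.filter("agee > 25")         // untyped SQL string: the same typo only fails at runtime
     */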
/**************************************4.5.2 DataSet Creation******************************/
/* DataSet can be created from various source such as :
* - Existing RDDs
* - Structured/Semi-Structured data files
* - External DataBases
* - Tables in Hive
* */
// DSCreationOperation(spark)
/******************************4.5.3 Processing Data with dataset API(Transformation)**************************/
/* In spark sql api, we have two different ways to analyze/process data.
* - Use embedded sql/hiveSql(no compile time check/ has to create view for data)
* - Use dataset/dataframe api.
* The dataset api can be divided into two categories transformation, action
* In transformation, we have
* - General transformation : map, filter, flatMap, mapPartitions, mapPartitionsWithIndex, groupBy, cube, rollup,
* sortBy, select
*
* - Math/Statistical transformation : sample, randomSplit
*
* - Set/Relational transformation : union, intersection, subtract, distinct, cartesian, zip, join.
*
* - DataStructure/I/O transformation : keyBy, zipWithIndex(m), zipWithUniqueID(m), zipPartitions(m),
* coalesce, repartition, repartitionAndSortWithinPartions(m), pipe(m)
*
* The dataset api provides 5 types of operations, we will illustrate them all in the following code
*
* */
// DSTransformationOperation(spark)
/******************************4.5.4 Processing Data with dataset API (Action)**************************/
/* In actions, we have:
* General action : reduce, collect, aggregate, fold, first, take, forEach, top, treeAggregate(m), treeReduce,
* forEachPartition, collectAsMap
*
* Math/Statistical action : count, takeSample, max, min, sum, histogram(m), mean, variance, setdev,
* sampleVariance(m), countApprox(m), countApproxDistinct(m)
*
* Set Theory/Relational action : takeOrdered
*
* DataStructure/I/O action : saveAsTextFile, saveAsSequenceFile, saveAsObjectFile, saveAsHadoopDataSet,
* saveAsHadoopFile, saveAsNewAPIHadoopDataSet, saveAsNewAPIHadoopFile
*
* */
// DSActionOperations(spark)
/********************************* 4.5.5 DataSet to rdd **********************************************/
// DSRDDOperations(spark)
/********************************* 4.5.6 Dataset Built-in Functions ******************************************/
/*The built-in function examples are in the Lesson4_6 */
/**********************************4.5.7 DataSet output *********************************************/
DSOutputOperations(spark)
}
def DSCreationOperation(spark:SparkSession):Unit={
/***********4.5.2.1 Create DataSet with RDDs***************/
val sc=spark.sparkContext
import spark.implicits._
val personRdd=sc.parallelize(List(
Person(0, "Alice", 30),
Person(1, "Bob", 30),
Person(2, "Charles", 21),
Person(3, "Defence", 20),
Person(4, "Eletro", 58),
Person(5, "Bob", 40)))
/* The simplest way is to use spark implicit conversion*/
val personDS=personRdd.toDS()
//println(s"personDS value is : ${personDS.collect().mkString(",")}")
personDS.show()
/* CreateDataSet() is a good way to create a Dataset from an RDD.*/
val personDS1=spark.createDataset(personRdd)
//println(s"personDS1 has type: ${personDS1.getClass.getName}")
//println(s"personDS1 value is : ${personDS1.collect().mkString(",")}")
personDS1.show()
/* We can use Encoders to describe the schema of strongly-typed datasets (only works with types such as
* INT, case class, etc.). To check the schema of dataset objects, we can use .schema*/
//Encoders.INT.schema.printTreeString()
//Encoders.product[Person].schema.printTreeString
//println(s"The schema of personDS1 : ${personDS1.schema}")
val rawRdd=sc.parallelize(List(
Row(0, "Alice", 30),
Row(1, "Bob", 30),
Row(2, "Charles", 21),
Row(3, "Defence", 20),
Row(4, "Eletro", 58),
Row(5, "Bob", 40)
))
    /* We have seen how to convert an RDD of case class objects (structured data) to a dataset; now we will try to
     * give a schema to semi-structured data.
*
* The schema for a dataset can be specified with an instance of StructType, which is a case class.
* A StructType object contains a sequence(e.g. Seq, Array) of StructField objects. StructField is
* also defined as a case class. It is used to specify the name and data type of a column, and optionally
* whether a column can contain null values and its metadata. The types of structField are defined in
* org.apache.spark.sql.types._ */
val personSchema=StructType(Array(
StructField("Id", LongType, true),
StructField("Name", StringType, true),
StructField("Age", IntegerType, true)
))
/* StructType offers printTreeString method that print the schema in tree form.*/
//personSchema.printTreeString()
    //The createDataset method can't take an RDD[Row] as input; it only takes RDDs of types with an Encoder (e.g. case classes, primitives).
//val personDS2=spark.createDataset(rawRdd)
//println(s"personDS2 schema is : ${personDS2.schema}")
    /* To add a schema, we have to use createDataFrame, because createDataset does not take a schema. The
     * createDataFrame method which takes a schema requires the rdd to be an RDD of rows (RDD[Row]). */
val personDS3=spark.createDataFrame(rawRdd,personSchema)
println(s"personDS3 schema is : ${personDS3.schema}")
/***********4.5.2.2 Create DataSet with files***************/
/* */
/* Read csv with a schema*/
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val personCSVPath=s"${path}/spark_lessons/Lesson04_Spark_SQL/person.csv"
//val personHdfsPath="hdfs://<namenode-ip>:<port>/path/to/file"
//val personS3Path="s3a://<bucket_name>/path/to/file"
/* The format option specifies the file format, it can be csv, parquet, orc,JSON
* - parquet : org.apache.spark.sql.parquet
* - csv : org.apache.spark.sql.csv
* - json : org.apahce.spark.sql.json
* - orc : org.apahce.spark.sql.orc
*
     * The load option specifies the file path; it can be a local file, remote HDFS, S3, etc. */
val personDS4=spark.read.format("csv").option("delimiter",",").schema(personSchema).load(personCSVPath)
personDS4.show()
println(s"personDS4 schema is : ${personDS4.schema}")
/* Read csv with column name header, without schema, let the spark read method to infer a schema automaticlly*/
val personCSVWithHeadPath=s"${path}/spark_lessons/Lesson04_Spark_SQL/person_with_head.csv"
val personDS5=spark.read.option("header","true").option("inferSchema","true").csv(personCSVWithHeadPath)
personDS5.show()
println(s"personDS5 schema is : ${personDS5.schema}")
/* Read files in all format */
/*val personParquetPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson04_Spark_SQL/person.parquet"
val personDS6=spark.read.option("header","true").option("inferSchema","true").parquet(personParquetPath)
personDS6.show()
println(s"personDS6 schema is : ${personDS6.schema}")
val personOrcPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson04_Spark_SQL/person.orc"
val personDS7=spark.read.option("header","true").option("inferSchema","true").orc(personOrcPath)
personDS7.show()
println(s"personDS7 schema is : ${personDS7.schema}")
val personJsonPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson04_Spark_SQL/person.json"
val personDS8=spark.read.option("header","true").option("inferSchema","true").json(personJsonPath)
personDS8.show()
println(s"personDS8 schema is : ${personDS8.schema}")*/
/* Use sql query on parquet file directly, */
//val personDS6=spark.sql("SELECT * FROM parquet.'/home/pliu/data_set/spark_data_set/spark_lessons/Lesson04_Spark_SQL/person.parquet'")
/***********4.5.2.3 Create DataSet from database***************/
/* Spark SQL has built-in support for JDBC-compliant databases. External packages are available for
* other data sources.*/
/*val jdbcDf=spark.read.format("org.apache.spark.sql.jdbc").options(Map(
"url" -> "jdbc:postgresql://127.0.0.1:5432/northwind?user=pliu&password=<PASSWORD>",
"dbtable" -> "public.employees"
)).load()
jdbcDf.show(5)*/
// We can also use option for each connection info
/* val taoBaoDF=spark.read.format("jdbc")
.option("url","jdbc:postgresql://127.0.0.1:5432/dbtaobao")
.option("dbtable","public.user_log")
.option("user","pliu")
.option("password","<PASSWORD>").load()
taoBaoDF.show()*/
/* We can also use the following syntax*/
/*val connectionProperties= new Properties()
connectionProperties.put("user","pliu")
connectionProperties.put("password","<PASSWORD>")
val taoBaoDF1=spark.read.jdbc("jdbc:postgresql://127.0.0.1:5432/dbtaobao","public.user_log",connectionProperties)
taoBaoDF1.show(5)*/
/****************4.5.2.4 Create DataSet from hive table*********************/
/* The following code for read hive table are not tested*/
/* First, Create a spark session which support hive.
* Then there are two ways to read hive table:
* - use sparkSession.table method -> returns all data of one table as a dataset
* - use sparkSession.sql("sql statement") -> returns the result data of the sql statement as a dataset*/
/*val localWarehouse="file:///tmp/spark-warehouse"
val sparkHive=SparkSession
.builder()
.appName("spark_access_hive_table")
.config("hive.metastore.uris","thrift://127.0.0.1:9083")
// hive.metastore.warehouse.dir is depracted since spark 2.0, use spark.sql.warehouse instead
.config("spark.sql.warehouse.dir",localWarehouse)
//.config("spark.sql.catalogImplementation","hive")
.enableHiveSupport()
.getOrCreate()
val df1 = sparkHive.read.table("hive-table-name")
val df2 = sparkHive.sql("select * from hive-table-name")*/
}
def DSTransformationOperation(spark:SparkSession):Unit={
import spark.implicits._
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val personParquetPath=s"${path}/spark_lessons/Lesson04_Spark_SQL/person.parquet"
val personDS=spark.read.option("header",true).option("inferSchema","true").parquet(personParquetPath)
personDS.show()
val sales = Seq(
("Warsaw", 2016, 100),
("Warsaw", 2017, 200),
("Boston", 2015, 50),
("Boston", 2016, 150),
("Toronto", 2017, 50),
("Toronto", 2017, 50)
).toDF("city", "year", "amount")
val sales1=Seq(
("Beijin", 2016, 100),
("Warsaw", 2017, 200),
("Boston", 2015, 50),
("Benxi", 2016, 150),
("Toronto", 2017, 50),
("GuangZhou", 2017, 50)
).toDF("city", "year", "amount")
/**************************************4.5.3.1 Basic operations for exploring data *********************/
/**************************************Cache/persist dataset**********************************/
/*The cache method stores source dataset in memory using a columnar format. It scans only the required
     * columns and stores them in a compressed in-memory columnar format. Spark automatically selects a compression
     * codec for each column based on data statistics*/
personDS.cache()
/* We can tune the spark caching by adding the following option*/
spark.conf.set("spark.sql.inMemoryColumnarStorage.compressed","true")
spark.conf.set("spark.sql.inMemoryColumnarStorage.batchSize","10000")
/* By default, compression is turned on and the batchSize is 10,000*/
/* With cache(), we can only use the default(memory_only) storage level. With persist(), you can specify
     * many different storage levels. In the following example, we use memory and disk to store the dataset as
     * serialized objects */
//personDS.persist(StorageLevel.MEMORY_AND_DISK_SER_2)
/**********************************Columns and rows of dataset ************************/
/* Row is a Spark Sql abstraction for representing a row of data. Conceptually, it's equivalent to a relational
* tuple or row in a table */
/* An example of row of an object*/
val row1=Row(Person(6,"haha",28))
val p1=row1.get(0)
println(s"Person p1 from row 1 has value: ${p1.toString}")
/* An example of row of string*/
val row2=Row(7,"foo",18)
val pid=row2.getInt(0)
val pname=row2.getString(1)
val page=row2.getInt(2)
println(s"Person p2 form row2 has value: pid: ${pid}, pname: ${pname}, page: ${page}")
/* The columns method returns the names of all the columns in the dataset as an array of String*/
val columns=personDS.columns
println(s"Columns name of personDS: ${columns.mkString(",")}")
/***********************************Dtypes of dataset ***************************/
/* dtypes method returns the data types of all the columns in the dataset as an array of tuples. The
* first element of the tuple is the name, second element is the type of a column */
val columnsWithType =personDS.dtypes
println(s"Columns with types of personDS: ${columnsWithType.mkString(",")}")
/******************************** Explain *************************************/
/* The explain method prints the physical plan on the console. It is useful for debugging*/
//personDS.explain(false)
/***************************** printSchema ************************************/
/* The printSchema method prints the schema of the source DataFrame on the console in a tree format*/
personDS.printSchema()
/**********************************toDF****************************************/
/* We can use toDF to rename column name, it's better to use withColumns if you need to do more on the column*/
val renameDS= personDS.toDF("SId","SName","SAge")
renameDS.printSchema()
/**************************************4.5.3.2 Transformation operations *************************************/
/****************************************Distinct*********************************************************/
/* The distinct method returns a new Dataframe containing only the unique rows in the source dataset*/
val salesWithoutDuplicates = sales.distinct()
salesWithoutDuplicates.show()
/**************************************** filter **************************************************/
/* The filter method filters rows in the source DataSet using a sql expression provided to it as an argument.
* It returns a new DataSet containing only the filtered rows. The SQL expression can be passed as a string argument */
val filteredSales=sales.filter("amount > 100")
filteredSales.show()
// A variant of the filter method allows a filter condition to be specified using the Column type. you can also
// replace the $"year" by sales("year")
val filteredSales1=sales.filter($"year" > 2010)
filteredSales1.show()
/******************************************Intersect****************************************************/
/* The intersect method takes a DataFrame as an argument and returns a new DataFrame containing only
* the rows in both the input and source DataFrame.*/
val commonRows=sales.intersect(sales1)
commonRows.show()
/******************************************Join*******************************************************/
/* Join method performs a SQL join of the source DataFrame with another DataFrame. It takes three
* arguments, a DataFrame, a join expression and a join type.*/
val customer=List(Customer(11, "Jackson", 21, "M"),
Customer(12, "Emma", 25, "F"),
Customer(13, "Olivia", 31, "F"),
Customer(4, "Jennifer", 45, "F"),
Customer(5, "Robert", 41, "M"),
Customer(6, "Sandra", 45, "F")).toDF()
val transactions= List(Transaction(1, 5, 3, "01/01/2015", "San Francisco"),
Transaction(2, 6, 1, "01/02/2015", "San Jose"),
Transaction(3, 1, 6, "01/01/2015", "Boston"),
Transaction(4, 200, 400, "01/02/2015", "Palo Alto"),
Transaction(6, 100, 100, "01/02/2015", "Mountain View")).toDF()
// the join type can be inner, outer, left_outer, right_outer,
val innerDF = transactions.join(customer, transactions("cId")===customer("cId"),"inner")
innerDF.show()
/*******************************************Limit *******************************************************/
/* The limit method returns a dataset containing the specified number of rows from the source dataset. */
val fiveCustomer=customer.limit(5)
fiveCustomer.show
/*****************************************orderBy******************************************************/
/* The orderBy method returns a DataFrame sorted by the given columns. It takes the names of one or more
* columns as arguments.
*
* By default, the orderBy sorts in ascending order. You can explicitly specify the sorting order by adding
* .desc or .asc*/
val sortCustomerAge=customer.orderBy("age")
sortCustomerAge.show()
// sort with multi column
customer.orderBy($"cId".asc,$"age".desc).show()
/********************************************randomSplit*********************************************/
/* The randomSplit method splits the source dataset into multiple datasets. It takes an array of weights as
     * argument and returns an array of datasets. It is a useful method for splitting a dataset into
     * training data and test data for machine learning.*/
val splitCustomerDS = customer.randomSplit(Array(0.4,0.3,0.3))
println(s"Split customer dataset, fist part count: ${splitCustomerDS(0).count}, " +
s"2nd part count: ${splitCustomerDS(1).count}, 3rd part count: ${splitCustomerDS(2).count}")
/***************************************************sample********************************************/
/* The sample method returns a DataFrame containing the specified fraction of the rows in the source
* DataFrame. It takes two arguments. The first argument is a Boolean value indicating whether sampling should
* be done with replacement. The second argument specifies the fraction of the rows that should be returned.*/
val sampleSales=sales.sample(true,0.10)
//sampleSales.show()
/**************************************************select/selectExpr *********************************************/
/* The select method returns a DataFrame containing only the specified columns from the source DataFrame. You can
     * also modify the value of columns with select. For more complex functions, we can use selectExpr. The
* selectExpr method accepts one or more SQL expressions as arguments and returns a DataFrame generated by
* executing the specified SQL expressions.
* */
println("********************************select/selectExpr***************************************")
val nameAgeCustom=customer.select($"name",$"age"+10)
nameAgeCustom.show()
val newCustomerDF=customer.selectExpr("name","age+10 AS age_10","IF(gender='F', true, false) As female")
newCustomerDF.show()
/******************************************** withColumn ***********************************************/
/* The withColumn method adds a new column to or replaces an existing column in the source DataSet and returns a
* new dataset. It takes two arguments, the first argument is the name of the new column and the second argument
* is an expression for generating the value of the new column. */
// if the expression is simple, you can put it directly in the withColumn method
println("*************************************withColumn***********************************************")
val productDS=List(Product(1,"iPhone",600.00,400.00),
Product(2,"iPad",400.00,300.00),
Product(1,"Dell",500.00,400.00)).toDF()
val productProfit=productDS.withColumn("profit",$"price"-$"cost")
productProfit.show()
/*if the logic in the expression is complex, you can define a function then call the function
* To do this there are three steps.
* 1. define the function
* 2. register the function to spark udf
     * 3. call the function in withColumn with the function expr (you need to import org.apache.spark.sql.functions.expr)
* */
/* Step 1*/
def getProfit(price:Double,cost:Double):Double={
return price-cost
}
/*Step 2, first argument is the name of the registered function in udf. second is the lambda exp
* which call the real function.*/
spark.udf.register("getProfit",(price:Double,cost:Double)=>getProfit(price,cost))
/*Step 3. In the expr, you can use directly the column name as argument in the udf registered function*/
val productProfitUdf=productDS.withColumn("profit",expr("getProfit(price,cost)"))
productProfitUdf.show()
}
def DSActionOperations(spark:SparkSession):Unit={
import spark.implicits._
val sales = List(SalesSummary("01/01/2015", "iPhone", "USA", 40000),
SalesSummary("01/02/2015", "iPhone", "USA", 30000),
SalesSummary("01/01/2015", "iPhone", "China", 10000),
SalesSummary("01/02/2015", "iPhone", "China", 5000),
SalesSummary("01/01/2015", "S6", "USA", 20000),
SalesSummary("01/02/2015", "S6", "USA", 10000),
SalesSummary("01/01/2015", "S6", "China", 9000),
SalesSummary("01/02/2015", "S6", "China", 6000)
).toDF()
sales.show()
/************************************** 4.5.4.1 Basic Actions **********************************/
/************************************Apply***********************************************/
/* The apply method takes the name of a column as an argument and returns the specified column in the source dataset
* as an instance of Column class. The Column class provides operators for manipulating a column in a DataSet*/
val revenueColumn=sales.apply("revenue")
println(s"revenueColumn has value : ${revenueColumn.toString()}")
val halfRevenueColumn=revenueColumn*0.5
println(s"HalfRevenueColumn has value : ${halfRevenueColumn.toString()}")
/* Scala provides syntactic sugar that allows you to use sales("year") instead of sales.apply("year").
* It automatically converts sales("year") to sales.apply("year"). So the preceding code can be
* rewritten, val yearColumnMinus10=sales("year")-10
*
* Column class is generally used as an input to some of the dataFrame methods or functions defined in the spark SQL
* library.
* For example sales.sum("amount") can also be written as sum(sales.apply("amount")), sum(sales("amount")) or
* sum($"amount")*/
/******************************************* Collect **************************************************/
    /* The collect method returns the data in a dataset as an array of Rows. Be careful when you call collect on a
     * big dataset: it may exhaust the Spark driver's memory when all the workers send back their results. */
//println(s"result of sales after collect: ${sales.collect().mkString(",")}")
/*********************************************count ******************************************************/
/* Count method returns the number of rows in the dataset*/
val count=sales.count()
/********************************************describe*********************************************/
/* The describe method can be used for exploratory data analysis. It returns summary statistics for numeric columns
     * in the source dataset. The summary statistics include min, max, count, mean, and standard deviation. It can take
* one or more column as arguments and return the summary as a dataset*/
sales.describe("revenue").show()
/*******************************************first/take**********************************************/
/* The first method returns the first row in the source DataFrame. In agg(), you can also use first to get the first
* element of a column*/
val firstRow=sales.first()
/* The take method takes an integer N as an argument and returns the first N rows from the source DataFrame
* as an array of Rows.*/
val firstTwoRows= sales.take(2)
/*****************************************groupby**************************************************/
/* The groupBy method groups the rows in the source dataset using the columns provided to it as arguments.
* Aggregation can be performed on the grouped data returned by this method
*
* The aggregation method can be count, min, max, sum,*/
val countCity = sales.groupBy("country").count()
countCity.show()
val salesAmountOfCity= sales.groupBy("country").sum("revenue")
salesAmountOfCity.show()
/********************************************* Cube *****************************************************/
/* The cube method takes the names of one or more columns as arguments and returns a cube for multi-dimensional
* analysis. It is useful for generating cross-tabular reports.
*
* Assume you have a dataset that tracks sales along three dimensions: time, product and country. The cube method
     * allows you to generate aggregates for all possible combinations of the dimensions that you are interested in.
*
     * It works like a super groupBy: it groups by all the target columns and returns every possible groupBy result.
     * In the following example, it groups by date, product and country. The first row of the result dataset,
     * |01/01/2015| null| USA| 60000.0|, is a grouping of the rows where date=01/01/2015 and country=USA; as the
     * product column contains both iPhone and S6, it returns null. The sum("revenue") is the sum of the revenue of all
* rows where date=01/01/2015 and country=USA */
val salesCubeDf=sales.cube($"date",$"product",$"country").sum("revenue")
println("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%Stat of cube%%%%%%%%%%%%%%%%%%%%%")
salesCubeDf.show()
/* If you want to find the total sales of all products in the USA, you can use the following filter query*/
salesCubeDf.filter("product IS null AND date IS null AND country='USA'").show()
/* If you want to know the subtotal of sales by product in the USA, you can use the following filter query*/
salesCubeDf.filter("date IS null AND product IS NOT null AND country='USA'").show()
/****************************************rollup***********************************************/
/* The rollup method takes the names of one or more columns as arguments and return a multi-dimensional
* rollup. It is useful for sub-aggregation along a hierarchical dimension such as geography or time*/
val salesByCity = List(SalesByCity(2014, "Boston", "MA", "USA", 2000),
SalesByCity(2015, "Boston", "MA", "USA", 3000),
SalesByCity(2014, "Cambridge", "MA", "USA", 2000),
SalesByCity(2015, "Cambridge", "MA", "USA", 3000),
SalesByCity(2014, "Palo Alto", "CA", "USA", 4000),
SalesByCity(2015, "Palo Alto", "CA", "USA", 6000),
SalesByCity(2014, "Pune", "MH", "India", 1000),
SalesByCity(2015, "Pune", "MH", "India", 1000),
SalesByCity(2015, "Mumbai", "MH", "India", 1000),
SalesByCity(2014, "Mumbai", "MH", "India", 2000)).toDF()
val salesRollup=salesByCity.rollup($"country",$"state",$"city").sum("revenue")
salesRollup.show()
val salesCube=salesByCity.cube($"country",$"state",$"city").sum("revenue")
salesCube.show()
val salesGroupBy=salesByCity.groupBy($"country",$"state",$"city").sum("revenue")
salesGroupBy.show()
/**************************GroupBy vs cube vs Rollup ***********************************/
  /* We can notice that the functionality and results of groupBy, cube and rollup are very similar. They are all
   * inspired by SQL.
   *
   * groupBy : is used to group the results of aggregate functions according to the specified columns. But it does not
   *           perform aggregate operations on multiple levels of a hierarchy of columns. For example, you can
   *           calculate the total of all employee salaries for each department in a company (one level of hierarchy),
   *           but, in the same query, you cannot also calculate the total salary of all employees regardless of the
   *           department they work in (two levels of hierarchy).
   *
   * rollup : extends the functionality of groupBy by calculating subtotals and grand totals for a set of
   *          columns with respect to the hierarchy (i.e. the column order)
   *
   * cube : is similar to rollup, but it calculates subtotals and grand totals for
   *        all possible combinations of the columns. A sketch of querying the rollup result follows just below. */
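    /* A hedged sketch of querying the rollup result above: the grand total row has null in every grouping
     * column, and country-level subtotals have null state and city. */
    salesRollup.filter("country IS NULL AND state IS NULL AND city IS NULL").show()
    salesRollup.filter("country IS NOT NULL AND state IS NULL AND city IS NULL").show()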
}
/**********************************************************************************************************
* **************************************************4.5.5 DataSet to rdd***********************************
* ************************************************************************************************/
def DSRDDOperations(spark: SparkSession):Unit={
val sc=spark.sparkContext
import spark.implicits._
val customerDF=List(Customer(11, "Jackson", 21, "M"),
Customer(12, "Emma", 25, "F"),
Customer(13, "Olivia", 31, "F"),
Customer(4, "Jennifer", 45, "F"),
Customer(5, "Robert", 41, "M"),
Customer(6, "Sandra", 45, "F")).toDF()
/* The DataFrame class supports commonly used RDD operations such as map, flatMap, foreach,
    * foreachPartition, mapPartition, coalesce, and repartition. These methods work similarly to their
    * namesake operations in the RDD class.
*
* In addition, if you need access to other RDD methods that are not present in the DataFrame class, you
* can get an RDD from a DataFrame. This section discusses the commonly used techniques for generating an
* RDD from a DataFrame.*/
// convert dataset to rdd
val customerRdd= customerDF.rdd
// get firstRow of an rdd
val firstRow=customerRdd.first()
    // get Customer name, the second field of the Row (index 1)
    val name=firstRow.getString(1)
    // get Age, the third field of the Row (index 2)
    val age=firstRow.getInt(2)
// get an rdd with only name and age, Rdd is not strongly typed, here we must use Row instead of Customer
val NameAge=customerRdd.map{case Row(cId:Long,name:String,age:Int,gender:String)=> (name,age)}
// the below code does not work.
//val NameAge=customerRdd.map{case Customer(cId:Long,name:String,age:Int,gender:String)=> (name,age)}
println(s"NameAge has value: ${NameAge.collect().mkString(",")}")
}
/**********************************************************************************************************
* *********************************************4.5.7 DataSet output operations**************************
* ************************************************************************************************/
def DSOutputOperations(spark:SparkSession):Unit={
import spark.implicits._
val personDS=spark.sparkContext.parallelize(List(
Person(0, "Alice", 30),
Person(1, "Bob", 30),
Person(2, "Charles", 21),
Person(3, "Defence", 20),
Person(4, "Eletro", 58),
Person(5, "Bob", 40))).toDF()
/*****************************************4.5.7.1 Write dataset on disks as files***************************/
    /* The dataset write method can write a dataset to files in json, parquet, orc or csv format.
    * While saving a dataset, if the destination path or table
    * already exists, spark sql will throw an exception. You can use the mode method of the DataFrameWriter to change this.
    * It takes an argument saveMode which specifies the behavior if the destination already exists.
    * There are four SaveModes:
    * - error (default) : throw an exception if the destination exists.
    * - append : append to existing data if the destination exists.
    * - overwrite : overwrite existing data
    * - ignore : ignore the write operation if the destination exists.
    *
    * */
/*personDS.coalesce(1).write.mode(SaveMode.Overwrite).json("/tmp/personDS")
personDS.coalesce(1).write.mode(SaveMode.Overwrite).parquet("/tmp/personParquet")
personDS.coalesce(1).write.mode(SaveMode.Overwrite).orc("/tmp/personORC")*/
    /* The output of the write method is a directory, not a single file. The number of files which contain the real
    * data depends on the number of partitions of the dataset. There are three methods that can change the output
    * file number (partition number):
    * - coalesce(partitionNum:Int) : can only reduce the partition number
    *
    * - repartition(partitionNum:Int,col:Column) : can both increase and reduce the partition number. The col argument
    *                                              will be used as a partition key. The partition key helps spark to
    *                                              determine which partition each row belongs to. This can speed up
    *                                              later analytics. The two arguments are both optional.
    *
    * - partitionBy(colNames:String*) : a DataFrameWriter method that partitions the output by the values of the given
    *                                   columns, creating one sub-directory per distinct value; it does not take a
    *                                   target partition count.
    *
    * We commonly use a date or time column to partition data
    * */
// personDS.repartition(6,$"Id").write.mode(SaveMode.Append).text("/tmp/personDSText")
// we can also use save method to write files
// personDS.write.format("parquet").mode(SaveMode.Overwrite).save("/tmp/savePersonParquet")
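    /* A hedged sketch of partitionBy on the writer (the output path below is hypothetical): it creates one
     * sub-directory per distinct value of the given column, e.g. /tmp/personByAge/Age=30/part-....parquet */
    // personDS.write.partitionBy("Age").mode(SaveMode.Overwrite).parquet("/tmp/personByAge")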
/*****************************************4.5.7.2 Write dataset on database***************************/
    /*The simplest way is to use the save method: we use format "jdbc" and the option method to specify connection
    * information. Setting a SaveMode is required when writing to an existing table to avoid an exception. */
/*personDS.write.format("jdbc")
.option("url","jdbc:postgresql://127.0.0.1:5432/dbtaobao")
.option("dbtable","public.test_person")
.option("user","pliu")
.option("password","<PASSWORD>").mode(SaveMode.Ignore).save()*/
/* Second way is to use options method to specify connection information */
/* personDS.write.format("jdbc")
.options(Map("url" -> "jdbc:postgresql://127.0.0.1:5432/dbtaobao?user=pliu&password=<PASSWORD>",
"dbtable" -> "public.test_person")).mode(SaveMode.Overwrite).save()*/
/* Third way is to use write.jdbc method, it takes three arguments, 1st is the url of db, 2nd is the schema.table_name,
* 3rd is the connection properties*/
/*val connectionProperties= new Properties()
connectionProperties.put("user","pliu")
connectionProperties.put("password","<PASSWORD>")
personDS.write.mode(SaveMode.Append)
.jdbc("jdbc:postgresql://127.0.0.1:5432/dbtaobao","public.test_person",connectionProperties)*/
/* TroubleShooting
* - The JDBC driver class must be visible to the primordial class loader on the client session and on all executors.
* This is because Java’s DriverManager class does a security check that results in it ignoring all drivers not
* visible to the primordial class loader when one goes to open a connection. One convenient way to do this is
* to modify compute_classpath.sh on all worker nodes to include your driver JARs.
*
* - Some databases, such as H2, convert all names to upper case. You’ll need to use upper case to refer to
* those names in Spark SQL.
* */
/*****************************************4.5.7.3 Write dataset on hive metastore***************************/
    /* To read or write data on hive, we need to include all hive dependencies on the spark driver and all worker
    * nodes, because these dependencies are needed on the worker nodes to access the hive serialization and
    * deserialization libraries in order to access the hive metastore. */
    /*The below code is not tested!!!!!! */
/* We need also enable hive support in the spark session*/
/*val spark=SparkSession.builder()
.appName("spark Hive example")
.config("spark.sql.warehouse.dir","/tmp/spark-warehouse")
.enableHiveSupport()
.getOrCreate()*/
//save dataset to hive table
// personDS.write.mode(SaveMode.Overwrite).saveAsTable("hive_table_name")
}
case class Person(Id: Long, Name: String, Age: Int)
case class EmailStringBody(sender:String, recepient:String,subject:String,body:String)
case class EmailArrayBody(sender: String, recepient: String, subject: String, body: Array[String])
case class SalesSummary(date:String,product:String,country:String,revenue:Double)
case class Customer(cId:Long,Name:String,age:Int,gender:String)
case class Transaction(tId: Long, cId: Long, prodId: Long, date: String, city: String)
case class SalesByCity(year: Int, city: String, state: String,
country: String, revenue: Double)
case class Product(id:Int,name:String,price:Double,cost:Double)
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson12_Predicting_Forest_Cover_With_Decision_Trees/Lesson12_Predicting_Forest.scala | package org.pengfei.Lesson12_Predicting_Forest_Cover_With_Decision_Trees
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.{DecisionTreeClassifier, RandomForestClassifier}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{VectorAssembler, VectorIndexer}
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.classification.RandomForestClassificationModel
import scala.util.Random
object Lesson12_Predicting_Forest {
/**************************************************************************************************************
* **********************************12.1 Predicting Forest with decision tree *******************************
* ***********************************************************************************************************/
/* The data set used in this Lesson is the well know Covtype data set, you can download it from :
* https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/ It contains a compressed CSV-format data
* file, covtype.data.gz, and accompanying info file, covtype.info.
*
* This data set records the types of forest-covering parcels of land in Colorado, USA. Each example contains
* several features describing each parcel of land (e.g. elevation, slope, distance to water, shade, and soil type)
* along with the known forest type covering the land. The forest cover type is to be predicted from the rest of the
* features, of which there are 54 in total
*
* This data set contains both categorical and numeric features. There are 581,012 examples in the data set, which
  * does not exactly qualify as big data, but it still highlights some issues of scale.*/
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson12_Predicting_Forest_Cover").master("local[2]").getOrCreate()
import spark.implicits._
/************************************* 12.2 Preparing the data ***************************************************/
    /* The data file does not have column names in a header; they are specified in the covtype.info file.
     * Conceptually, each column of a CSV file has a type as well (e.g. number, string), but it's not explicit.
     * We need to define the schema ourselves*/
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/spark_lessons/Lesson12_Predicting_Forest_Cover/covtype.data"
val dataWithoutHeader = spark.read.option("inferSchema", true).option("header",false).csv(filePath)
// dataWithoutHeader.show(5)
val colNum=dataWithoutHeader.columns.length
val rowNum=dataWithoutHeader.count()
println(s"covtype data set has ${rowNum} rows, and ${colNum} columns")
    /* We could notice that some columns are numeric and some columns are binary (0/1). The binary columns (in groups)
    * represent an actual categorical column by using "one-hot (1-of-n) encoding". For example, a categorical feature
    * for weather that can be cloudy, rainy, or clear would become three binary features (cloudy, rainy, clear); for
    * a rainy row, the rainy column = 1 while the cloudy and clear columns = 0.
*
* Be careful when encoding a categorical feature as a single numeric feature. The original categorical values
* have no ordering, but when encoded as a number, they appear to. Treating the encoded feature as numeric
* leads to meaningless results because the algorithm is effectively pretending that rainy is somehow greater
* than, and two times larger than, cloudy. It’s OK as long as the encoding’s numeric value is not used as a
* number.*/
/* Based on the covtype.info file, the column names show in order in the following sequence. */
val colNames=Seq("Elevation","Aspect","Slope",
"Horizontal_Distance_To_Hydrology", "Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points")
      // Above are the numeric features
      // The 4 columns of one-hot encoded categorical features about wilderness area
      .union((0 until 4).map(i => s"Wilderness_Area_$i"))
      // The 40 columns of one-hot encoded categorical features about soil type
.union((0 until 40).map(i => s"Soil_Type_$i"))
// The label column of tree cover type
.union(Seq("Cover_Type"))
// union can be replaced by ++, but it must be on the same line
// println(s"colNames: ${colNames.toString()}")
val temp=dataWithoutHeader.toDF(colNames:_*)
val rawData=temp.withColumn("Cover_Type", temp("Cover_Type").cast("double"))
/* show all the possible cover_type(label) value*/
// rawData.select("Cover_Type").distinct().show()
// rawData.show(5)
val Array(trainData,testData)=rawData.randomSplit(Array(0.9,0.1))
trainData.cache()
testData.cache()
/********************************** 12.3.1 Build first decision tree model ****************************************/
    /* In this lesson, we only use the spark.ml api; we don't use the spark.mllib api. They are quite different. Be aware of that.*/
// x=>x can be replaced by _ , so filter(_ !="Cover_Type") works too
val inputCols=trainData.columns.filter(x=>x!="Cover_Type")
//Build a assembler which can transform dataframe to a vector
val assembler = new VectorAssembler().setInputCols(inputCols).setOutputCol("featureVector")
val assembledTrainData= assembler.transform(trainData)
//assembledTrainData.select("featureVector").show(5)
// It shows (54,[0,1,2,3,4,5,...]) 54 is the size of the vector, [....] is the vector of features
    /* VectorAssembler is an example of a Transformer within the current Spark ML "Pipelines" API. It transforms
     * a DataFrame into another DataFrame, and is composable with other transformations into a pipeline. Later in
     * this Lesson, these transformations will be connected into an actual Pipeline. Here, the transformation is just
     * invoked directly, which is sufficient to build a first decision tree classifier model.*/
val classifer = new DecisionTreeClassifier()
.setSeed(Random.nextLong())
.setLabelCol("Cover_Type")
.setFeaturesCol("featureVector")
.setPredictionCol("prediction")
val model=classifer.fit(assembledTrainData)
/*this shows decision tree structure, which consists of a series of nested decisions about features.*/
//println(model.toDebugString)
    /* decision trees are able to assess the importance of the input features as part of their building process. They
     can estimate how much each feature contributes to making correct predictions.*/
// model.featureImportances.toArray.zip(inputCols).sorted.reverse.foreach(x=>println(s"feature name : ${x._2} has importance: ${x._1}"))
/* The higher the importance value is better. Elevations seems to be the most important feature. Most features are
* estimated to have no importance.
*
* The resulting DecisionTreeClassificationModel is itself a transformer because it can transform a data frame
* containing feature vectors into a data frame also containing predictions. */
/* We can do predictions on training data and compare its prediction with the known correct cover type*/
    // transform here acts as a prediction
val predictions=model.transform(assembledTrainData)
predictions.select("Cover_Type","prediction","probability").show(truncate = false)
/* The output also contains a "probability" column that gives the model's estimate of how likely it is that
* each possible outcome is correct. This shows that in these instances, it’s fairly sure the answer is 3 in
* several cases and quite sure the answer isn’t 1
*
     * We know that the cover_type only has 7 possible values (1 to 7), but the probability vector contains 8 values.
     * That's because the probability vector is indexed from 0, so we can ignore the element at index 0 since
     * we don't have cover_type 0.*/
    /* We could notice that many predictions are wrong. So we need to work on the hyperparameters of the
     * DecisionTreeClassifier.
*
* MulticlassClassificationEvaluator can compute accuracy and other metrics that evaluate the quality of the model's
* predictions. It's an example of an evaluator in spark ml, which is responsible for assessing the quality of
* an output dataframe in some way*/
/* val evaluator=new MulticlassClassificationEvaluator()
.setLabelCol("Cover_Type")
.setPredictionCol("prediction")*/
// val accuracy=evaluator.setMetricName("accuracy").evaluate(predictions)
// val f1=evaluator.setMetricName("f1").evaluate(predictions)
    /*The F1 score comes from binary classification (for multiclass problems, Spark reports a weighted average of the
     * per-class F1 scores). It considers both the precision (positive predictive value) P and
     * the recall (aka sensitivity) R.
     * P is the number of correct positive results divided by the number of all predicted positive results.
     * R is the number of correct positive results divided by the number of all relevant samples.
     *
     * In a prediction, you have four types of result:
     * - 1. true positives
     * - 2. false negatives
     * - 3. false positives
     * - 4. true negatives
     * and from them:
     * - 5. relevant elements = (true positives) + (false negatives)
     * - 6. precision = (true positives) / (all positives in the prediction)
     * - 7. recall = (true positives) / (all relevant elements)
     *
     * For example, we have a binary classification problem of dogs and not-dogs. We have 20 rows in this dataset, 12
     * of them are dogs, but our classifier only identified 8 rows as dogs. 5 of the 8 classifications are correct, so
     * 5 dog predictions are correct and 3 dog predictions are wrong.
     *
     * - all positives = 12 (12 dogs in total in this dataset)
     * --- positives in predictions = 8
     * ----- true positives = 5  (5 correct dog predictions)
     * ----- false positives = 3 (3 wrong dog predictions)
     * - all negatives = 8
     * --- negatives in predictions = 12
     * ----- false negatives = 7 (7 rows predicted as not-dog are actually dogs)
     * ----- true negatives = 5  (5 correct not-dog predictions)
     *
     * - precision = 5/8
     * - recall = 5 / (5+7)
*
* */
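    /* A tiny arithmetic sketch of the dog example above (plain Scala, just to make the formulas concrete): */
    val dogTP = 5.0; val dogFP = 3.0; val dogFN = 7.0
    val dogPrecision = dogTP / (dogTP + dogFP) // 5/8 = 0.625
    val dogRecall = dogTP / (dogTP + dogFN)    // 5/12 ~= 0.417
    val dogF1 = 2 * dogPrecision * dogRecall / (dogPrecision + dogRecall)
    println(s"dog example: precision=$dogPrecision, recall=$dogRecall, f1=$dogF1")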
// println(s"accuracy is ${accuracy}, f1 is ${f1}")
/* Confusion matrix is another good way to evaluate models. A confusion matrix is a table with a row and a column
* for every possible value of the target. In our example, it will be a 7*7 matrix, where each row corresponds to
* an actual correct value, and each column to a predicted value, in order. The entry at row i and column j counts
   * the number of times an example with true category i was predicted as category j. So the correct predictions are
   * the counts along the diagonal and the misclassifications are everything else.
*
   * In spark, only the spark-mllib api provides this implementation, which is based on RDDs. So we need to transform
   * the predictions DataFrame into an RDD. */
/* as[] convert dataframe to a dataset[(Double,Double)] */
// val predictionRDD=predictions.select("prediction","Cover_Type").as[(Double,Double)].rdd
// val multiClassMetrics=new MulticlassMetrics(predictionRDD)
// val confusionMatrix=multiClassMetrics.confusionMatrix
// println(s"confusionMatrix : \n${confusionMatrix}")
    /* The values in confusionMatrix will be slightly different each time, because the process of building a decision
     * tree includes some random choices that can lead to different classifications.
*
   * Counts in the diagonal are high, which is good. However, there are certainly a number of misclassifications. For
   * example, categories 5 and 6 are never predicted at all.*/
/* We can also build our confusionMatrix ourselves with the help of pivot method of dataframe*/
// val confusionMatrixDf=predictions.groupBy("Cover_Type").pivot("prediction",(1 to 7)).count.na.fill(0.0).orderBy("Cover_Type")
// confusionMatrixDf.show()
/********************************** 12.4 Decision Tree Hyperparameters ****************************************/
/* With default hyperparameters, our decision tree model has about 70% accuracy. A decision Tree model has the
* following important hyperparameters:
* - maximum depth : It simply limits the number of levels in the decision tree. It is the maximum number of chained
* decisions that the classifier will make to classify an example. It is useful to limit this to
* avoid overfitting the training data.
*
* - maximum bins : It simply limits the total number of sets of values to put in decision rules. For example, for
* numeric feature, we have decision rules like weight >= 100, for categorical feature, we have rules
* like eye-color in (bleu,green) . The set of values such as 100 and (blue, green) are called bins.
* A larger number of bins requires more processing time but might lead to finding a more optimal
* decision rule.
*
* - impurity measure : A good decision rule can divide the training data's target values into relatively homogeneous,
* or "pure" subsets. Picking a best rule means minimizing the impurity of the two subsets it
* induces. There are two common used measures of impurity: Gini impurity and entropy.
*
* - minimum information gain : It's a hyperparameter that imposes a minimum information gain, or decrease in impurity,
* for candidate decision rules. Rules that do not improve the subsets impurity enough
* are rejected. Like a lower maximum depth, this can help the model resist overfitting
* because decisions that barely help divide the training input may in fact not helpfully
* divide future data at all.
* */
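    /* A hedged sketch of setting the hyperparameters discussed above explicitly on the classifier
     * (the values below are illustrative, not tuned): */
    val tunedClassifier = new DecisionTreeClassifier()
      .setSeed(Random.nextLong())
      .setLabelCol("Cover_Type")
      .setFeaturesCol("featureVector")
      .setPredictionCol("prediction")
      .setMaxDepth(10)        // maximum depth
      .setMaxBins(64)         // maximum bins
      .setImpurity("entropy") // impurity measure: "gini" or "entropy"
      .setMinInfoGain(0.01)   // minimum information gain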
/********************************************* 12.5 Tuning Decision Trees *************************************/
/* By modifying the hyperparameters which we mentioned above, we can tune the accuracy of the decision tree model
* To make the test of the best combination of hyperparameters easier, we will create a pipeline which chain the
* VectorAssembler and DecisionTreeClassifier (two transformers) together. */
// DecisionTreePipeLine(trainData)
/************************************************** 12.5.1 Overfitting issue ************************************/
    /* As we discussed previously, it's possible to build a decision tree so deep and elaborate that it fits the
     * given training examples very well or even perfectly, but fails to generalize to other examples, because it has
     * fit the idiosyncrasies and noise of the training data too closely.
     *
     * A decision tree that has overfit will exhibit high accuracy when run on the same training data that it was fit
     * to, but low accuracy on other examples. Here the final model's accuracy was about 91% on new data, but it
     * can easily give a 95% accuracy on the training data.
     *
     * The difference is not large, but suggests that the decision tree has overfit the training data to some extent.
     * A lower maximum depth might be a better choice. A sketch of measuring this gap is shown just below. */
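    /* A hedged sketch of measuring the gap described above: evaluate the same fitted model on the training data
     * and on the held-out testData (the evaluator mirrors the commented-out one earlier in this method). */
    // val overfitEval = new MulticlassClassificationEvaluator()
    //   .setLabelCol("Cover_Type").setPredictionCol("prediction").setMetricName("accuracy")
    // val trainAcc = overfitEval.evaluate(model.transform(assembledTrainData))
    // val testAcc = overfitEval.evaluate(model.transform(assembler.transform(testData)))
    // println(s"train accuracy = $trainAcc, test accuracy = $testAcc")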
/***************************************** 12.6 Categorical Features Revisited *******************************/
    /* Another way to improve your model is to do feature engineering. In this section, we will revisit the categorical
    * features.
    *
    * So far, we have treated all input features as if they were numeric. The label column is encoded as numeric, but has
    * actually been correctly treated as a categorical value. All the categorical features have been one-hot encoded.
    *
    * For a binary categorical feature, one-hot encoding turns it into several binary 0/1 values. Treating
    * these features as numeric is fine, because any decision rule on these features will choose thresholds between 0
    * and 1, and all are equivalent since all values are 0 or 1.
    *
    * But for complex features, we need to do more to tune the model. For example, soil type has 40 possible values,
    * each currently encoded as its own binary feature. If soil type were instead encoded as a single categorical
    * feature with 40 different soil values, the decision tree model could have rules like "if the soil type is one of
    * these nine types" directly. However, when encoded as 40 binary features, the tree would have to learn a sequence
    * of nine decisions on soil type to do the same; this expressiveness may lead to better decisions and more
    * efficient trees.
    *
    * What about undoing the one-hot encoding? This would replace, for example, the four columns encoding wilderness
    * type with one column that encodes the wilderness type as a number between 0 and 3. */
//trainData.show(1)
//undo the one-hot encoding
val unOneHotEncode=unencodeOneHot(trainData)
// unOneHotEncode.show(1)
/* Use the pipepline api to build decision tree model training pipeline and trainValidationSplit api to build
* a evaluation matrix for different hyperparameters */
// DTreePipelineWithCategoricalFeature(unOneHotEncode)
/*We could see the accuracy increase 2%, so it's better*/
/****************************************** 12.6 Random Forest *********************************************/
RandomForestModelWithCategorical(unOneHotEncode)
    /* Random decision forests are appealing in the context of big data because trees are supposed to be built
     * independently, and big data technologies like Spark and MapReduce are inherently suited to data-parallel
     * problems, where parts of the overall solution can be computed independently on parts of the data. The fact that
     * trees can, and should, train on only a subset of features or input data makes it trivial to parallelize
     * building the trees.*/
}
def DecisionTreePipeLine(trainData:DataFrame):Unit={
import trainData.sparkSession.implicits._
val inputCols=trainData.columns.filter(_ !="Cover_Type")
    // group all feature columns into a vector
val assembler = new VectorAssembler().setInputCols(inputCols).setOutputCol("featureVector")
// define the classifier with label, feature, and prediction column.
val dTreeClassifier = new DecisionTreeClassifier()
.setSeed(Random.nextLong())
.setLabelCol("Cover_Type")
.setFeaturesCol("featureVector")
.setPredictionCol("prediction")
// define a pipeline which include the two stages. You can add as many stages as you want into the pipeline
val pipeline = new Pipeline().setStages(Array(assembler,dTreeClassifier))
    /* The Spark ML api has built-in support, via ParamGridBuilder, for testing different combinations of hyperparameters
     * for the classifier. In the following example, we have chosen four hyperparameters. For each hyperparameter, we
     * have specified two possible values, which means we will compare 16 models with different hyperparameters.*/
val dTreeParamGrid = new ParamGridBuilder()
.addGrid(dTreeClassifier.impurity,Seq("gini","entropy"))
.addGrid(dTreeClassifier.maxDepth,Seq(1,20))
.addGrid(dTreeClassifier.maxBins,Seq(40,300))
.addGrid(dTreeClassifier.minInfoGain,Seq(0.0,0.05))
.build()
    /*We also need to define the evaluation metric that will be used to pick the "best" hyperparameters. Here we use
     * the MulticlassClassificationEvaluator api to calculate accuracy. Note that raw accuracy is not always a very
     * good evaluation metric (it ignores class imbalance).*/
val dTreeMultiClassEval=new MulticlassClassificationEvaluator()
.setLabelCol("Cover_Type")
.setPredictionCol("prediction")
.setMetricName("accuracy")
    /* Finally, we will use the TrainValidationSplit api to bring all the above components together. We could also use a
     * CrossValidator to perform full k-fold cross-validation, but it's k times more expensive and doesn't add as much
     * value in the presence of big data. So we use TrainValidationSplit.
     *
     * We set the trainRatio to 0.9, which means the training data is actually further subdivided by
     * TrainValidationSplit into 9:1 subsets. */
val dTreeValidator=new TrainValidationSplit()
.setSeed(Random.nextLong())
.setEstimator(pipeline)
.setEvaluator(dTreeMultiClassEval)
.setEstimatorParamMaps(dTreeParamGrid)
.setTrainRatio(0.9)
val validatorModel=dTreeValidator.fit(trainData)
val bestModel=validatorModel.bestModel
val paramMap=bestModel.asInstanceOf[PipelineModel].stages.last.extractParamMap
println(s"Best model param map: ${paramMap}")
/*The output looks like this
* Best model param map: {
dtc_268a6a35ce49-cacheNodeIds: false,
dtc_268a6a35ce49-checkpointInterval: 10,
dtc_268a6a35ce49-featuresCol: featureVector,
dtc_268a6a35ce49-impurity: entropy,
dtc_268a6a35ce49-labelCol: Cover_Type,
dtc_268a6a35ce49-maxBins: 40,
dtc_268a6a35ce49-maxDepth: 20,
dtc_268a6a35ce49-maxMemoryInMB: 256,
dtc_268a6a35ce49-minInfoGain: 0.0,
dtc_268a6a35ce49-minInstancesPerNode: 1,
dtc_268a6a35ce49-predictionCol: prediction,
dtc_268a6a35ce49-probabilityCol: probability,
dtc_268a6a35ce49-rawPredictionCol: rawPrediction,
dtc_268a6a35ce49-seed: 2560933445228698700
*
   * We can notice that "entropy" worked best as the impurity measure, and a max depth of 20 is better than 1. It might
   * be surprising that the best model was fit with just 40 bins. Lastly, no minimum information gain was better than a
   * small minimum, which could imply that the model is more prone to underfit than overfit.
* */
    /* We can also check the accuracy for each hyperparameter combination*/
val paramsAndMetrics = validatorModel.validationMetrics
.zip(validatorModel.getEstimatorParamMaps).sortBy(-_._1)
paramsAndMetrics.foreach{
case(metric,params)=> {println(metric)
println(params)
println()}
}
}
def unencodeOneHot(data:DataFrame):DataFrame={
//build a array of wilderness_area column names, note that 0 until 4 output (0,1,2,3)
val wildernessCols = (0 until 4).map(i=>s"Wilderness_Area_$i").toArray
//use VectorAssembler to assemble all wilderness columns into a Vector
val wildernessAssembler = new VectorAssembler().setInputCols(wildernessCols).setOutputCol("wilderness")
// Find the index of value 1.0 in the vector
val unhotUDF = udf((vec: Vector)=> vec.toArray.indexOf(1.0).toDouble)
/* transform the vector of wilderness by the index of wilderness_i which has value 1.0, and drop all
* column of wilderness_i */
val withWilderness = wildernessAssembler.transform(data)
.withColumn("wilderness",unhotUDF(col("wilderness")))
.drop(wildernessCols:_*)
/*Repeat the same operations for soil type*/
val soilCols = (0 until 40).map(i => s"Soil_Type_$i").toArray
val soilAssembler = new VectorAssembler().setInputCols(soilCols).setOutputCol("soil")
val withSoil=soilAssembler.transform(withWilderness)
.withColumn("soil",unhotUDF(col("soil")))
.drop(soilCols:_*)
return withSoil
    /* After unencodeOneHot, we have one column wilderness (categorical with 4 values) and one column soil
     * (categorical with 40 values). With the above operations, if in the original dataframe a row has
     * Wilderness_Area_2=1.0 and every other Wilderness_Area_i=0.0, then in the new dataframe the wilderness column
     * will have value 2 (0,1,2,3 are the possible values) */
}
def DTreePipelineWithCategoricalFeature(data:DataFrame):Unit={
val inputCols=data.columns.filter(col=>col!="Cover_Type")
val assembler=new VectorAssembler().setInputCols(inputCols).setOutputCol("featureVector")
val indexer = new VectorIndexer()
// here we set MaxCategories = 40, because soil has 40 values
.setMaxCategories(40)
.setInputCol("featureVector")
.setOutputCol("indexedVector")
val dTreeClassifier=new DecisionTreeClassifier()
.setSeed(Random.nextLong())
.setLabelCol("Cover_Type")
.setFeaturesCol("indexedVector")
.setPredictionCol("prediction")
val pipeline = new Pipeline().setStages(Array(assembler,indexer,dTreeClassifier))
val dTreeParamGrid = new ParamGridBuilder()
.addGrid(dTreeClassifier.impurity,Seq("gini","entropy"))
.addGrid(dTreeClassifier.maxDepth,Seq(15,20))
.addGrid(dTreeClassifier.maxBins,Seq(40,50))
.addGrid(dTreeClassifier.minInfoGain,Seq(0.0,0.05))
.build()
val dTreeMultiClassEval=new MulticlassClassificationEvaluator()
.setLabelCol("Cover_Type")
.setPredictionCol("prediction")
.setMetricName("accuracy")
val dTreeValidator=new TrainValidationSplit()
.setSeed(Random.nextLong())
.setEstimator(pipeline)
.setEvaluator(dTreeMultiClassEval)
.setEstimatorParamMaps(dTreeParamGrid)
.setTrainRatio(0.9)
val validatorModel=dTreeValidator.fit(data)
val bestModel=validatorModel.bestModel
val paramMap=bestModel.asInstanceOf[PipelineModel].stages.last.extractParamMap
println(s"Best model param map: ${paramMap}")
}
def RandomForestModelWithCategorical(data:DataFrame):Unit={
val inputCols=data.columns.filter(col=>col!="Cover_Type")
val assembler=new VectorAssembler().setInputCols(inputCols).setOutputCol("featureVector")
val indexer = new VectorIndexer()
// here we set MaxCategories = 40, because soil has 40 values
.setMaxCategories(40)
.setInputCol("featureVector")
.setOutputCol("indexedVector")
val randomForesetClassifier=new RandomForestClassifier()
.setSeed(Random.nextLong())
.setLabelCol("Cover_Type")
.setFeaturesCol("indexedVector")
.setPredictionCol("prediction")
val pipeline = new Pipeline().setStages(Array(assembler,indexer,randomForesetClassifier))
val randomFParamGrid = new ParamGridBuilder()
.addGrid(randomForesetClassifier.impurity,Seq("gini","entropy"))
.addGrid(randomForesetClassifier.numTrees,Seq(2,5))
.addGrid(randomForesetClassifier.maxDepth,Seq(15,20))
.addGrid(randomForesetClassifier.maxBins,Seq(40,50))
.addGrid(randomForesetClassifier.minInfoGain,Seq(0.0,0.05))
.build()
val randomFMultiClassEval=new MulticlassClassificationEvaluator()
.setLabelCol("Cover_Type")
.setPredictionCol("prediction")
.setMetricName("accuracy")
val randomFValidator=new TrainValidationSplit()
.setSeed(Random.nextLong())
.setEstimator(pipeline)
.setEvaluator(randomFMultiClassEval)
.setEstimatorParamMaps(randomFParamGrid)
.setTrainRatio(0.9)
val validatorModel=randomFValidator.fit(data)
val forestModel=validatorModel.bestModel.asInstanceOf[PipelineModel].stages.last.asInstanceOf[RandomForestClassificationModel]
/*get best model param map*/
val paramMap=forestModel.extractParamMap
println(s"Best model param map: ${paramMap}")
/* get feature importance of best model */
forestModel.featureImportances.toArray.zip(inputCols).
sorted.reverse.foreach(println)
    /* We can also check the accuracy for each hyperparameter combination*/
val paramsAndMetrics = validatorModel.validationMetrics
.zip(validatorModel.getEstimatorParamMaps).sortBy(-_._1)
paramsAndMetrics.foreach{
case(metric,params)=> {println(metric)
println(params)
println()}
}
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/IoTDeviceGeoIPDS.scala | package org.pengfei.spark.application.example
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object IoTDeviceGeoIPDS {
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local").
appName("IoTDeviceGeoIPDS").
getOrCreate()
import spark.implicits._
val dataSetFilePath="file:///DATA/data_set/spark/iot_devices.json"
case class DeviceIoTData (battery_level: Long, c02_level: Long, cca2: String,
cca3: String, cn: String,
device_id: Long,
device_name: String,
humidity: Long,
ip: String,
latitude: Double,
longitude: Double,
scale: String,
temp: Long,
timestamp: Long
)
val ds=spark.read.json(dataSetFilePath)
/*println(ds.getClass().getName)
ds.show(5)
*/
//ds.printSchema()
//Q1.
// filter out all devices whose temperature exceed 25 degrees and generate
// another Dataset with three fields that of interest and then display
// the mapped Dataset
val dsTemp25=ds.filter(ds("temp")>25).select(ds("temp"),ds("device_name"),ds("cca3"))
//dsTemp25.show(5)
//Q2.
// Apply higher-level Dataset API methods such as groupBy() and avg().
// Filter temperatures > 25, along with their corresponding
// devices' humidity, compute averages, groupBy cca3 country codes,
// and display the results, using table and bar charts
val dsAvgTmp = ds.filter(ds("temp")>25).select(ds("temp"),ds("humidity"),ds("cca3")).groupBy(ds("cca3")).avg()
//dsAvgTmp.show(5)
//Q3.
// Select individual fields using the Dataset method select()
// where battery_level is greater than 6. Note this high-level
// domain specific language API reads like a SQL query
val dsBatteri6=ds.select($"battery_level", $"c02_level", $"device_name").where($"battery_level" > 6).sort($"c02_level")
// dsBatteri6.show(5)
//Q4.
// Use spark sql query
ds.createOrReplaceTempView("iot_device_data")
val sqlDF = spark.sql("select * from iot_device_data")
//sqlDF.show(5)
//Q5
// count all devices for all countries
val deviceCountDF = spark.sql("select cca3, count(distinct device_id) as count_device from iot_device_data group by cca3 order by count_device desc limit 100")
//deviceCountDF.show(5)
//Q6
// Select all countries' devices with high-levels of C02 and group by cca3 and order by device_ids
val redDeviceCountDF = spark.sql("select cca3, count(distinct device_id) as count_device from iot_device_data where lcd == 'red' group by cca3 order by count_device desc limit 100 ")
val red=ds.filter(ds("lcd")==="red").select(ds("cca3"),ds("device_id")).groupBy(ds("cca3")).count().orderBy($"count".desc)
//red.show(5)
//redDeviceCountDF.show(5)
//Q7
//find out all devices in countries whose batteries need replacements
val batteryRepDF= spark.sql("select count(distinct device_id) as count_device, cca3 from iot_device_data where battery_level == 0 group by cca3 order by count_device desc limit 100 ")
val battery=ds.filter(ds("battery_level")===0).select(ds("cca3"),ds("device_id")).groupBy(ds("cca3")).count().orderBy($"count".desc)
//battery.show(5)
//batteryRepDF.show(5)
//Q8 convert data to RDD
//val deviceEventsDS = ds.select($"device_name",$"cca3", $"c02_level").where($"c02_level" > 1300)
val deviceEventsDS = ds.filter($"c02_level">1300).select($"device_name",$"cca3", $"c02_level")
val eventsRDD = deviceEventsDS.rdd.take(10)
//eventsRDD.foreach(println)
//Q9 get all devices in china
val chineseDevices = ds.filter($"cca3"==="CHN").orderBy($"c02_level".desc)
chineseDevices.show(5)
}
/*
*Q6
* +----+-----+
|cca3|count|
+----+-----+
| USA|17489|
| CHN| 3616|
| KOR| 2942|
| JPN| 2935|
| DEU| 1966|
+----+-----+
Q7
+------------+----+
|count_device|cca3|
+------------+----+
| 7043| USA|
| 1415| CHN|
| 1217| KOR|
| 1210| JPN|
| 760| DEU|
+------------+----+
* */
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/TweeterPopularHashTags.scala | package org.pengfei.spark.application.example
import org.apache.spark.streaming.twitter.TwitterUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
/**
* A Spark Streaming application that receives tweets on certain
* keywords from twitter datasource and find the popular hashtags
*
 * Arguments: <consumerKey> <consumerSecret> <accessToken> <accessTokenSecret> <keyword_1> ... <keyword_n>
 * <consumerKey> - Twitter consumer key
* <consumerSecret> - Twitter consumer secret
* <accessToken> - Twitter access token
* <accessTokenSecret> - Twitter access token secret
* <keyword_1> - The keyword to filter tweets
* <keyword_n> - Any number of keywords to filter tweets
*
* More discussion at stdatalabs.blogspot.com
*
* @author <NAME>
*/
object TweeterPopularHashTags {
val conf = new SparkConf().setMaster("local[4]").setAppName("Spark Streaming - PopularHashTags")
val sc = new SparkContext(conf)
def main(args: Array[String]) {
sc.setLogLevel("WARN")
//val Array(consumerKey, consumerSecret, accessToken, accessTokenSecret) = args.take(4)
val consumerKey="9F2cDP6mBO001MJtFyLybWGqT"
val consumerSecret="<KEY>"
val accessToken="<KEY>"
val accessTokenSecret="<KEY>"
//val filters = args.takeRight(args.length - 4)
val filters = Array("Trump","China")
// Set the system properties so that Twitter4j library used by twitter stream
    // can use them to generate OAuth credentials
System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
System.setProperty("twitter4j.oauth.accessToken", accessToken)
System.setProperty("twitter4j.oauth.accessTokenSecret", accessTokenSecret)
// Set the Spark StreamingContext to create a DStream for every 5 seconds
val ssc = new StreamingContext(sc, Seconds(5))
    // Pass the filter keywords as arguments
// val stream = FlumeUtils.createStream(ssc, args(0), args(1).toInt)
val stream = TwitterUtils.createStream(ssc, None, filters)
// Split the stream on space and extract hashtags
val hashTags = stream.flatMap(status => status.getText.split(" ").filter(_.startsWith("#")))
    // Get the top hashtags over the previous 60 sec window
    val topCounts60 = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(60))
.map { case (topic, count) => (count, topic) }
.transform(_.sortByKey(false))
// Get the top hashtags over the previous 10 sec window
val topCounts10 = hashTags.map((_, 1)).reduceByKeyAndWindow(_ + _, Seconds(10))
.map { case (topic, count) => (count, topic) }
.transform(_.sortByKey(false))
    // print tweets in the current DStream
stream.print()
// Print popular hashtags
topCounts60.foreachRDD(rdd => {
val topList = rdd.take(10)
println("\nPopular topics in last 60 seconds (%s total):".format(rdd.count()))
topList.foreach { case (count, tag) => println("%s (%s tweets)".format(tag, count)) }
})
topCounts10.foreachRDD(rdd => {
val topList = rdd.take(10)
println("\nPopular topics in last 10 seconds (%s total):".format(rdd.count()))
topList.foreach { case (count, tag) => println("%s (%s tweets)".format(tag, count)) }
})
ssc.start()
ssc.awaitTermination()
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson01_RDD/Lesson01_RDDWithNumTypes.scala | package org.pengfei.Lesson01_RDD
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson01_RDDWithNumTypes {
/* RDDs containing data elements of type Integer, Long, Float, or Double support a few additional actions that
* are useful for statistical analysis.*/
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().master("local[2]").appName("Lesson1_RDDWithNumTypes").getOrCreate()
val sc = spark.sparkContext
/************************************************************************************************
* *********************************RDD with num type special actions **************************
* **********************************************************************************************/
val nums=sc.parallelize(List(2,5,3,1,9,6))
/************************************** mean *************************************************/
/* The mean method returns the average of the elements in the source RDD */
val mean= nums.mean()
println(s"Mean of nums : ${mean}")
/* *************************************** stdev ********************************************/
/* The stdev method returns the standard deviation of the elements in the source RDD.
*
* In statistics, the standard deviation (SD, also represented by the Greek letter sigma σ or the Latin letter s)
* is a measure that is used to quantify the amount of variation or dispersion of a set of data values.
* A low standard deviation indicates that the data points tend to be close to the mean
* (also called the expected value) of the set, while a high standard deviation indicates that the
* data points are spread out over a wider range of values.*/
val stdev = nums.stdev()
println(s"Stdev of nums : ${stdev}")
/********************************* sum *******************************************************/
/*The sum method returns the sum of the elements in the source RDD.*/
val sum=nums.sum()
println(s"sum of nums: ${sum}")
/********************************** Variance ***************************************************/
/* The variance method returns the variance of the elements in the source RDD.
* In probability theory and statistics, variance is the expectation of the squared deviation of
* a random variable from its mean. Informally, it measures how far a set of (random) numbers are spread out
* from their average value. Variance has a central role in statistics, where some ideas that use it include
* descriptive statistics, statistical inference, hypothesis testing, goodness of fit, and Monte Carlo sampling.
* Variance is an important tool in the sciences, where statistical analysis of data is common. The variance is
* the square of the standard deviation, the second central moment of a distribution, and the covariance of the
* random variable with itself, and it is often represented by s2 or Var(X).*/
val variance=nums.variance()
println(s" variance of nums: ${variance}")
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_1_Spark_Streaming.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_1_Spark_Streaming.scala<gh_stars>0
package org.pengfei.Lesson06_Spark_Streaming
import java.io.{BufferedReader, InputStream, InputStreamReader}
import org.apache.log4j.receivers.net.SocketHubReceiver
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.flume._
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}
import org.apache.spark.streaming.twitter._
import scala.collection.mutable
object Lesson06_1_Spark_Streaming {
def main(args:Array[String])= {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val master="local[2]"
val appName="Lesson6_1_Spark_Streaming"
// you can also set appName in chinese
val chineseAppName="宋体"
val spark=SparkSession.builder().appName(chineseAppName).master(master).getOrCreate()
// DStream Example
DStreamExample(spark)
}
/*************************************************************************************************
* ***************************************6.1. Spark Streaming Introduction***************************
* **********************************************************************************************/
/*
* Batch processing systems have high latency. Depending on the volume of data, it may take anywhere
* from a few minutes to a few hours to process a batch. Some organizations run nightly batch processing jobs
* that run for 6 to 12 hours on a cluster of hundreds of machines. Thus, there is a long wait before you can see
* the results generated by a batch processing application. In addition, since data is not immediate processed,
* the gap between the time when data is collected and the time when the batch processing result becomes
* available is even longer. This time gap is acceptable for a certain class of applications.
*
* However, sometimes data needs to be processed and analyzed as it is collected. For example, fraud
* detection in an e-commerce system must happen in real time. Similarly, network intrusion or security
* breach detection must be in real time. Another example is application or device failure detection in a data
* center. To prevent a long downtime, data must be processed right away.
*
* One of the challenges with live data stream processing is handling high-velocity data in real time or
* near real time. A data stream processing application running on a single machine will not be able to handle
* high-velocity data. A distributed stream processing framework addresses this issue.
*
   * In this lesson, we introduce Spark Streaming. The introduction is followed by a detailed discussion of the
* application programming interface provided by Spark Streaming. At the end of the lesson, you will develop
* an application using Spark Streaming.
*
* The spark official doc can be found: https://spark.apache.org/docs/latest/streaming-programming-guide.html */
/****************************************What is spark Streaming *****************************************/
/*
* Spark Streaming is a distributed data stream processing framework. It makes it easy to develop distributed
* applications for processing live data streams in near real time. It not only provides a simple programming
* model but also enables an application to process high-velocity stream data. It also allows the combining of
* data streams and historical data for processing.
*
   * Spark Streaming is a Spark add-on (a Spark library that runs on top of Spark). It extends Spark for data
   * stream processing. It provides higher-level abstractions for processing streaming data, but under the hood,
   * it uses Spark. The Spark Streaming lib inherits all the features and benefits of Spark core.
   *
   * In addition, it can be used along with other Spark libraries, such as Spark SQL, ML, MLlib, GraphX.
   * Thus, Spark Streaming makes the power of the complete Spark stack available for processing data streams*/
/*************************************************************************************************
* ***************************************6.1.1 Spark Streaming Architecture***************************
* **********************************************************************************************/
/**********************************Basic mechanism*******************************************/
/* Spark Streaming processes a data stream in micro-batches. It splits a data stream into batches of very small
fixed-sized time intervals. Data in each micro-batch is stored as an RDD, which is then processed using
Spark core (see Figure 6-2 page 80). Any RDD operation can be applied to an RDD created by Spark Streaming. The
results of the RDD operations are streamed out in batches.*/
/*********************************Accepted Data source for spark steaming********************************/
/*
* Spark Streaming supports a variety of data stream sources, including TCP socket, Twitter, Kafka, Flume,
* Kinesis, ZeroMQ, and MQTT. It can also be used to process a file as a stream. In addition, you can extend it to
* process data from a custom streaming data source.
*
* The streaming data sources for which Spark Streaming has built-in support can be grouped into two
* categories:
* - basic sources
* - advanced sources.
*
* - Basic data stream sources : include TCP sockets, Akka Actors, and files. Spark Streaming includes the libraries
* required to process data from these sources. A Spark Streaming application that wants
* to process data streams from a basic source needs to link only against the Spark
* Streaming library.
*
* - Advanced data stream sources : include Kafka, Flume, Kinesis, MQTT, ZeroMQ, and Twitter. The libraries required
* for processing data streams from these sources are not included with Spark Streaming,
* but are available as external libraries. A Spark Streaming application that wants
* to process stream data from an advanced source must link against not only the Spark
* Streaming library, but also the external library for that source.
*
* */
/*************************************************************************************************
* ***************************************6.1.2 Spark Streaming key components***************************
* **********************************************************************************************/
/**********************************************Receiver********************************************/
/* A Receiver receives data from a streaming data source and stores it in memory. Spark Streaming creates and
* runs a Receiver on a worker node for each data stream. An application can connect to multiple data streams
* to process data streams in parallel.*/
/*********************************************Destinations*******************************************/
/* The results obtained from processing a data stream can be used in a few different ways (see Figure 6-3, page 81).
* The results may be fed to another application, which may take some action or just display it. For example, a
* Spark Streaming application may feed the results to a dashboard application that is updated continuously.
* Similarly, in a fraud detection application, the results may trigger cancellation of a transaction. The results
* can also be stored in a storage system such as a file or a database.*/
/************************************************************************************************************
* *********************6.2 Spark Streaming Application programming Interface (API)***************************
* ******************************************************************************************************/
def SteamingContextExample():Unit={
    /* To use Spark Streaming, we need to add a dependency in maven : In the below example, we use spark-streaming 2.3.1
* with scala version 2.11
* <dependency>
* <groupId>org.apache.spark</groupId>
* <artifactId>spark-streaming_2.11</artifactId>
* <version>2.3.1</version>
* </dependency>
*/
/********************************6.2.1 Spark Streaming context ******************************/
/* Spark streaming entry point is StreamingContext, the below example shows how to create a StreamingContext
* from scratch. */
val master="local[2]"
val appName="Lesson6_1_Spark_Streaming"
val batchInterval=10
val conf =new SparkConf().setAppName(appName).setMaster(master)
val ssc=new StreamingContext(conf,Seconds(batchInterval))
/* master value can be a:
* - spark standalone cluster url
* - spark Mesos cluster url
* - spark yarn cluster url
* - local[*] local model
*
* you will not want to hardcode master in the program, but rather launch the application with spark-submit and
* receive it there. However, for local testing and unit tests, you can pass “local[*]” to run Spark Streaming
* in-process (detects the number of cores in the local system). Note that this internally creates a SparkContext
* (starting point of all Spark functionality) which can be accessed as ssc.sparkContext. */
/* We can also create a Streaming context from an existing context. The below example shows how to create a
* Streaming Context from a sparkContext/sparkSession */
//val spark=SparkSession.builder().appName(appName).master(master).getOrCreate()
//val ssc1=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
/* Seconds() defines the batch interval of the streaming context(micro batch). It must be set based on the
* latency requirements of your application and available cluster resources. A data stream is split into batches
     * of this time duration and each batch is processed as an RDD.
*
* The smallest batch interval is 500 milliseconds. The upper bound is determined by the latency requirements of
     * your application and available memory. The executors created for a Spark Streaming application must have
* sufficient memory to store the received data in memory for good performance.
*
* For a Spark Streaming application running on a cluster to be stable, the system should be able to process data
* as fast as it is being received. In other words, batches of data should be processed as fast as they are
* being generated. Whether this is true for an application can be found by monitoring the processing times
* in the streaming web UI, where the batch processing time should be less than the batch interval.
*
* Depending on the nature of the streaming computation, the batch interval used may have significant impact on
* the data rates that can be sustained by the application on a fixed set of cluster resources. For example,
* let us consider a WordCountNetwork application example. For a particular data rate, the system may be able
* to keep up with reporting word counts every 2 seconds (i.e., batch interval of 2 seconds), but not every
* 500 milliseconds. So the batch interval needs to be set such that the expected data rate in production can
* be sustained.
*
* A good approach to figure out the right batch size for your application is to test it with a conservative
* batch interval (say, 5-10 seconds) and a low data rate. To verify whether the system is able to keep up
* with the data rate, you can check the value of the end-to-end delay experienced by each processed batch
* (either look for “Total delay” in Spark driver log4j logs, or use the StreamingListener interface).
* If the delay is maintained to be comparable to the batch size, then system is stable. Otherwise,
     * if the delay is continuously increasing, it means that the system is unable to keep up and is
     * therefore unstable. Once you have an idea of a stable configuration, you can try increasing the data
* rate and/or reducing the batch size. Note that a momentary increase in the delay due to temporary data
* rate increases may be fine as long as the delay reduces back to a low value (i.e., less than batch size).
* */
/******************************6.2.2 Spark Streaming application key stages ******************************/
/* After a context is defined, you have to do the following key stages
* 1. - Define the input sources by creating input DStreams.
   * 2. - Define the streaming computations by applying transformation and output operations to DStreams (a minimal illustration of stages 1 and 2 follows below).
   * 3. - Start receiving data and processing it using streamingContext.start(). The checkpoint directory, if needed, is normally set before this step.
* 4. - Wait for the processing to be stopped (manually or due to any error) using streamingContext.awaitTermination().
* 5. - The processing can be manually stopped using streamingContext.stop().
*
* Points to be remembered:
* - Once a context has been started, no new streaming computations can be set up or added to it.
* - Once a context has been stopped, it cannot be restarted.
* - Only one StreamingContext can be active in a JVM at the same time.
* - stop() on StreamingContext also stops the SparkContext. To stop only the StreamingContext, set the optional
* parameter of stop() called stopSparkContext to false.
* - A SparkContext can be re-used to create multiple StreamingContexts, as long as the previous StreamingContext
* is stopped (without stopping the SparkContext) before the next StreamingContext is created.
* */
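  /* A minimal illustration of stages 1 and 2 (assumption: something is writing text to localhost:9999,
   * for example `nc -lk 9999`). Note that without at least one output operation such as print(),
   * the ssc.start() call below would fail with "No output operations registered". */
  val lines = ssc.socketTextStream("localhost", 9999)
  lines.flatMap(_.split(" ")).map(word => (word, 1)).reduceByKey(_ + _).print()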
  /***************************** Starting Stream Computation**************************************/
ssc.start()
  /* The start method begins stream computation. Nothing really happens in a Spark Streaming application
   * until the start method is called on an instance of the StreamingContext class. A Spark Streaming application
   * begins receiving data after it calls the start method. */
/**********************************CheckPointing************************************************/
/* The checkpoint method defined in the streamingContext class tells Spark Streaming to periodically checkpoint
* data. It takes the name of a directory as an argument. For a production application, the checkpoint directory
* should be on a fault-tolerant storage system such as HDFS.*/
val checkPointDir="/tmp/spark-streaming/check-point"
ssc.checkpoint(checkPointDir)
/* A Spark Streaming application must call this method if it needs to recover from driver failures or if it
* performs stateful transformations. The data processed by a Spark Streaming application is conceptually
* a never ending sequence of continuous data. If the machine running the driver program crashes after
* some data has been received but before it has been processed, there is a potential for data loss. Ideally,
* a Spark Streaming application should be able to recover from failures without losing data. To enable this
* functionality, Spark Streaming requires an application to checkpoint metadata.
*
* In addition, data checkpointing is required when an application performs stateful transformation on a
* data stream. A stateful transformation is an operation that combines data across multiple batches in a data
* stream. An RDD generated by a stateful transformation depends on the previous batches in a data stream.
* Therefore, the dependency tree for an RDD generated by a stateful transformation grows with time. In case
* of a failure, Spark Streaming reconstructs an RDD using its dependency tree. As the dependency tree grows,
* the recovery time increases. To prevent recovery time from becoming too high, Spark Streaming checkpoints
* intermediate RDDs of a stateful transformation. Therefore, Spark Streaming requires an application to call
* the checkpoint method prior to using a stateful transformation. The stateful transformations supported by
* Spark Streaming are discussed later in this Lesson.*/
/***********************************Stopping Stream Computation***********************************/
/* The stop method, as the name implies, stops stream computation. By default, it also stops SparkContext.
* This method takes an optional parameter that can be used to stop only the StreamingContext, so that the
* SparkContext can be used to create another instance of StreamingContext.
*
* stop(boolean stopSparkContext, boolean stopGracefully). Stop the execution of the streams, with option of ensuring
* all received data has been processed.
* stopSparkContext - if true, stops the associated SparkContext. The underlying SparkContext will be stopped
* regardless of whether this StreamingContext has been started.
* stopGracefully - if true, stops gracefully by waiting for the processing of all received data to be completed
   * By default, stopSparkContext is true and stopGracefully is false. */
  // stop only the streaming context, not the spark context
  ssc.stop(stopSparkContext = false)
/*******************************Waiting for Stream computation to finish*****************************/
/* The awaitTermination method in the StreamingContext class makes an application thread wait for stream
* computation to stop.
*
   * The awaitTermination method is needed to keep the driver alive: the start method in the
   * StreamingContext class is non-blocking; it returns immediately after launching the stream computation
   * in the background. Without awaitTermination (or something else keeping the main thread busy), the
   * driver's main thread could exit before the streaming computation finishes. Calling awaitTermination
   * makes the calling thread block until the computation is stopped (manually or due to an error).*/
ssc.awaitTermination()
}
/*****************************************************************************************************
  ************************************ 6.2.3 Streaming Context data types *****************************
* ***************************************************************************************************/
def DStreamExample(spark:SparkSession):Unit={
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val hostName:String="localhost"
val port:Int=9999
// Check point can be also write on hdfs,ssc.checkpoint("/user/hadoop/checkpoint")
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
/******************************************6.2.3.1 Discretized Streams(DStreams)*****************************/
/* DStream is the basic abstraction provided by Spark Streaming. It represents a continuous stream of data,
* either the input data stream received from source, or the processed data stream generated by transforming
* the input stream. Internally, a DStream is represented by a continuous series of RDDs,
* Each RDD in a DStream contains data from a certain interval (figure 6-4, page 85).
*
* DStream is defined as an abstract class in the Spark Streaming library. It defines an interface for
* processing a data stream. Spark Streaming provides concrete classes that implement the DStream interface
* for stream data from a variety of sources. I use the term DStream generically to refer to both the abstract
* class and the classes implementing the interface defined by the DStream class.
*
* Since a DStream is a sequence of RDDs, it inherits the key RDD properties. It is immutable, partitioned, and
* fault tolerant.
* */
/****************************************Creating a DStream ******************************************/
/* A DStream can be created from a streaming data source or from an existing DStream by applying a
* transformation. Since DStream is an abstract class, you cannot directly create an instance of the DStream
* class. The Spark Streaming library provides factory methods for creating instances of the concrete classes
* that implement the interface defined by DStream.
*
* We can get a DStream from a DataSource.
* - Basic DataSource
* -- socketStream
* -- socketTextStream
* -- fileStream
* -- actorStream
* -- queueSteam
*
     *  - Advanced DataSource
* -- Kafka
* -- Flume
* -- Kinesis
*
* Points to remember
* When running a Spark Streaming program locally, do not use “local” or “local[1]” as the master URL. Either of
* these means that only one thread will be used for running tasks locally. If you are using an input DStream
* based on a receiver (e.g. sockets, Kafka, Flume, etc.), then the single thread will be used to run the
* receiver, leaving no thread for processing the received data. Hence, when running locally, always use
* “local[n]” as the master URL, where n > number of receivers to run (see Spark Properties for information
* on how to set the master).
*
* Extending the logic to running on a cluster, the number of cores allocated to the Spark Streaming application
* must be more than the number of receivers. Otherwise the system will receive data, but not be able to
* process it.
*
* */
/**********************************************************************************************************
* *********************************Basic data source************************************************
* ****************************************************************************************************/
    /*The following code examples show the basic data sources in the Spark Streaming API*/
/*************************************socketTextStream************************************************/
// SocketTextStreamExample(ssc,hostName,port)
/***************************************socketStream*****************************************************/
// SocketStreamExample(ssc,hostName,port)
/**********************************rawSocketStream *****************************************************/
    // RawSocketStreamExample(ssc,hostName,port)
/**********************************TextFileStream***********************************/
// TextFileStreamExample(ssc)
/***********************************Actor Stream*******************************************/
    //ActorStreamExample(ssc)
/***************************************Queue Stream*************************************/
    //QueueStreamExample(ssc)
/**********************************************************************************************************
* *********************************Advance data source************************************************
* ****************************************************************************************************/
/* The factory methods for creating a DStream from advanced sources such as Kafka, Flume, or Twitter are not
* built-in, but available through extra utility classes. To process a data stream from an advanced source, an
* application needs to perform the following steps:
*
* 1. Import the utility class for that source and create a DStream using the factory
* method provided by that class.
     *  2. Link against the library that contains the utility class for that source (example sbt dependency lines below).
* 3. Create an uber JAR that includes all application dependencies and deploy the application on a Spark cluster.*/
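    /* For example (assumption: an sbt build; the exact artifact names and versions depend on your Spark and
     * Kafka/Flume versions):
     *   libraryDependencies += "org.apache.spark" %% "spark-streaming-kafka-0-8" % sparkVersion
     *   libraryDependencies += "org.apache.spark" %% "spark-streaming-flume" % sparkVersion */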
    /*The following code examples show the advanced data sources in the Spark Streaming API*/
/**********************************TwitterStream**************************************/
//TwitterStreamExample(ssc)
/*************************************FlumeStream******************************************/
//FlumeStreamExample(ssc,hostName,port)
/*****************************************Kafka Steam ******************************************/
//KafkaStreamExample(ssc)
}
def SocketTextStreamExample(ssc:StreamingContext,hostName:String,port:Int):Unit={
/*************************************socketTextStream************************************************/
    /* The socketTextStream method creates a DStream that represents stream data received over a TCP socket connection.
* It takes three input parameters. The first argument is the hostname of the data source. The second argument
* is the port to connect to for receiving data. The third argument, which is optional, specifies the storage
* level for the received data.
*
     * The default value of the storageLevel is StorageLevel.MEMORY_AND_DISK_SER_2, which stores the received data
* first in memory and spills to disk if the available memory is insufficient to store all received data. In addition,
* it deserializes the received data and reserializes it using Spark’s serialization format. Thus, this storage level
* incurs the overhead of data serialization, but it reduces JVM garbage collection-related issues. The received
* data is also replicated for fault tolerance.*/
val lines:DStream[String] = ssc.socketTextStream(hostName,port,StorageLevel.MEMORY_ONLY)
    /*lines is a DStream of String which represents the stream of data that will be received. */
// Split each lines into words
val words=lines.flatMap(_.split(" "))
// simple word count
val pairs=words.map(word=>(word,1))
val wordCounts=pairs.reduceByKey(_ + _)
wordCounts.print()
ssc.start()
ssc.awaitTermination()
}
/************************Socket Stream********************************************************************/
def SocketStreamExample(ssc:StreamingContext,hostName:String,port:Int):Unit={
/* socketStream takes four arguments, 1st is hostname:String, 2nd is port:Int, 3rd is converter:scala.Function, 4th is
* storageLevel.*/
val lines=ssc.socketStream(hostName,port,linesHash,StorageLevel.MEMORY_ONLY)
// The linesHash is an example of the converter function which takes InputStream as input and return a Iterator as
// output
lines.print()
ssc.start()
ssc.awaitTermination()
}
/**************************************rawSocketStream******************************************/
  def RawSocketStreamExample(ssc:StreamingContext,host:String,port:Int):Unit={
val numStreams=3
/**
* Receives text from multiple rawNetworkStreams and counts how many '\n' delimited
* lines have the word 'the' in them. This is useful for benchmarking purposes. This
* will only work with spark.streaming.util.RawTextSender running on all worker nodes
* and with Spark using Kryo serialization (set Java property "spark.serializer" to
* "org.apache.spark.serializer.KryoSerializer").
* Usage: RawNetworkGrep <numStreams> <host> <port> <batchMillis>
* <numStream> is the number rawNetworkStreams, which should be same as number
* of work nodes in the cluster
* <host> is "localhost".
* <port> is the port on which RawTextSender is running in the worker nodes.
* <batchMillise> is the Spark Streaming batch duration in milliseconds.
*/
//For each stream nums, we create a rawSocketStream
val rawStreams=(1 to numStreams).map(_=>ssc.rawSocketStream[String](host,port,StorageLevel.MEMORY_AND_DISK_SER_2)).toArray
// we union the Dstream of the three rawSocketStream
val union=ssc.union(rawStreams)
//union represent the union of the data received from the three socket
union.print()
union.filter(_.contains("the")).count().foreachRDD(r =>
println("Grep count: " + r.collect().mkString))
ssc.start()
ssc.awaitTermination()
}
/*******************************************TextFileStream*************************************************/
def TextFileStreamExample(ssc:StreamingContext):Unit={
val dirPath="/tmp/spark-streaming/file_stream_test"
/* The textFileStream method creates a DStream that monitors a Hadoop-compatible file system for new files
* and reads them as text files. It takes a directory path as input to monitor. Files must be written to the
* monitored directory by moving them from another location within the same file system. For example, on a
* Linux system, files should be written into the monitored directory using the mv command.*/
val lines=ssc.textFileStream(dirPath)
val wordCount=lines.flatMap(line=>line.split(" ")).map(word=>(word,1)).reduceByKey((x,y)=>x+y)
    wordCount.print()
    // Start the computation and block until it terminates, as in the other examples
    ssc.start()
    ssc.awaitTermination()
  }
/***************************************Actor Stream*********************************************/
  def ActorStreamExample(ssc:StreamingContext):Unit={
/* The actorStream method creates a DStream with a user-implemented Akka actor Receiver.*/
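    /* Note (assumption about the Spark version in use): since Spark 2.x the Akka actor receiver is no longer
     * bundled with spark-streaming itself; an equivalent receiver is provided by the separate Apache Bahir
     * project (streaming-akka). */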
}
/***************************************Queue( of RDD) Stream*********************************************/
  def QueueStreamExample(ssc:StreamingContext):Unit={
    /* Queue objects implement a data structure that allows elements to be inserted and retrieved in first-in-first-out
     * (FIFO) order*/
val rddQueue = new mutable.Queue[RDD[Int]]()
val inputStream=ssc.queueStream(rddQueue)
val mappedStream=inputStream.map(x=>(x%10,1))
val reducedStream = mappedStream.reduceByKey(_ + _)
reducedStream.print()
ssc.start()
//create and push some RDDs into rddQueue
for(i<-1 to 30){
rddQueue.synchronized{
rddQueue += ssc.sparkContext.makeRDD(1 to 1000, 10)
}
Thread.sleep(1000)
}
ssc.stop()
}
/****************************************************** Twitter Stream Example ***************************************/
def TwitterStreamExample(ssc:StreamingContext):Unit={
/*System.setProperty("twitter4j.oauth.consumerKey","consumerKey")
System.setProperty("twitter4j.oauth.consumerSecret","consumerSecret")
System.setProperty("twitter4j.oauth.accessToken",accessToken)
System.setProperty("twitter4j.oauth.accessTokenSecret", "accessTokenSecret")*/
TwitterUtils.createStream(ssc,None)
}
/***********************************************FlumeStreamExample************************************************/
def FlumeStreamExample(ssc:StreamingContext,host:String,port:Int):Unit={
val stream = FlumeUtils.createStream(ssc, host, port, StorageLevel.MEMORY_ONLY_SER_2)
// Print out the count of events received from this server in each batch
stream.count().map(cnt => "Received " + cnt + " flume events." ).print()
val mappedlines = stream.map{sparkFlumeEvent =>
val event = sparkFlumeEvent.event
println("Value of event " + event)
println("Value of event Header " + event.getHeaders)
println("Value of event Schema " + event.getSchema)
val messageBody = new String(event.getBody.array())
println("Value of event Body " + messageBody)
messageBody}.print()
ssc.start()
ssc.awaitTermination()
}
/********************************************Kafka Stream Example*********************************************/
//Not tested
def KafkaStreamExample(ssc:StreamingContext):Unit={
    //Kafka uses ZooKeeper for coordination; the receiver-based KafkaUtils.createStream connects through the ZooKeeper quorum
val zkQuorum = "hadoop-nn.bioaster.org:2181" //Zookeeper server url
// config Kafka group and topic
val group = "1" //set topic group, for example val group = "test-consumer-group"
val topics = "Hello-Kafka" //topics name
    val numThreads = 3 //number of consumer threads per topic
val topicMap =topics.split(",").map((_,numThreads.toInt)).toMap
// create kafkaStream
val lineMap = KafkaUtils.createStream(ssc,zkQuorum,group,topicMap)
val lines = lineMap.map(_._2)
val words = lines.flatMap(_.split(" "))
val pair = words.map(x => (x,1))
val wordCounts = pair.reduceByKeyAndWindow(_ + _,_ - _,Minutes(2),Seconds(10),2)
    wordCounts.print()
    ssc.start()
    ssc.awaitTermination()
}
/*
  * An iterator is not a collection (list, array, etc.) but a way to access the elements of a collection one by one.
  * Its basic operations are next and hasNext. Calling next returns the element at the current iterator
  * position; if there are no more elements to return, it throws NoSuchElementException. Because it is a traversal
  * pointer into a collection, it can be used only once*/
def linesHash(inputStream:InputStream):Iterator[(String,String)]={
    val reader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    // Emit one (line, hashCode) pair per received line until the stream is exhausted
    Iterator.continually(reader.readLine()).takeWhile(_ != null).map(line => (line, line.hashCode.toString))
}
} |
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_2_1_Clustering_Algo.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson05_Spark_ML
import org.apache.spark.sql.SparkSession
object Lesson05_2_2_1_Clustering_Algo {
/******************************************************************************************************************
* *****************************************5.2.2.1 Clustering Algorithms *****************************************
* **************************************************************************************************************/
/* Clustering is a Machine Learning technique that involves the grouping of data points. Given a set of data points,
* we can use a clustering algorithm to classify each data point into a specific group. In theory, data points that
* are in the same group should have similar properties and/or features, while data points in different groups should
* have highly dissimilar properties and/or features. Clustering is a method of unsupervised learning and is a
* common technique for statistical data analysis used in many fields.
*
* In Data Science, we can use clustering analysis to gain some valuable insights from our data by seeing what
* groups the data points fall into when we apply a clustering algorithm. Here, we’re going to look at 5 popular
* clustering algorithms that data scientists need to know and their pros and cons!
*
* The below 5 algo are from https://towardsdatascience.com/the-5-clustering-algorithms-data-scientists-need-to-know-a36d136ef68*/
def main(args:Array[String])={
}
/***********************************************K-means/K-median algo******************************************/
def KMeansExample(spark:SparkSession):Unit={
/*
* The k-means algorithm finds groupings or clusters in a dataset. It is an iterative algorithm that partitions
* data into k mutually exclusive clusters, where k is a number specified by a user.
*
* The k-means algorithm uses a criterion known as within-cluster sum-of-squares for assigning
* observations to clusters. It iteratively finds cluster centers and assign observations to clusters such that
* within-cluster sum-of-squares is minimized.
*
* The number of clusters in which data should be segmented is specified as an argument to the k-means algorithm.
* The k-mean algo has four principal steps
* Step1 : we select a number of classes/groups to use and randomly initialize their respective center points.
* To figure out the number of classes to use, it’s good to take a quick look at the data and try to
* identify any distinct groupings. The center points are vectors of the same length as each data
   *           point vector and are the “X”s in the graphic of the article referenced above.
* Step2 : Each data point is classified by computing the distance between that point and each group center,
* and then classifying the point to be in the group whose center is closest to it.
* Step3 : Based on these classified points, we recompute the group center by taking the mean of all the vectors
* in the group.
* Step4 : Repeat these steps for a set number of iterations or until the group centers don’t change much between
* iterations. You can also opt to randomly initialize the group centers a few times, and then select the
* run that looks like it provided the best results.
*
* K-Means has the advantage that it’s pretty fast, as all we’re really doing is computing the distances between
* points and group centers; very few computations! It thus has a linear complexity O(n).
*
* On the other hand, K-Means has a couple of disadvantages. Firstly, you have to select how many groups/classes
* there are. This isn’t always trivial and ideally with a clustering algorithm we’d want it to figure those out
* for us because the point of it is to gain some insight from the data. K-means also starts with a random choice
* of cluster centers and therefore it may yield different clustering results on different runs of the algorithm.
* Thus, the results may not be repeatable and lack consistency. Other cluster methods are more consistent.
*
* K-Medians is another clustering algorithm related to K-Means, except instead of recomputing the group center
* points using the mean we use the median vector of the group. This method is less sensitive to outliers
* (because of using the Median) but is much slower for larger datasets as sorting is required on each iteration
* when computing the Median vector.
* */
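    /* A minimal runnable sketch of k-means with Spark ML. The tiny in-memory feature set below is a made-up
     * example; a real application would load and prepare its own feature vectors. */
    import org.apache.spark.ml.clustering.KMeans
    import org.apache.spark.ml.linalg.Vectors
    val data = spark.createDataFrame(Seq(
      Vectors.dense(0.0, 0.0), Vectors.dense(0.1, 0.1),
      Vectors.dense(9.0, 9.0), Vectors.dense(9.1, 9.2)
    ).map(Tuple1.apply)).toDF("features")
    // k (the number of clusters/groups) must be chosen by the user, as explained above
    val kmeansModel = new KMeans().setK(2).setSeed(1L).setFeaturesCol("features").fit(data)
    kmeansModel.clusterCenters.foreach(center => println(s"Cluster center: $center"))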
  }

  /**************************************************Mean-Shift Clustering*****************************************/
  def MeanShiftClusteringExample(spark:SparkSession):Unit={
/* Mean shift clustering is a sliding-window-based algorithm that attempts to find dense areas of data points.
* It is a centroid-based algorithm meaning that the goal is to locate the center points of each group/class,
* which works by updating candidates for center points to be the mean of the points within the sliding-window.
* These candidate windows are then filtered in a post-processing stage to eliminate near-duplicates, forming
   * the final set of center points and their corresponding groups. Check out the graphic in the article referenced above for an illustration.*/
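    /* Note: Spark MLlib does not ship a mean-shift implementation; the built-in clustering algorithms are
     * k-means, bisecting k-means, Gaussian mixture models, LDA and power iteration clustering, so this
     * description is included for completeness only. */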
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/classification/SupportVectorMachineRDD.scala | <gh_stars>0
package org.pengfei.spark.ml.classification
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.mllib.classification.SVMWithSGD
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
object SupportVectorMachineRDD {
def main(args:Array[String]): Unit = {
/*
* Init the spark session
* */
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local").
appName("SVMRDDClassification").
getOrCreate()
/*
* read data from csv file
* */
import spark.implicits._
val inputFile = "file:///home/pliu/Documents/spark_input/iris.txt"
val data = spark.read.text(inputFile).as[String]
val parsedData = data.map { line => val parts = line.split(',')
LabeledPoint(
if(parts(4)=="Iris-setosa") 0.toDouble
else if (parts(4) =="Iris-versicolor") 1.toDouble
else 2.toDouble, Vectors.dense(parts(0).toDouble,parts(1).toDouble,parts
(2).toDouble,parts(3).toDouble))
}.rdd
println(parsedData.getClass.getName)
//parsedData.show(5)
val splits = parsedData.filter { point => point.label != 2 }.randomSplit(
Array(0.6, 0.4), seed = 11L)
val training = splits(0).cache()
val test = splits(1)
val numIterations = 1000
val model = SVMWithSGD.train(training, numIterations)
model.clearThreshold()
val scoreAndLabels = test.map { point =>
val score = model.predict(point.features)
(score, point.label)
}
scoreAndLabels.foreach(println)
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_2_3_dimensionality_reduction_Algo.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_2_3_dimensionality_reduction_Algo.scala
package org.pengfei.Lesson05_Spark_ML
object Lesson05_2_2_3_dimensionality_reduction_Algo {
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_3_Model_Validation.scala | <reponame>pengfei99/Spark<gh_stars>0
package org.pengfei.Lesson05_Spark_ML
import org.apache.spark.sql.SparkSession
object Lesson05_3_Model_Validation {
def main(args:Array[String])={
}
/***********************************************AUC************************************************************/
def AUCExamples(spark:SparkSession):Unit={
/*
* Area under Curve (AUC), also known as Area under ROC, is a metric generally used for evaluating binary
* classifiers (see Figure 8-9 page 169). It represents the proportion of the time a model correctly predicts
* the label for a random positive or negative observation. It can be graphically represented by plotting the
* rate of true positives predicted by a model against its rate of false positives. The best classifier has
* the largest area under the curve.
*
* A model that just randomly guesses the label for an observation will have an AUC score of approximately 0.5.
* A model with an AUC score of 0.5 is considered worthless. A perfect model has an AUC score of 1.0. It predicts
* zero false positives and zero false negatives.
* */
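    /* A minimal runnable sketch: computing AUC with Spark ML's BinaryClassificationEvaluator. The
     * scoreAndLabel rows below are made-up numbers standing in for the (rawPrediction, label) output of a
     * real binary classifier such as logistic regression. */
    import spark.implicits._
    import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
    val scoreAndLabel = Seq(
      (0.9, 1.0), (0.8, 1.0), (0.6, 1.0), (0.7, 0.0), (0.4, 0.0), (0.2, 0.0)
    ).toDF("rawPrediction", "label")
    val auc = new BinaryClassificationEvaluator()
      .setRawPredictionCol("rawPrediction")
      .setLabelCol("label")
      .setMetricName("areaUnderROC")
      .evaluate(scoreAndLabel)
    println(s"Area under ROC = $auc")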
}
/*************************************************F Mesure ***************************************************/
def FMesureExample(spark:SparkSession):Unit={
/* F-measure, also known as F-score or F1 score, is another commonly used metric for evaluating classifiers.
* Let’s define two other terms—recall and precision—before defining F-measure.
*
* Recall is the fraction of the positive examples classified correctly by a model. The formula for calculating
* recall is shown next.
*
* Recall = TP / (TP + FN), where TP = True Positives, and FN = False Negatives
*
* Precision is the ratio of true positives to all positives classified by a model. It is calculated using the
* following formula.
*
* Precision = TP / (TP + FP), where TP = True Positives, and FP = False Positives
*
* The F-measure of a model is the harmonic mean of the recall and precision of a model. The formula for
* calculating the F-measure of a model is shown here.
*
* F-measure = 2* (precision * recall) / (precision + recall)
*
* The F-measure of a model takes a value between 0 and 1. The best model has an F-measure equal to 1,
* whereas a model with an F-score of 0 is the worst.
*
* */
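    /* A minimal runnable sketch: computing the F1 score with Spark ML's MulticlassClassificationEvaluator
     * (which also works for binary labels). The predAndLabel rows below are made-up numbers standing in for
     * the (prediction, label) output of a real classifier. */
    import spark.implicits._
    import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
    val predAndLabel = Seq(
      (1.0, 1.0), (1.0, 1.0), (0.0, 1.0), (1.0, 0.0), (0.0, 0.0), (0.0, 0.0)
    ).toDF("prediction", "label")
    val f1 = new MulticlassClassificationEvaluator()
      .setPredictionCol("prediction")
      .setLabelCol("label")
      .setMetricName("f1")
      .evaluate(predAndLabel)
    println(s"F1 score = $f1")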
}
/*************************************************Root Mean Squared Error (RMSE)*************************************/
def RMSEExample(spark:SparkSession):Unit={
/* The RMSE metric is generally used for evaluating models generated by regression algorithms. A related
* metric is mean squared error (MSE). An error in the context of a regression algorithm is the difference
* between the actual and predicted numerical label of an observation. As the name implies, MSE is the mean
* of the square of the errors. It is mathematically calculated by squaring the error for each observation and
* computing the mean of the square of the errors. RMSE is mathematically calculated by taking a square root
* of MSE.
*
* RMSE and MSE represent training error. They indicate how well a model fits a training set. They capture
* the discrepancy between the observed labels and the labels predicted by a model.
* A model with a lower MSE or RMSE represents a better fitting model than one with a higher MSE or RMSE.*/
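    /* A minimal runnable sketch: computing RMSE and MSE with Spark ML's RegressionEvaluator. The predAndLabel
     * rows below are made-up numbers standing in for the (prediction, label) output of a real regression model. */
    import spark.implicits._
    import org.apache.spark.ml.evaluation.RegressionEvaluator
    val predAndLabel = Seq(
      (2.5, 3.0), (0.0, -0.5), (2.0, 2.0), (8.0, 7.0)
    ).toDF("prediction", "label")
    val evaluator = new RegressionEvaluator()
      .setPredictionCol("prediction")
      .setLabelCol("label")
    val rmse = evaluator.setMetricName("rmse").evaluate(predAndLabel)
    val mse = evaluator.setMetricName("mse").evaluate(predAndLabel)
    println(s"RMSE = $rmse, MSE = $mse")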
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/feature/transformation/StringFeatureTransformation.scala | package org.pengfei.spark.ml.feature.transformation
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.Vectors
object StringFeatureTransformation {
/*
  * As most machine learning models do not deal with string/text directly, we need to transform these string features
  * into numeric (int, float) indices, and after the model training, we need to map the numeric indices back to string/text
*
  * The org.apache.spark.ml.feature package provides many transformation functions such as
* - StringIndexer
* - IndexToString
* - OneHotEncoder
* - VectorIndexer
* */
/***************************************************************************
********************** Basic StringIndexer ********************************
***************************************************************************/
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("StringFeatureTransformation").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val df1 = spark.createDataFrame(Seq(
(0, "a"), (1, "b"), (2, "c"),
(3, "a"), (4, "a"), (5, "c")
)).toDF("id","category")
// println(df1.show(5))
/*
* +---+--------+
| id|category|
+---+--------+
| 0| a|
| 1| b|
| 2| c|
| 3| a|
| 4| a|
+---+--------+
* */
// Build the StringIndexer model with input and output column
val indexer = new StringIndexer().setInputCol("category").setOutputCol("categoryIndex")
// Train the model with a dataframe
val model = indexer.fit(df1)
// After trainning, we can transform the dataframe with the model
val indexed1 = model.transform(df1)
// print(indexed1.show())
/*
* +---+--------+-------------+
| id|category|categoryIndex|
+---+--------+-------------+
| 0| a| 0.0|
| 1| b| 2.0|
| 2| c| 1.0|
| 3| a| 0.0|
| 4| a| 0.0|
| 5| c| 1.0|
+---+--------+-------------+
*
* */
/******************Handle Unseen label error*******************/
/*
     * An unseen label error happens when you build and train a model on a set of labels (e.g. a, b, c)
     * and then use this model to transform a data set which contains a label (e.g. d) that is not in that set
     * of labels.
     *
     * This error can be fixed by adding setHandleInvalid("skip"), but this will remove all the rows which
     * contain the label d*/
val df2 = spark.createDataFrame(Seq(
(0, "a"), (1, "b"), (2, "c"),
(3, "a"), (4, "a"), (5, "d")
)).toDF("id","category")
// The following two lines will generate unseen label errors
// val indexedDf2= model.transform(df2)
// println(indexedDf2.show())
// The following line will skip all label which is not in the model
// val indexed2 = model.setHandleInvalid("skip").transform(df2)
// println(indexed2.show())
/*To get all the rows, we need to train a new model with the new labels*/
val model2=indexer.fit(df2)
val indexed2=model2.transform(df2)
//println(indexed2.show())
/********************************************************************
************************ Index to String *********************************
************************************************************************* */
val converter = new IndexToString().setInputCol("categoryIndex").setOutputCol("originalCategory")
val originFeature=converter.transform(indexed2)
// println(originFeature.show())
/****************************************************************
************************ OneHotEncoder **************************
************************************************************* */
val dfCatgegorical= spark.createDataFrame(Seq(
(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"),
(5, "c"), (6, "d"), (7, "d"), (8, "d"), (9, "d"),
(10, "e"), (11, "e"), (12, "e"), (13, "e"), (14, "e")
)).toDF("id","category")
val indexerOHE=new StringIndexer().setInputCol("category").setOutputCol("categoryIndex")
val modelOHE=indexerOHE.fit(dfCatgegorical)
val dfIndexed=modelOHE.transform(dfCatgegorical)
print(dfIndexed.show())
//.setDropLast(false)
val encoderOHE=new OneHotEncoder().setInputCol("categoryIndex").setOutputCol("categoryVec")
val dfEncoded = encoderOHE.transform(dfIndexed)
print(dfEncoded.show())
/*
* With option setDropLast(false)
* | id|category|categoryIndex| categoryVec|
+---+--------+-------------+-------------+
| 0| a| 2.0|(5,[2],[1.0])|
| 1| b| 4.0|(5,[4],[1.0])|
*
*
     * 5 -> the vector size (5 categories)
     * [2] -> index 2 is set (the category's index)
     * [1.0] -> the value at that index
     *
     * Without option setDropLast(false)
*
* | id|category|categoryIndex| categoryVec|
+---+--------+-------------+-------------+
| 0| a| 2.0|(4,[2],[1.0])|
| 1| b| 4.0| (4,[],[])|
*
     * 4 -> only 4 elements (the last category is dropped)
     * as b has the last index (4.0), which is dropped, its vector has no entry set
* */
/*******************************************************************************
***********************VectorIndexer ****************************************
* ***************************************************************************/
/*
    * StringIndexer only works on a single string column; if the data has already been assembled into a feature vector,
    * we will need VectorIndexer to do the job
* */
val vecData=Seq(
Vectors.dense(-1.0, 1.0, 1.0),
Vectors.dense(-1.0, 3.0, 1.0),
Vectors.dense(0.0, 5.0, 1.0)
)
val vecDF=spark.createDataFrame(vecData.map(Tuple1.apply)).toDF("features")
    // setMaxCategories(2) defines that only columns with at most 2 distinct values will be considered categorical features
    // This option helps VectorIndexer identify categorical features
val vecIndexer=new VectorIndexer().setInputCol("features").setOutputCol("indexed").setMaxCategories(2)
val vecIndexerModel = vecIndexer.fit(vecDF)
val categoricalFeatures: Set[Int] = vecIndexerModel.categoryMaps.keys.toSet
println(s"Chose ${categoricalFeatures.size} categorical features: " + categoricalFeatures.mkString(", "))
// With the above config, we have 2 categorical features, the column index is 0 and 2.
// Because the column 1 has three values which is not met with setMaxCategories(2)
// If we change the config to setMaxCategories(3) ,the output will be
// Chose 3 categorical features: 0, 1, 2
val indexedVecDF = vecIndexerModel.transform(vecDF)
/*
* +--------------+-------------+
| features| indexed|
+--------------+-------------+
|[-1.0,1.0,1.0]|[1.0,1.0,0.0]|
|[-1.0,3.0,1.0]|[1.0,3.0,0.0]|
| [0.0,5.0,1.0]|[0.0,5.0,0.0]|
+--------------+-------------+
     * Column 0 has only two values (-1.0 and 0.0), so VectorIndexer indexes them as (1.0 and 0.0)
     * Column 1 has 3 values -> not a categorical feature -> no indexing
     * Column 2 has only one value (1.0), so VectorIndexer indexes it as 0.0
* */
print(indexedVecDF.show())
}
}
|
pengfei99/Spark | Spark3/src/main/scala/org/pengfei/Lesson04_Spark_SQL/Lesson04_8_Spark_SQL_UDF.scala | package org.pengfei.Lesson04_Spark_SQL
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Column, SparkSession}
import org.apache.spark.sql.functions.{col, lower, regexp_replace, udf}
object Lesson04_8_Spark_SQL_UDF {
def main(args: Array[String]) = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson04_8_Spark_SQL_UDF").getOrCreate()
/** ************************************4.8 Spark SQL UDF ***********************************************/
    /* Spark lets you define custom SQL functions called user defined functions (UDFs). UDFs are great when
     * built-in SQL functions are not able to do what you want, but should be avoided when possible, because they
     * are a black box for the Catalyst optimizer and can have very poor performance. For more information, please check:
* https://jaceklaskowski.gitbooks.io/mastering-spark-sql/content/spark-sql-udfs-blackbox.html
* */
// udfExp1(spark)
    // This will raise a null pointer exception
// udfExp2(spark)
//Udf handles null value
//udfExp3(spark)
// Use column function
udfExp4(spark)
}
/** **************************************4.8.1 Define a udf **********************************/
/* First step: we create a normal scala function*/
def lowerRemoveAllWhitespace(s: String, replacement: String): String = {
s.toLowerCase().replaceAll("\\s", replacement)
}
/* Second step: we register the above scala function as a spark UDF by using function spark.sql.functions.udf
*
* Note that the udf function take a list of argument type. The first type is the type of the lowerRemoveAllWhitespace
* return value. The second and third type are the type of the lowerRemoveAllWhitespace argument.
*
* */
val lowerRemoveAllWhitespaceUDF = udf[String, String, String](lowerRemoveAllWhitespace)
/** **********************************4.8.2 Use a udf ***************************************/
/* Run the udfExp1 to see how the udf acts on the data frame*/
def udfExp1(spark: SparkSession) = {
import spark.implicits._
val sourceDF = Seq(
(" HI THERE ", "_"),
(" GivE mE PresenTS ", "")
).toDF("val", "replacement")
sourceDF.select(
lowerRemoveAllWhitespaceUDF(col("val"), col("replacement")).as("clean_val")
).show()
}
/** ******************************** 4.8.3 Handle the null value in a udf **********************/
  /* If we have null values in the sourceDF, our udf will raise a NullPointerException, because it does not handle
   * null values at all. So we have to add logic to handle nulls, ideally without cluttering the function with checks.
   *
   * Thanks to Scala, we can use the Option type to handle null values.
* */
def udfExp2(spark: SparkSession) = {
import spark.implicits._
val sourceDF = Seq(
(" HI THERE ", "_"),
(" GivE mE PresenTS ", ""),
(null, "")
).toDF("val", "replacement")
sourceDF.select(
lowerRemoveAllWhitespaceUDF(col("val"), col("replacement")).as("clean_val")
).show()
}
  /* Here we write a new udf which handles null values.
  * First, we wrap the two string arguments in Option[String].
  * Second, we return an Option[String] instead of a plain String.*/
def betterLowerRemoveAllWhitespace(s: String, replacement: String): Option[String] = {
val str = Option(s).getOrElse(return None)
val rep = Option(replacement).getOrElse(return None)
Some(str.toLowerCase().replaceAll("\\s", rep))
}
val betterLowerRemoveAllWhitespaceUDF = udf[Option[String], String, String](betterLowerRemoveAllWhitespace)
def udfExp3(spark: SparkSession) = {
import spark.implicits._
val sourceDF = Seq(
(" HI THERE ", "_"),
(" GivE mE PresenTS ", ""),
(null, "")
).toDF("val", "replacement")
sourceDF.select(
betterLowerRemoveAllWhitespaceUDF(col("val"), col("replacement")).as("clean_val")
).show()
// Explain the physical plan of the udf
sourceDF.select(
betterLowerRemoveAllWhitespaceUDF(col("val"), col("replacement")).as("clean_val")
).explain()
}
/** ******************************** 4.8.4 Use column function to replace UDF **********************/
/*
   * As we mentioned earlier, UDFs are not optimal at all, so use them as little as you can. In this section, we show you how to
* define column function which can apply directly to a data frame without creating UDF.
*
* Note that, lower and regexp_replace are built-in functions in spark.sql.functions.
* */
def bestLowerRemoveAllWhitespace(col: Column): Column = {
lower(regexp_replace(col, "\\s+", ""))
}
def udfExp4(spark: SparkSession) = {
import spark.implicits._
val sourceDF = Seq(
(" HI THERE ", "_"),
(" GivE mE PresenTS ", ""),
(null, "")
).toDF("val", "replacement")
sourceDF.select(
bestLowerRemoveAllWhitespace(col("val")).as("clean_val")
).show()
// Explain the physical plan
sourceDF.select(
bestLowerRemoveAllWhitespace(col("val")).as("clean_val")
).explain()
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/basics/DataFrameWithArrayTypeAndMapType.scala | package org.pengfei.spark.basics
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions.split
import org.apache.spark.sql.types._
/*
* https://medium.com/@mrpowers/working-with-spark-arraytype-and-maptype-columns-4d85f3c8b2b3
* */
object DataFrameWithArrayTypeAndMapType {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("CalHousing").
getOrCreate()
// import sparkSession.implicits._ for all schema conversion magic.
import spark.implicits._
/*
Exp1.
val singersDF = Seq(("beatles", "help|hey jude"),
("romeo", "eres mia")).toDF("name", "hit_songs")
singersDF.show()
print(singersDF.getClass.getName)
val actualDF = singersDF.withColumn("hit_songs", split(singersDF("hit_songs"),"\\|"))
actualDF.show()*/
/*val data = Seq(("bieber", Array("baby", "sorry")),
("ozuna", Array("criminal")))
val schema = List(
StructField("name", StringType, true),
StructField("hit_songs", ArrayType(StringType, true), true)
)
val singersDF = spark.createDataFrame(
spark.sparkContext.parallelize(data),StructType(schema))*/
val someData = Seq(
Row(8, "bat"),
Row(64, "mouse"),
Row(-27, "horse")
)
val someSchema = List(
StructField("number", IntegerType, true),
StructField("word", StringType, true)
)
val someDF = spark.createDataFrame(
spark.sparkContext.parallelize(someData),
StructType(someSchema)
)
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/streaming/flume/FlumeEventCount.scala | package org.pengfei.spark.streaming.flume
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
object FlumeEventCount {
def main(args: Array[String]) {
//StreamingExamples.setStreamingLogLevels()
val host="127.0.0.1"
val port=6666
val batchInterval = Milliseconds(2000)
// Create the context and set the batch size
val sparkConf = new SparkConf().setAppName("FlumeEventCount").setMaster("local[2]")
val ssc = new StreamingContext(sparkConf, batchInterval)
// Create a flume stream
val stream = FlumeUtils.createStream(ssc, host, port, StorageLevel.MEMORY_ONLY_SER_2)
// Print out the count of events received from this server in each batch
stream.count().map(cnt => "Received " + cnt + " flume events." ).print()
val mappedlines = stream.map{sparkFlumeEvent =>
val event = sparkFlumeEvent.event
println("Value of event " + event)
println("Value of event Header " + event.getHeaders)
println("Value of event Schema " + event.getSchema)
val messageBody = new String(event.getBody.array())
println("Value of event Body " + messageBody)
messageBody}.print()
ssc.start()
ssc.awaitTermination()
}
} |
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson14_Understanding_Wiki_With_Latent_Semantic_Analysis/Lesson14_Latent_Semantic_Analysis.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson14_Understanding_Wiki_With_Latent_Semantic_Analysis/Lesson14_Latent_Semantic_Analysis.scala
package org.pengfei.Lesson14_Understanding_Wiki_With_Latent_Semantic_Analysis
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.ml.feature.{CountVectorizer, IDF, RegexTokenizer, StopWordsRemover}
import org.apache.spark.ml.linalg.{Vector => MLVector}
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.linalg.{Matrix, SingularValueDecomposition, Vectors, Vector => MLLibVector}
import org.apache.spark.sql.functions._
import scala.collection.mutable.ArrayBuffer
object Lesson14_Latent_Semantic_Analysis {
/* Origin book maven repo https://github.com/sryza/aas/tree/master/ch06-lsa/src/main */
/********************************************************************************************************************
* ************************************************14.1 Introduction ********************************************
* ***************************************************************************************************************/
  /* Most of the work in data engineering consists of assembling heterogeneous data into some sort of queryable format.
* We can query structured data with formal language. For example, if the structured data is tabular, we can use SQL.
* Making tabular data accessible is often straightforward - pull data from a variety of data sources into a single
* table, perhaps cleaning or fusing intelligently along the way.
*
* Unstructured text data presents a whole different set of challenges. The process of preparing data into a format
* that humans can interact with is not so much "assembly", but rather "indexing" in the nice case or "coercion"
* when things get ugly. A standard search index permits fast queries for teh set of documents that contains a
* given set of terms. Sometimes, however, we want to find documents that relate to the concepts surrounding a
* particular word even if the documents do not contain that exact string. Standard search indexes often fail to
* capture the latent structure in the text's subject matter.
*
* Latent semantic analysis (LSA) is a technique in natural language processing and information retrieval that seeks
* to better understand a corpus of the documents and the relationships between the words in those documents. It
* attempts to distill the corpus into a set of relevant concepts. Each concept captures a thread of variation in the
* data and often corresponds to a topic that the corpus discusses. Each concept consists of three attributes:
* - a level of affinity for each document in the corpus
* - a level of affinity for each term in the corpus
   *  - an importance score reflecting how useful the concept is in describing the variance in the data set.
*
   * For example, LSA might discover a concept with high affinity for the terms "Asimov" and "robot". FYI, Isaac Asimov
   * is a writer who wrote many science fiction stories about robots (e.g. the "Foundation" series). LSA might also discover
* high affinity for the documents "foundation series" and "science fiction". By selecting only the most important
   * concepts, LSA can throw away some irrelevant noise and merge co-occurring strands to come up with a simpler
* representation of the data.
*
* We can employ this concise representation in a variety of tasks. It can provide scores of similarity between terms
* and other terms, between documents and other documents, and between terms and documents. By encapsulating the
* patterns of variance in the corpus, it can base scores on a deeper understanding than simply on counting
* occurrences and co-occurrences of words. These similarity measures are ideal for tasks such as finding the set of
* documents relevant to query terms, grouping documents into topics, and finding related works.
*
* LSA discovers this lower-dimensional representation using a linear algebra technique called SVD. SVD can be thought
* of as a more powerful version of the ALS factorization described in Lesson11_Product_Recommendation. It starts with
* a document-term matrix generated through counting word frequencies for each document. In this matrix, each document
* corresponds to a column, each term corresponds to a row, and each element represents the importance of a word to a
* document. SVD then factorizes this matrix into three matrices, one of which expresses concepts in regard to
* documents, one expresses concepts in regard of terms, and last one contains the importance for each concept.
* The structure of these matrices can achieve a low-rank approximation of the original matrix by removing a set of
* rows and columns corresponding to the least important concepts. That is, the matrices in this low-rank
* approximation can be multiplied to produce a matrix close to the original, with increasing loss of fidelity as
* each concept is removed.
*
* In this Lesson, we’ll embark upon the modest task of enabling queries against the full extent of human knowledge
* based on its latent semantic relationships. More specifically, we’ll apply LSA to a corpus consisting of the full
* set of articles contained in Wikipedia, which is about 46 GB of raw text. We’ll cover how to use Spark for
* preprocessing the data: reading it, cleansing it, and coercing it into a numerical form. We’ll show how to compute
* the SVD and explain how to interpret and make use of it.
*
* SVD(singular-value decomposition) has wide applications outside LSA. It appears in such diverse places as detecting
   * climatological trends (Michael Mann’s famous “hockey-stick” graph), face recognition, and image compression. Check
   * (https://en.wikipedia.org/wiki/Singular_value_decomposition) for more details.
* Spark’s implementation can perform the matrix factorization on enormous data sets, which opens up the technique
* to a whole new set of applications
*
* */
/********************************************************************************************************************
* ************************************** 14.2 The Document-Term Matrix ********************************************
* ***************************************************************************************************************/
/* Before performing any analysis, LSA requires transforming the raw text of the corpus into a document-term matrix.
* In this matrix, each column represents a term that occurs in the corpus, and each row represents a document.
* Loosely, the value at each position should correspond to the importance of the column’s term to the row’s document.
* A few weighting schemes have been proposed, but by far the most common is term frequency times inverse document
* frequency, or TF-IDF. Here’s a representation in Scala code of the formula. We won’t actually end up using this
* code because Spark provides its own implementation.
*
* */
def termDocWeight(termFrequencyInDoc: Int, totalTermsInDoc: Int,
termFreqInCorpus: Int, totalDocs: Int): Double = {
val tf = termFrequencyInDoc.toDouble / totalTermsInDoc
val docFreq = totalDocs.toDouble / termFreqInCorpus
val idf = math.log(docFreq)
tf * idf
}
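  /* A quick worked example with hypothetical numbers: a term appearing 3 times in a 100-term document,
   * and present in 10 out of 1,000 documents, gets tf = 3/100 = 0.03 and idf = log(1000/10) ≈ 4.61,
   * so termDocWeight(3, 100, 10, 1000) ≈ 0.14. */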
/* TF-IDF captures two intuitions about the relevance of a term to a document. First, we would expect that
* the more often a term occurs in a document, the more important it is to that document. Second, not all terms
* are equal in a global sense. It is more meaningful to encounter a word that occurs rarely in the entire corpus
* than a word that appears in most of the documents, thus the metric uses the inverse of the word’s appearance in
* documents in the full corpus.
*
* The model relies on a few assumptions. It treats each document as a “bag of words,” meaning that it pays no
* attention to the ordering of words, sentence structure, or negations. By representing each term once, the model
* has difficulty dealing with polysemy, the use of the same word for multiple meanings. For example, the model
* can’t distinguish between the use of “band” in “Radiohead is the best band ever” and “I broke a rubber band.”
* If both sentences appear often in the corpus, it may come to associate “Radiohead” with “rubber.”
*
* The corpus has 10 million documents. Counting obscure technical jargon, the English language contains about
* a million terms, some subset in the tens of thousands of which is likely useful for understanding the corpus.
* Because the corpus contains far more documents than terms, it makes the most sense to generate the document-term
* matrix as a row matrix—a collection of sparse vectors—each corresponding to a document
* */
/********************************************************************************************************************
* **************************************** 14.3 Getting the Data ************************************************
* ***************************************************************************************************************/
/************************************* 14.3.1 Data preprocessing steps *********************************/
/* Getting from the raw Wikipedia dump into document-term matrix requires a set of preprocessing steps.
* - First, the input consists of a single enormous XML file with documents delimited by <page> tags. This needs
* to be broken up to feed to the next step.
  * - Second, turn the Wiki-formatting into plain text. The plain text is then split into tokens.
  * - Third, tokens are reduced from their different inflectional forms to a root term through a process
  *    called lemmatization.
  * - Fourth, these tokens can then be used to compute term and document frequencies.
  * - Fifth, a final step ties these frequencies together and builds the actual vector objects. In the book repo, all the
* code for performing these steps is encapsulated in the AssembleDocumentTermMatrix class.
*
* The first steps can be performed for each document fully in parallel (which, in Spark, means as a set of map
* functions), but computing the inverse document frequencies requires aggregation across all the documents. A number
* of useful general NLP and Wikipedia-specific extraction tools exist that can aid in these tasks.
*/
/* This Lesson use some external dependencies, you need to add the following maven dependencies into your project
* pom file */
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson14_Latent_Semantic_Analysis").master("local[2]").getOrCreate()
import spark.implicits._
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/spark_lessons/Lesson14_Latent_Semantic_Analysis/wikidump.xml"
/************************************* 14.3.1.1 Transform xml to plain text *********************************/
/* Here we use spark-xml lib to parse xml to dataSet, for more example, please check Lesson14_Spark_Xml_Parsing */
val allPages=spark.read.format("com.databricks.spark.xml").option("rowTag","page").load(filePath)
/* We only need two columns wiki page title and text*/
val rawText=allPages.withColumn("text",$"revision.text._VALUE").select("title","text")
//rawText.show(1,false)
/************************************* 14.3.1.2 Lemmatization *********************************/
/* With the plain text in hand, next we need to turn it into a bag of terms. This step requires care for a
* couple of reasons. First, common words like “the” and “is” take up space but at best offer no useful information
* to the model. Filtering out a list of stop words can both save space and improve fidelity. Second, terms
* with the same meaning can often take slightly different forms. For example, “monkey” and “monkeys” do not
* deserve to be separate terms. Nor do “nationalize” and “nationalization.” Combining these different inflectional
* forms into single terms is called stemming or lemmatization. Stemming refers to heuristics-based techniques
* for chopping off characters at the ends of words, while lemmatization refers to more principled approaches.
* For example, the former might truncate “drew” to “dr,” while the latter might more correctly output “draw.”
* The Stanford Core NLP project provides an excellent lemmatizer with a Java API that Scala can take advantage of.
* */
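/* For reference, here is a minimal sketch of the book's CoreNLP-based lemmatization. It is kept commented out
 * because it needs the stanford-corenlp dependencies listed in the Appendix at the end of this file, which we
 * abandoned; the helper name plainTextToLemmas and the filtering rules are illustrative assumptions, not part of
 * this lesson's code.
import java.util.Properties
import scala.collection.JavaConverters._
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.ling.CoreAnnotations.{LemmaAnnotation, SentencesAnnotation, TokensAnnotation}

def plainTextToLemmas(text: String, stopWords: Set[String], pipeline: StanfordCoreNLP): Seq[String] = {
  val doc = new Annotation(text)
  pipeline.annotate(doc)
  for {
    sentence <- doc.get(classOf[SentencesAnnotation]).asScala
    token <- sentence.get(classOf[TokensAnnotation]).asScala
    lemma = token.get(classOf[LemmaAnnotation]).toLowerCase
    // keep only alphabetic lemmas longer than 2 characters that are not stop words
    if lemma.length > 2 && lemma.forall(Character.isLetter) && !stopWords.contains(lemma)
  } yield lemma
}

val props = new Properties()
props.setProperty("annotators", "tokenize, ssplit, pos, lemma")
val nlpPipeline = new StanfordCoreNLP(props)
*/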
/* Build a set of stop words and broadcast it to all nodes (one copy per JVM instead of one copy per task).
 * We abandoned the solution below, because spark now provides a StopWordsRemover transformer*/
/*val stopWordPath="/DATA/data_set/spark/basics/Lesson14_Latent_Semantic_Analysis/stopWords.txt"
val stopWords = scala.io.Source.fromFile(stopWordPath).getLines().toSet
val bStopWords = spark.sparkContext.broadcast(stopWords)*/
/* The terms dataframe contains two columns: the first is the page title, the second is the list of terms extracted
 * from the page content after removing the stop words*/
val terms=textToTerms(rawText)
/*terms.show(1,false)
val count=terms.count()
println(s"terms count : ${count} ")*/
/* 167 rows, one per wiki geometry page */
/************************************* 14.3.1.3 Computing the TF-IDFs **************************************/
/* At this point, terms refers to a data set of sequences of terms, each corresponding to a document. The next step
* is to compute the frequencies for each term within each document and for each term within the entire corpus.
* The spark ML lib contains Estimator and Transformer implementations for doing exactly this.
* */
/* we rename the columns to "title" and "terms" and remove all rows whose terms list contains at most one element;
 * in our case, this removes nothing */
val termsDF= terms.toDF("title","terms")
val beforeFilteredCount=terms.count()
val filteredTerms=termsDF.filter(size($"terms")>1)
val afterFilteredCount=filteredTerms.count()
/* println(s"Before filter terms has ${beforeFilteredCount}, after filter terms has ${afterFilteredCount}")
filteredTerms.show(1,false)*/
val numTerms = 20000
/* CountVectorizer is an Estimator that can help compute the term frequencies for us. The CountVectorizer scans the
* data to build up a vocabulary, a mapping of integers to terms, encapsulated in the CountVectorizerModel, a
* Transformer. The CountVectorizerModel can then be used to generate a term frequency Vector for each document.
* The vector has a component for each term in the vocabulary, and the value for each component is the number of times
* the term appears in the document. Spark uses sparse vectors here, because documents typically only contain a small
* sub-set of the full vocabulary.*/
val countVectorizer = new CountVectorizer().
setInputCol("terms").setOutputCol("termFreqs").
setVocabSize(numTerms)
val vocabModel = countVectorizer.fit(filteredTerms)
val docTermFreqs = vocabModel.transform(filteredTerms)
/* Notice the use of setVocabSize. The corpus contains millions of terms, but many are highly specialized words that
* only appear in one or two documents. Filtering out less frequent terms can both improve performance and remove
* noise. When we set a vocabulary size on the estimator, it leaves out all but the most frequent words.
*
* */
docTermFreqs.cache()
docTermFreqs.show(1)
/* Now that we have the term frequencies for each document, we need to calculate the inverse document frequencies. IDF is another
* Estimator, which counts the number of the documents in which each term in the corpus appears and then uses these
* counts to compute the IDF scaling factor for each term. The IDFModel that it yields can then apply these scaling
* factors to each term in each vector in the data set.*/
val idf=new IDF().setInputCol("termFreqs").setOutputCol("tfidfVec")
val idfModel=idf.fit(docTermFreqs)
val docTermMatrix = idfModel.transform(docTermFreqs).select("title","tfidfVec","termFreqs")
//docTermMatrix.show(1)
/********************************* 14.3.1.4 Save the vec position and term string mapping **************************/
/* As we descend from data frames into the world of vectors and matrices, we lose the ability to key by strings.
* Thus, if we want to trace what we learn back to recognizable entities, it's important for us to save a mapping
* of positions in the matrix to the terms and document titles in our original corpus. Positions in the term vectors
* are equivalent to columns in our document-term matrix. The mapping of these positions to term strings is already
* saved in our CountVectorizerModel. We can access it with: */
val termIds:Array[String]=vocabModel.vocabulary
// println(s" termIds : ${termIds.mkString(";")}")
/* Creating a mapping of row IDs to document titles is a little more difficult. To achieve it, we can use the
* zipWithUniqueId function, which associates a unique deterministic ID with every row in the DataFrame. We rely
* on the fact that, if we call this function on a transformed version of the DataFrame, it will assign the same
* unique IDs to the transformed rows as long as the transformations don't change the number of rows or their
* partitioning. Thus, we can trace the rows back to their IDs in the DataFrame and, consequently, the document
* titles that they correspond to:*/
val docIds=docTermFreqs.rdd
//map transform (title,terms,termFreqs) to title
.map(_.getString(0))
// zip transform title to (title,ID)
.zipWithUniqueId()
// swap transform (title,ID) to (ID,title)
.map(_.swap)
// toMap transform tuple (ID,title) to a Map(ID->title)
.collect().toMap
// println(s"docIds ${docIds.toString()}")
/************************************* 14.3.2 Singular Value Decomposition *********************************/
/* With the document-term matrix M in hand, the analysis can proceed to the factorization and dimensionality
* reduction. Spark ML contains an implementation of the SVD that can handle enormous matrices. The singular value
* decomposition takes an m*n matrix and returns three matrices that approximately equal it when multiplied together:
 * M ~= U * S * V^T (P123 has more details)
 *
 * The matrices are :
 * - U is an m*k matrix whose columns form an orthonormal basis for the document space. m is the number of documents.
 * - S is a k*k diagonal matrix, each of whose entries corresponds to the strength of one of the concepts. k is the
 *   number of concepts.
 * - V is an n*k matrix whose columns form an orthonormal basis for the term space. n is the number of terms.
*
* In the LSA case, m is the number of documents and n is the number of terms. The decomposition is parameterized with
* a number k, less than or equal to n, which indicates how many concepts to keep around. When k=n, the product of the
* factor matrices reconstitutes the original matrix exactly. When k<n, the multiplication results in a low-rank
* approximation of the original matrix. k is typically chosen to be much smaller than n. SVD ensures that the
* approximation will be the closest possible to the original matrix.
*
* */
/* At the time of writing this lesson, there is no SVD implementation in spark.ml, but spark.mllib has an SVD
 * implementation which only operates on RDDs. As a result, we need to transform our dataframe into an RDD of
 * mllib vectors*/
val vecRdd= docTermMatrix.select("tfidfVec").rdd
.map{row=>Vectors.fromML(row.getAs[MLVector]("tfidfVec"))
}
/* To find the singular value decomposition, we simply wrap an RDD of row vectors in a RowMatrix and call computeSVD */
vecRdd.cache()
val mat=new RowMatrix(vecRdd)
val k =1000
val svd=mat.computeSVD(k,computeU = true)
/* The RDD should be cached in memory beforehand because the computation requires multiple passes over the data.
* The computation requires O(nk) storage on the driver, O(n) storage for each task, and O(k) passes over the data.
* */
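    /* As a quick sanity check, we can print the first few singular values; a steep drop-off suggests that a much
     * smaller k would already capture most of the structure of the corpus. */
    svd.s.toArray.take(10).zipWithIndex.foreach { case (value, index) =>
      println(s"singular value $index : $value")
    }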
/************************************* 14.3.3 Finding Important Concepts *********************************/
/* SVD outputs a bunch of numbers. How can we inspect these to verify they actually relate to anything useful?
* The V matrix represents concepts through the terms that are important to them. As discussed earlier, V contains
* a column for every concept and a row for every term. The value at each position can be interpreted as the
* relevance of that term to that concept. This means that the most relevant terms to each of the top concepts can
* be found with something like this
* */
val topConceptTerms = topTermsInTopConcepts(svd, 4, 10, termIds)
val topConceptDocs = topDocsInTopConcepts(svd, 4, 10, docIds)
for ((terms, docs) <- topConceptTerms.zip(topConceptDocs)) {
println("Concept terms: " + terms.map(_._1).mkString(", "))
println("Concept docs: " + docs.map(_._1).mkString(", "))
println()
}
/* The output are following
Concept terms: sub, math, x, anatomy, d, phi_t, y, geodesic, 1, h
Concept docs: Computational anatomy, Busemann function, Large deformation diffeomorphic metric mapping, Minkowski space,
Complex reflection group, Riemannian metric and Lie bracket in computational anatomy, Diffeomorphometry,
Bayesian model of computational anatomy, Snub (geometry), Huzita–Hatori axioms
Concept terms: sub, y, x, h, d, sup, hadamard, gamma, busemann, harvnb
Concept docs: Busemann function, Complex reflection group, Huzita–Hatori axioms, Line moiré, Snub (geometry),
Minkowski space, Symmetry group, Volume and displacement indicators for an architectural structure,
Geometric separator, Infinitely near point
Concept terms: node_h, snub, cdd, bmatrix, node, br, uniform, sr, tiling, 60px
Concept docs: Snub (geometry), Complex reflection group, Minkowski space, Finite subdivision rule,
Schema for horizontal dials, Geometry processing, Strähle construction, Minkowski plane,
Coxeter decompositions of hyperbolic polygons, Laguerre plane
Concept terms: minkowski, u, mathbf, spacetime, sup, math, vectors, mvar, relativity, left
Concept docs: Minkowski space, Minkowski diagram, Minkowski plane, Fat object, Complex reflection group,
Base change theorems, Space, Superspace, Surface (mathematics), Geometry processing
We could say the grouping of terms and docs is pretty impressive. Even though unexpected words appear in each, all
the concepts exhibit some thematic coherence.
*/
/***************************************************************************************************************
******************* 14.4 Querying and Scoring with a Low-Dimensional Representation ********************
* *********************************************************************************************************/
/* How relevant is a term to a document? How relevant are two terms to each other? Which documents most closely
 * match a set of query terms? The original document-term matrix provides a shallow way to answer these questions.
* We can achieve a relevance score between two terms by computing the cosine similarity between their two column
* vectors in the matrix. Cosine similarity measures the angle between two vectors. Vectors that point in the same
* direction in the high-dimensional document space are thought to be relevant to each other. This is computed as the
* dot product of the vectors divided by the product of their lengths.
*
* Cosine similarity sees wide use as a similarity metric between vectors of term and document weights in natural
* language and information retrieval applications. Likewise, for two documents, a relevance score can be computed
* as the cosine similarity between their two row vectors. A relevance score between a term and document can simply
* be the element in the matrix at the intersection of both.
*
* However, these scores come from shallow knowledge about the relationships between these entities, relying on simple
* frequency counts. LSA provides the ability to base these scores on a deeper understanding of the corpus. For example,
 * if the term "artillery" appears nowhere in the "Normandy landings" article, but the article mentions "howitzer"
* frequently, the LSA representation may be able to recover the relation between "artillery" and the article based
* on the co-occurrence of "artillery" and "howitzer" in other documents
*
 * The LSA representation also offers benefits from an efficiency standpoint. It packs the important information into
 * a lower-dimensional representation that can be operated on instead of the original document-term matrix. Consider
 * the task of finding the set of terms most relevant to a particular term. The naive approach requires computing
 * the dot product between that term's column vector and every other column vector in the document-term matrix.
 * This involves a number of multiplications proportional to the number of terms times the number of documents.
 * LSA can achieve the same by looking up its concept-space representation and mapping it back into term space,
 * requiring a number of multiplications proportional only to the number of terms times k. The low-rank
 * representation encodes the relevant patterns in the data, so the full corpus need not be queried.
*
* In the following section, we'll build a primitive query engine using the LSA representation of our data.
* */
/*********************************************** 14.4.1 Term-Term relevance ************************************/
/* LSA understands the relation between two terms as the cosine similarity between their two columns in the
* reconstructed low-rank matrix; that is, the matrix that would be produced if the three approximate factors were
* multiplied back together. One of the ideas behind LSA is that this matrix offers a more useful representation of
* the data. It offers this in a few ways:
* - accounting for synonymy by condensing related terms
* - accounting for polysemy by placing less weight on terms that have multiple meanings
* - Throwing out noise
*
 * However, we do not actually need to calculate the contents of this matrix to discover the cosine similarity. Some
* linear algebra manipulation reveals that the cosine similarity between two columns in the reconstructed matrix
* is exactly equal to the cosine similarity between the corresponding columns in SV. Consider the task of finding
* the set of terms most relevant to a particular term. Finding the cosine similarity between a term and all other
* terms is equivalent to normalizing each row in VS to length 1 and then multiplying the row corresponding to that
* term by it. Each element in the resulting vector will contain a similarity between a term and the query term.
* */
/*P 131 To be continued*/
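    /* A minimal sketch of that idea (termRowOfVS and termCosineSimilarity are our own helper names, not the lesson's
     * code): scale a term's row of V by the singular values, normalize it to length 1, and take the dot product with
     * the same representation of another term. */
    def termRowOfVS(termIndex: Int): Array[Double] = {
      // row of V scaled by the diagonal of S, i.e. the term's coordinates in concept space
      val row = (0 until svd.s.size).map(c => svd.V(termIndex, c) * svd.s(c)).toArray
      val norm = math.sqrt(row.map(x => x * x).sum)
      row.map(_ / norm)
    }
    def termCosineSimilarity(termA: Int, termB: Int): Double =
      termRowOfVS(termA).zip(termRowOfVS(termB)).map { case (x, y) => x * y }.sum
    // example: similarity between the two most frequent terms of the vocabulary
    println(s"cosine(${termIds(0)}, ${termIds(1)}) = ${termCosineSimilarity(0, 1)}")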
}
def textToTerms(data:DataFrame):DataFrame={
/* Build a regex tokenizer which only takes words into account */
val regexTokenizer=new RegexTokenizer()
.setInputCol("text")
.setOutputCol("rawTerms")
.setPattern("\\W")
/* transform the data */
val tokenizedData=regexTokenizer.transform(data)
/* build a stop word remover*/
val stopWordRemover=new StopWordsRemover()
.setInputCol("rawTerms")
.setOutputCol("terms")
val filteredTerms=stopWordRemover.transform(tokenizedData)
filteredTerms.select("title","terms")
}
/********************************** 14.3.3 Finding Important Concepts *******************************/
  /* This method finds the top terms for each of the top concepts by using matrix V */
def topTermsInTopConcepts(svd:SingularValueDecomposition[RowMatrix,Matrix],numConcepts:Int,numTerms:Int,
termIds:Array[String]):Seq[Seq[(String,Double)]]={
    /* V is an n*k matrix: one row per term (n terms) and one column per concept (k concepts) */
val v=svd.V
val topTerms= new ArrayBuffer[Seq[(String, Double)]]()
    // v is a local (non-distributed) matrix, so its array form lives only in the driver process.
    // toArray returns the values in column-major order, so each concept column occupies a contiguous slice of numRows values.
val arr = v.toArray
for (i <- 0 until numConcepts) {
val offs = i * v.numRows
val termWeights = arr.slice(offs, offs + v.numRows).zipWithIndex
val sorted = termWeights.sortBy(-_._1)
topTerms += sorted.take(numTerms).map {
/* This last step finds the actual terms that correspond to the positions in the term vectors. Recall
* that termIds is the integer -> term mapping we got from the CountVectorizer*/
case (score, id) => (termIds(id), score)
}
}
topTerms
/* Note that V is a matrix in local memory in the driver process, and the computation occurs in a non-distributed
* manner.*/
}
/* This method finds the documents relevant to each of the top concepts by using matrix U */
def topDocsInTopConcepts(svd: SingularValueDecomposition[RowMatrix, Matrix], numConcepts: Int, numDocs: Int,
docIds: Map[Long, String]): Seq[Seq[(String, Double)]] = {
val u = svd.U
println(s"u has type ${u.getClass.getName}")
val topDocs = new ArrayBuffer[Seq[(String, Double)]]()
for (i <- 0 until numConcepts) {
/* monotonically_increasing_id/zipWithUniqueId trick discussed in the previous section of this Lesson. This
* allows us to maintain continuity between rows in the matrix and rows in the DataFrame it is derived from,
* which also has the titles.*/
val docWeights = u.rows.map(_.toArray(i)).zipWithUniqueId()
topDocs += docWeights.top(numDocs).map {
case (score, id) => (docIds(id), score)
}
}
topDocs
/* Note that, in this code, U is stored as a distributed matrix.*/
}
/************************************************************Appendix *********************************************/
/* We tried to use the code in the book "Advanced Analytics with Spark", which needs the following maven dependencies.
But we encountered many problems, so we use our own xml parsing and tokenizer code. Our code only requires the databricks
spark-xml lib to work.
<!-- Dependencies for nlp (Lesson14) -->
<dependency>
<groupId>edu.stanford.nlp</groupId>
<artifactId>stanford-corenlp</artifactId>
<version>${corenlp.version}</version>
</dependency>
<dependency>
<groupId>edu.stanford.nlp</groupId>
<artifactId>stanford-corenlp</artifactId>
<version>${corenlp.version}</version>
<classifier>models</classifier>
</dependency>
<dependency>
<groupId>edu.umd</groupId>
<artifactId>cloud9</artifactId>
<version>2.0.1</version>
</dependency>
<dependency>
<groupId>info.bliki.wiki</groupId>
<artifactId>bliki-core</artifactId>
<version>3.1.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</dependency>
*/
/* There is a bug in the edu.umd.cloud9.collection.XMLInputFormat$XMLRecordReader.nextKeyValue which throws a
* java.lang.RuntimeException: bytes consumed error! */
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_4_Exo2.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson06_Spark_Streaming
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
/** In this exercise, we use a flume data source which outputs data to the local socket 6666.
  * The objective of this exercise is to use Spark streaming to read and process the data sent
  * by flume.
  *
  * We suppose you have a flume agent which can output data; if not, see
  * https://172.16.31.10/doku.php?id=employes:pengfei.liu:data_science:flume:start#flume_working_examples
  * to build a flume agent.
  *
  * In lesson6_1, we already used spark flume streaming (data is stored as RDDs). Here, we will use spark structured
  * streaming (data is stored as a DataSet/DataFrame).
  * */
object Lesson06_4_Exo2 {
def main(args:Array[String])={
/* data source config*/
val host="127.0.0.1"
val port=6666
/* build spark session*/
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder.appName("Lesson6_5_Spark_Structure_Streaming").master("local[2]").getOrCreate()
import spark.implicits._
/* Read data from flume */
val lines=spark.readStream.format("socket").option("host",host).option("port",port).load()
    //spark.readStream returns an untyped dataframe.
println(s"${lines.getClass.getName}")
lines.isStreaming // Returns True for DataFrames that have streaming sources
lines.printSchema
    // The dataframe is untyped, meaning that its schema is not checked at compile time, only at runtime when the
    // query is submitted. Some operations like map, flatMap, etc. need the type to be known at compile time.
    // So in the example below, to use flatMap, we convert the DataFrame to a Dataset of String using .as[String]
val words=lines.as[String].flatMap(_.split(" "))
val wordCounts = words.groupBy("value").count()
val query=wordCounts.writeStream.outputMode("complete").format("console").start()
query.awaitTermination()
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_5_3_1_Spark_ML.scala | package org.pengfei.Lesson05_Spark_ML
object Lesson05_5_3_1_Spark_ML {
/*********************************************************************************************************
* **********************************5.5.3 Spark Machine Learning pipelines API(Spark ML) ***************************
* ********************************************************************************************************/
/*
* Spark ML is another machine learning library that runs on top of Spark. It is a relatively newer library than
* MLlib. It became available starting with Apache Spark version 1.2. It is also referred to as the Spark Machine
* Learning Pipelines API.
*
* Spark ML provides a higher-level abstraction than MLlib for creating machine learning workflows or
* pipelines. It enables users to quickly assemble and tune machine learning pipelines. It makes it easy to
* create a pipeline for training a model, tuning a model using cross-validation, and evaluating a model with
* different metrics.
*
* Many classes and singleton objects provided by the MLlib library are also provided by the Spark ML
* library. In fact, classes and objects related to machine learning algorithms and models have the same names
* in both the libraries. The classes and singleton objects provided by the Spark ML library are available under
* the org.apache.spark.ml package.
*
* Generally, a machine learning task consists of the following steps:
* - 1. Read data.
* - 2. Preprocess or prepare data for processing.
* - 3. Extract features.
* - 4. Split data for training, validation, and testing.
* - 5. Train a model with a training dataset.
* - 6. Tune a model using cross-validation techniques.
* - 7. Evaluate a model over a test dataset.
* - 8. Deploy a model.
* */
/*********************************************************************************************************
* **********************************5.5.3.1 Spark ML datatype *****************************************
* ********************************************************************************************************/
/*
* Spark ML uses DataFrame as the primary data abstraction. Unlike in MLlib, the machine learning algorithms
* and models provided by Spark ML operate on DataFrames.
*
* As discussed in Lesson 4 spark sql, the DataFrame API provides a higher-level abstraction than the RDD API
* for representing structured data. It supports a flexible schema that allows named columns of different data
* types. For example, a DataFrame can have different columns storing raw data, feature vectors, actual label,
* and predicted label. In addition, the DataFrame API supports a wide variety of data sources.
*
* Compared to the RDD API, the DataFrame API also makes data preprocessing and feature extraction
* or feature engineering easier. Data cleansing and feature engineering are generally required before a model
* can be fitted on a dataset. These activities constitute the majority of the work involved in a machine learning
* task. The DataFrame API makes it easy to generate a new column from an existing one and add it to the
* source DataFrame.
*
*/
/*********************************************************************************************************
* **********************************5.5.3.2 Spark ML key features *****************************************
* ********************************************************************************************************/
/* Transformer
* A Transformer generates a new DataFrame from an existing DataFrame. It implements a method named
* transform, which takes a DataFrame as input and returns a new DataFrame by appending one or more new
* columns to the input DataFrame. A DataFrame is an immutable data structure, so a transformer does not
* modify the input DataFrame. Instead, it returns a new DataFrame, which includes both the columns in the
* input DataFrame and the new columns.
*
* Spark ML provides two types of transformers:
* - feature transformer
* - machine learning model.
*
* Feature Transformer
* A feature transformer creates one or more new columns by applying a transformation to a column in the
* input dataset and returns a new DataFrame with the new columns appended. For example, if the input
* dataset has a column containing sentences, a feature transformer can be used to split the sentences into
* words and create a new column that stores the words in an array.
*
* Model
* A model represents a machine learning model. It takes a DataFrame as input and outputs a new DataFrame
* with predicted labels for each input feature Vector. The input dataset must have a column containing feature
* Vectors. A model reads the column containing feature Vectors, predicts a label for each feature Vector, and
* returns a new DataFrame with predicted labels appended as a new column.
*
* Estimator
* An Estimator trains or fits a machine learning model on a training dataset. It represents a machine learning
* algorithm. It implements a method named fit, which takes a DataFrame as argument and returns a
* machine learning model.
*
* An example of an Estimator is the LinearRegression class. Its fit method returns an instance of the
* LinearRegressionModel class.
*
* Pipeline
* A Pipeline connects multiple transformers and estimators in a specified sequence to form a machine
* learning workflow. Conceptually, it chains together the data preprocessing, feature extraction, and model
* training steps in a machine learning workflow.
*
* A Pipeline consists of a sequence of stages, where each stage is either a Transformer or an Estimator.
* It runs these stages in the order they are specified.
* A Pipeline itself is also an Estimator. It implements a fit method, which takes a DataFrame as
* argument and passes it through the pipeline stages. The input DataFrame is transformed by each stage.
*
* The fit method returns a PipelineModel, which is a Transformer.
* A Pipeline’s fit method calls the transform method of each Transformer and fit method of each
* Estimator in the same order as they are specified when a Pipeline is created. Each Transformer takes a
* DataFrame as input and returns a new DataFrame, which becomes the input for the next stage in the
* Pipeline. If a stage is an Estimator, its fit method is called to train a model. The returned model, which is a
* Transformer, is used to transform the output from previous stage to produce input for the next stage.
*
* PipelineModel
* A PipelineModel represents a fitted pipeline. It is generated by the fit method of a Pipeline. It has the same
* stages as the Pipeline that generated it, except for the Estimators, which are replaced by models trained by
* those estimators. In other words, all the Estimators are replaced by Transformers.
*
* Unlike a Pipeline, which is an Estimator, a PipelineModel is a Transformer. It can be applied to a dataset
* to generate predictions for each observation. In fact, a PipelineModel is a sequence of Transformers. When
* the transform method of a PipelineModel is called with a DataFrame, it calls the transform method of
* each Transformer in sequence. Each Transformer’s transform method outputs a new DataFrame, which
* becomes the input for the next Transformer in the sequence.
*
*
* Evaluator
* An Evaluator evaluates the predictive performance or effectiveness of a model. It provides a method named
* evaluate, which takes a DataFrame as input and returns a scalar metric. The input DataFrame passed as
* argument to the evaluate method must have columns named label and prediction.
*
* Grid Search
* The performance or quality of a machine learning model depends on the hyperparameters provided to a
* machine learning algorithm during model training. For example, the effectiveness of a model trained with
* the logistic regression algorithm depends on the step size and number of gradient descent iterations.
*
* Unfortunately, it is difficult to pick the right combination of hyperparameters for training the
* best model. One of the techniques for finding the best hyperparameters is to do a grid search over a
* hyperparameter space. In a grid search, models are trained with each combination of hyperparameters from
* a specified subset of the hyperparameter space.
*
* For example, consider a training algorithm that requires two real-valued hyperparameters: p1 and p2.
* Rather than guessing the best values for p1 and p2, we can do a grid search over p1 values 0.01, 0.1, and 1,
* and p2 values 20, 40, and 60. This results in nine different combinations of p1 and p2. A model is trained and
* evaluated with each combination of p1 and p2. The combination that trains a model with the best evaluation
* metric is selected.
*
* Grid search is expensive, but it is a better approach for hyperparameter tuning than guessing the
* optimal value for each hyperparameter. Generally, it is a required step to find a model that performs well.
*
* CrossValidator
* A CrossValidator finds the best combination of hyperparameter values for training the optimal model for a
* machine learning task. It requires an Estimator, an Evaluator and a grid of hyperparameters.
*
* A CrossValidator uses k-fold cross-validation and grid search for hyperparameter and model tuning.
* It splits a training dataset into k-folds, where k is a number specified by a user. For example, if k is 10, a
* CrossValidator will generate 10 pairs of training and test dataset from the input dataset. In each pair, 90% of
* the data is reserved for training and remaining 10% is held-out for testing.
* Next, it generates all the combinations of the hyperparameters from user-specified sets of different
* hyperparameters. For each combination, it trains a model with a training dataset using an Estimator and
* evaluates the generated model over a test dataset using an Evaluator. It repeats this step for all the k-pairs of
* training and test datasets, and calculates the average of the specified evaluation metric for each pair.
* The hyperparameters that produce the model with the best averaged evaluation metric are selected
* as the best hyperparameters. Finally, a CrossValidator trains a model over the entire dataset using the best
* hyperparameters.
* Note that using a CrossValidator can be very expensive since it tries every combination of the
* hyperparameters from the specified parameter grid. However, it is a well-established method for choosing
* optimal hyperparameter values. It is statistically a better method than heuristic hand-tuning.
*
*
* */
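  /* A minimal sketch (not part of the original lesson text) showing how these pieces fit together: Tokenizer and
   * HashingTF as feature Transformers, LogisticRegression as an Estimator, chained in a Pipeline, then tuned with a
   * ParamGridBuilder and a CrossValidator. The method name pipelineSketch, the column names "text" and "label" and
   * the training DataFrame are assumptions for illustration only. */
  def pipelineSketch(training: org.apache.spark.sql.DataFrame): Unit = {
    import org.apache.spark.ml.Pipeline
    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
    import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
    import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}

    // Two feature Transformers followed by an Estimator, assembled into a Pipeline (itself an Estimator)
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("features")
    val lr = new LogisticRegression().setMaxIter(10)
    val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, lr))

    // Grid search over two hyperparameters: 3 x 2 = 6 combinations
    val paramGrid = new ParamGridBuilder()
      .addGrid(hashingTF.numFeatures, Array(1000, 10000, 100000))
      .addGrid(lr.regParam, Array(0.01, 0.1))
      .build()

    // 10-fold cross-validation picks the best combination and refits a model on the full dataset
    val cv = new CrossValidator()
      .setEstimator(pipeline)
      .setEvaluator(new BinaryClassificationEvaluator())
      .setEstimatorParamMaps(paramGrid)
      .setNumFolds(10)

    val cvModel = cv.fit(training)
    // cvModel is a Transformer: applying it appends a prediction column
    cvModel.transform(training).select("label", "prediction").show(5)
  }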
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_4_Exo1.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson06_Spark_Streaming
import org.apache.spark._
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming._
import org.apache.spark.streaming.twitter._
import twitter4j.Status
object Lesson06_4_Exo1 {
/* In this exercise, let’s develop a complete Spark Streaming application so that you can see how the classes
* and methods discussed in earlier sections come together in an application. You will create an application
* that shows trending Twitter hashtags.
*
  * On Twitter, the # symbol marks a topic. People add the hashtag symbol before a word to categorize a tweet. A tweet may
* contain zero or more hashtagged words.
*
* Twitter provides access to its global stream of tweets through a streaming API. You can learn about this
* API on Twitter’s web site at https://dev.twitter.com/streaming/overview.
*
* To get access to the tweets through Twitter’s streaming API, you need to create a Twitter account and
* register your application. An application needs four pieces of authentication information to connect to
* Twitter’s streaming API: consumer key, consumer secret, access token, and access token secret. You can
* obtain these from Twitter. If you have a Twitter account, sign-in or create a new account. After signing-in,
* register your application at https://apps.twitter.com to get all the authentication credentials.
*
* Let’s create a Spark Streaming application that tracks hash tagged words and shows the ones that are
* trending or gaining popularity. Application source code is shown next, followed by code explanation.*/
def main(args:Array[String])={
//Twitter connection info
val consumerKey="9F2cDP6mBO001MJtFyLybWGqT"
val consumerSecret="<KEY>"
val accessToken="<KEY>"
val accessTokenSecret="<KEY>"
// Set the system properties so that Twitter4j library used by twitter stream
// can use them to generate OAuth credentials
System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
System.setProperty("twitter4j.oauth.accessToken", accessToken)
System.setProperty("twitter4j.oauth.accessTokenSecret", accessTokenSecret)
// spark streaming info
val batchInterval = 10
// The threshold variable is used to treat late data. See Lesson6_3
val minThreshold = 20
// path for streaming context checkpoint
val checkPointPath="/tmp/spark/check-point"
//build streaming context with a spark session
    val spark=SparkSession.builder().appName("Lesson6_4_Exo1_TwitterPopularHashTag").master("local[2]").getOrCreate()
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
ssc.checkpoint(checkPointPath)
    //twitter filter
val filters = Array("Trump","China")
    //Get the twitter DStream with a filter
val tweetDStream = TwitterUtils.createStream(ssc, None, filters)
//We can also filter the tweets by language
val language="en"
val tweetsFilteredByLang = tweetDStream.filter{tweet=>tweet.getLang()==language}
//Get tweet text and split to word list
val statuses=tweetsFilteredByLang.map(_.getText)
val words=statuses.flatMap{status=>status.split("""\s+""")}
//get hashTags
val hashTags=words.filter{word=>word.startsWith("#")}
val hashTagPairs=hashTags.map{tag=>(tag,1)}
val tagsWithCounts=hashTagPairs.updateStateByKey(
(counts:Seq[Int],prevCount:Option[Int])=>prevCount.map{c=>c+counts.sum}.orElse{Some(counts.sum)}
)
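    /* To actually see the trending hash tags, we sort the running counts in each batch, print the top 10, and start
     * the streaming context. This is a minimal sketch; the sort order and the number of tags printed are arbitrary
     * choices here, not requirements of the exercise. */
    val sortedTags = tagsWithCounts
      .map { case (tag, count) => (count, tag) }
      .transform(_.sortByKey(ascending = false))
    sortedTags.print(10)
    ssc.start()
    ssc.awaitTermination()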
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/tmp/Realism_Data_Curation.scala | <reponame>pengfei99/Spark<filename>LearningSpark/src/main/java/org/pengfei/tmp/Realism_Data_Curation.scala
package org.pengfei.Lesson17_Analyze_Clinical_Data
import org.apache.log4j.{Level, Logger}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType
import scala.collection.immutable.ListMap
object Realism_Data_Curation {
/************************************************************************************************************
* ***************************** 17.1 Introduction *********************************************************
* *********************************************************************************************************/
  /* In this Lesson, we will learn how to transform a dataset into a specific format. The raw dataset is provided by
   * a hospital study; we need to transform it and load it into a bio data warehouse named Transmart. Transmart is not
   * a real data warehouse in a computer scientist's opinion, but it is close enough. In this lesson, we will learn how to:
   * - read data from excel
   * - build new columns based on duplicate rows
   * - change the cell values of a column
   * - deal with duplicate rows
   * - deal with null values*/
/******************************************* Configuration ***************************************/
val csvFile="/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data/raw_data.csv"
val outputPath="/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data"
val nullValue="null"
val timePointColName="Time_Point"
val patientIdColName="Patient"
val separator="_"
//config for output csv to match with transmart requirements
val studyID="Realism01"
val subjID="SUBJ_ID"
val outputCsvDelimiter="\t"
def main(args:Array[String]):Unit= {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().appName("Realism_Data_Curation").master("local[2]").getOrCreate()
import spark.implicits._
/******************************************************************************************************
* ************************************17.2 Preliminary analyze on raw data *****************************
* ******************************************************************************************************/
/* Before we start the transformation, we need to understand the data. */
val csvDF=spark.read.option("inferSchema", true).option("header",true)
.option("nullValue"," ")
.option("encoding", "UTF-8")
.option("delimiter",",").csv(csvFile)
csvDF.cache()
//csvDF.show(1)
/**********************get raw data size (i.e. rows and columns)************************/
/*val columnNum=csvDF.columns.length
val rowNum=csvDF.count()
println(s"csv column Num is $columnNum, row number is $rowNum")*/
/* csv column Num is 470, row number is 117124 */
//csvDF.printSchema()
/*********************** get rows of each patient and all possible row numbers*********************/
    /* With the below code, we see that we have multiple rows per patient, but the target warehouse only allows one
     * row for each distinct patient, so we need to understand why we have multiple rows.*/
val patientRowCount=csvDF.groupBy("Patient").count()
//patientRowCount.select("count").distinct().show(10)
/* all possible patient row number
| 188|
| 94|
| 235|
| 141|
| 329|
| 282|
| 47|*/
/****************** Discover the data is time related *********************************/
    /* We find that the column TP_Class describes the time point at which the data was recorded, so we can conclude
     * that the data is time related*/
val timePointValues=csvDF.select("TP_Class").distinct().orderBy($"TP_Class".asc)
// timePointValues.show()
/*
* D28|
| D5-D7
| D60|
| HV|
| D1-D2|
| D1|
| D3-D4|
| D2|
| D0|
| D14|
* */
/****************** Discover that all the patients belong to 5 sub group *********************************/
/******************************* get all possible group *******************************/
val allGroups=csvDF.select("Subgroup").distinct()
// allGroups.show(10)
/********************************** get all group patient row count ************************/
val allGroupsCount=csvDF.groupBy("Subgroup").count()
// allGroupsCount.show(10)
/*
+--------+-----+
|Subgroup|count|
+--------+-----+
| HV| 8225|
| Sepsis|26696|
| Trauma|42253|
| Burn| 5969|
| Surgery|33981|
+--------+-----+
* */
/********************* Analysis patient of each sub group *********************************/
    /* We do a quick analysis on the healthy volunteer (HV) subgroup and discover that healthy patients have only
     * 1 medical visit, so each patient has 47 rows, which correspond to the 47 markers. We will not show the results
     * of the other groups here.
     * */
// val HV=getStatsOfEachSubGroup(csvDF,"HV")
/*val hvFile="/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data/realsim/HV/HV.csv"
val HV=spark.read.option("inferSchema", true).option("header",true).option("nullValue"," ").option("delimiter",",").csv(hvFile)*/
/*val HVMarkerCount=HV.filter($"Patient"===5001).select("Marker").distinct().count()
println(s"The marker count is ${HVMarkerCount}")*/
/* The marker count is 47, so we know that for HV, each row represents a different marker */
//val tmp=HV.groupBy("Patient").reduceGroups
//HVMarker.show(5)
/*Platform */
/* val platformCount=HV.filter($"Patient"===5001).select("Platform").distinct()
platformCount.show(10)
val platformMarkerRollup=HV.filter($"Patient"===5001).rollup($"Platform",$"Marker").agg(first("Value").as("Value")).sort(asc("Platform"))
platformMarkerRollup.show(47)*/
/* val gene=HV.select("Patient","Platform","Marker","Value")
gene.show(10)*/
/*HV.coalesce(1).write.mode(SaveMode.Overwrite)
.option("header","true")
.option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of job success files
.option("encoding", "UTF-8")
.csv(outputPath+"/HV")*/
/* All HV patient has 47 rows
+-----+
|count|
+-----+
| 47|
+-----+*/
/* All HV have 1 time point HV (i.e. day 1)
* +--------+
|TP_Class|
+--------+
| HV|
+--------+
* */
/***********************************************************************************************************
* ************************************17.3 Change date value for easier sorting *****************************
* ******************************************************************************************************/
    /* We noticed that with the current time point values, sorting does not produce the right order,
     * so we need to change the values as shown below
     * */
/*
| D0->D00|
| D1->D01|
| D1-D2->D01-D02|
| D14->D14|
| D2->D02|
| D28->D28|
| D3-D4->D03-D04|
| D5-D7->D05-D07|
| D60->D60|
| HV->D00|
* */
val dfWithTP= ModifyTimePoint(csvDF)
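    /* One possible way to implement such a remapping (the lesson's actual ModifyTimePoint implementation may differ)
     * is a chain of when/otherwise expressions on the TP_Class column; the target values below come from the mapping
     * shown above.
    val dfWithTPSketch = csvDF.withColumn(timePointColName,
      when(col("TP_Class").isin("D0", "HV"), "D00")
        .when(col("TP_Class") === "D1", "D01")
        .when(col("TP_Class") === "D2", "D02")
        .when(col("TP_Class") === "D1-D2", "D01-D02")
        .when(col("TP_Class") === "D3-D4", "D03-D04")
        .when(col("TP_Class") === "D5-D7", "D05-D07")
        .otherwise(col("TP_Class")))
    */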
/***********************************************************************************************************
* ************************** 17.4 Build column Based on the patient time point row ***************************
* ******************************************************************************************************/
    /* As our raw data has multiple rows per patient, each row represents data collected at a specific time point.
     * We have two different scenarios :
     * Scenario 1. We don't have explicit column names in the rows; we need to build a column name for each value.
     * For example, we have Patient_id | Time_Point | Coag_Sofa_Score
* 1004
* |-- D0
* |- v1
*
* |-- D1
* |- v2
     * All the rows for patient 1004 will have duplicate data in columns such as age, sex, etc.
     * To eliminate all duplicate data and make the data easier to load into the data warehouse, we need to
* transform all the rows into columns
* For example, the new dataframe should looks like
* Patient_id | D0_Coag_Sofa | D1_Coag_Sofa_Score
* 1004 | v1 | v2
*
* Scenario 2. We have column name in the row, for example
* Patient_id | Time_Point | marker_name | marker_value
* 1004
* |-- D0
* |- n1 | v1
* |- n2 | v2
* |- ...
* |-- D1
* |- n1 | v1
*
* The output must be
*
* * Patient_id | D0_n1 | D0_n2 | ... | D1_d1n1 | ...
* 1004 | v1 | V2 | ... | d1v1
*
*
* */
/***********************************************************************************************************
* ************************** 17.4.1 SOFA time point related data treatment ***************************
* ******************************************************************************************************/
/* SOFA data is in scenario 1 */
/*val allColumns=Array(patientIdColName,timePointColName,"CBD_Cardio_SOFA","CBD_Coag_SOFA",
"CBD_Dobut_SOFA","CBD_Hepat_SOFA",
"CBD_Neuro_SOFA","CBD_Renal_SOFA",
"CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA")
val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA",
"CBD_Dobut_SOFA","CBD_Hepat_SOFA",
"CBD_Neuro_SOFA","CBD_Renal_SOFA",
"CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA")
*/
/*val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA")*/
val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA",
"CBD_Dobut_SOFA","CBD_Hepat_SOFA",
"CBD_Neuro_SOFA","CBD_Renal_SOFA",
"CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA")
val utilityColumns=Array(patientIdColName,timePointColName)
/*// build a small test dataset
val patient1088=dfWithTP.filter($"Patient"===1088)
val sofaTest=BuildColumnsWithTimePointS1(patient1088,sofaValueColumns,utilityColumns)
val sofaTestRowNum=sofaTest.count()
val sofaTestColNum=sofaTest.columns.length
println(s"sofa row number is ${sofaTestRowNum}, sofa column number is ${sofaTestColNum}")*/
/*val sofaTPData=BuildColumnsWithTimePointS1(dfWithTP,sofaValueColumns,utilityColumns)
val sofaRowNum=sofaTPData.count()
val sofaColNum=sofaTPData.columns.length*/
// sofa row number is 552, sofa column number is 82
// println(s"sofa row number is ${sofaRowNum}, sofa column number is ${sofaColNum}")
/***********************************************************************************************************
* ************************** 17.4.2 CBD time point related data treatment ***************************
* ******************************************************************************************************/
val cbdValueColumns = Array("CBD_AdreN","CBD_AdreN_Dose","CBD_NorAdreN","CBD_NorAdreN_Dose",
"CBD_Dobut_Dose","CBD_ALAT","CBD_ASAT","CBD_Bilirub","CBD_Creat","CBD_Diuresis","CBD_Eosino","CBD_Leuco",
"CBD_Lympho","CBD_Mono","CBD_Neutro","CBD_FiO2", "CBD_PaO2FiO2","CBD_MAP","CBD_PAL",
"CBD_Plat","CBD_Hb","CBD_Lactate","CBD_pH","CBD_Glasgow","CBD_Presence_Mecha_Ventil","CBD_PCT")
/*val cbdTPData=BuildColumnsWithTimePointS1(dfWithTP,cbdValueColumns,utilityColumns)
cbdTPData.show(5,false)
val cbdRowNum=cbdTPData.count()
val cbdColNum=cbdTPData.columns.length
//cbd row number is 552, cbd column number is 235
println(s"cbd row number is ${cbdRowNum}, cbd column number is ${cbdColNum}")*/
/***********************************************************************************************************
* ************************** 17.4.3 BioMarker time point related data treatment ***************************
* ******************************************************************************************************/
/* BioMarker data is in scenario 2*/
/* raw dataframe
+-------+----------+---------------+--------+----------------+------------------+-------------+
|Patient|Time_Point| Platform| Marker| Value|Missing_Value_Type|Imputed_Value|
+-------+----------+---------------+--------+----------------+------------------+-------------+
| 5001| D00| ELLA_EDTA| IL6| 1,964| null| 0|
| 5001| D00|ELLA_TRUCULTURE| IL2_NUL| null| OOR<| 0|
result dataframe (build 3 new column)
Column name rules: platform-BioMarker-TimePoint-(value|Missing_Value_Type|Imputed_Value)
+-------+--------------------------+
|Patient| ELLA_EDTA-IL6-D00-value| ELLA_EDTA-IL6-D00-Missing_Value_Type | ELLA_EDTA-IL6-D01-Imputed_Value
+-------+--------------------------+
| 5001| 1,964| null | 0(0->false, 1->true)
-Biomarker
|--Platform
|--ELLA_EDTA
|--IL6
|--D0
| value
| Missing_Value_Type
| Imputed_Value
|--D1
|--...
|--IL10
|--ELLA_TRUCULTURE
|--TNFa_LPS
|--...
So the total column_number= distinct(platform+marker)*timePoint*3
*/
/*// build a small test dataset
val patient1088=dfWithTP.filter($"Patient"===1088)
//tested with different platform sub dataset, for example, ELLA_EDTA only has two rows for patient 1088
val markerTestDf=patient1088.filter($"Platform"==="ELLA_TRUCULTURE")
markerTestDf.show(5,false)
val test=BuildColumnsWithTimePointS2(markerTestDf)
test.show(5, false)*/
/* Test with full dataset*/
/* val bioMarkerWithTPColData=BuildColumnsWithTimePointS2(dfWithTP)
bioMarkerWithTPColData.show(5,false)
val rowNum=bioMarkerWithTPColData.count()
val colNum=bioMarkerWithTPColData.columns.length
// The row number is 552, the column number is 1270, the row number is 552 which is correct of the total patient number
println(s"The row number is ${rowNum}, the column number is ${colNum}")*/
/***********************************************************************************************************
* ******************************************* 17.5 Export data ******************************************
* ******************************************************************************************************/
ExportDemographicData(csvDF)
//ExportSeverityRiskFactor(csvDF)
//ExportSofaD1Data(csvDF)
//ExportSofaTPData(dfWithTP)
//ExportDataWithoutTP(csvDF)
}
/**
    * This method transforms the raw bioMarker Value/Missing_Value_Type/Imputed_Value rows into columns named after
    * the platform name, marker name and time point
*
* @author <NAME>
* @version 1.0
* @since 2018-12-28
* @param rawDf The source data frame in which the transformation will take place.
* @return a data frame which contains the result of the transformation
* */
def BuildColumnsWithTimePointS2(rawDf:DataFrame):DataFrame={
val spark=rawDf.sparkSession
import spark.implicits._
/* In our case, the column which we want to transform are fixed, and we only called it once, so no need to set in
* the config. */
val bioMarkerValueCol=Array("Value","Missing_Value_Type","Imputed_Value")
val bioMarkerFiledIdCol=Array("Platform","Marker")
val bioMarkerUtilityCol=Array(patientIdColName,timePointColName)
val bioMarkerCol=bioMarkerUtilityCol.union(bioMarkerFiledIdCol).union(bioMarkerValueCol)
/*bioMarkerData.printSchema()
val allValue=bioMarkerData.count()
val nonNullValue=bioMarkerData.filter($"Value".isNotNull).count()
println(s"All value count is ${allValue}, nonNullValue count is ${nonNullValue}")*/
/* val allPlateformMarkerTP=bioMarkerDataWith3FiledIdName.select("marker_tp").distinct().collect().toArray
// All possible platform biomarker and time point combination number is 423, so we will have 423*3 more columns
println(s"All possible platform biomarker and time point combination ${allPlateformMarkerTP.length}")*/
/*Step 0: clean the raw dataset, get only biomarker related columns and fill the null value with string "null"*/
val bioMarkerData=rawDf.select(bioMarkerCol.head,bioMarkerCol.tail:_*).dropDuplicates().orderBy(asc(patientIdColName))
val df=bioMarkerData.na.fill(nullValue)
df.show(5)
val bioMarkerDataWith3FiledIdName=df.withColumn("tmp",concat(col(bioMarkerFiledIdCol(0)),lit(separator),col(bioMarkerFiledIdCol(1))))
.withColumn("marker_tp",concat($"tmp",lit(separator),col(timePointColName)))
.drop("tmp")
.withColumn("marker_Value",concat($"marker_tp",lit("/Value")))
.withColumn("marker_Missing_Value_Type",concat($"marker_tp",lit("/Missing_Value_Type")))
.withColumn("marker_Imputed_Value",concat($"marker_tp",lit("/Imputed_Value")))
bioMarkerDataWith3FiledIdName.show(5,false)
var result=df.select(patientIdColName).distinct().sort(asc(patientIdColName))
for(filedValueColName<-bioMarkerValueCol){
println(s"Current working column name : ${filedValueColName}")
val filedIdColName="marker_"+filedValueColName
val inter=RowToColumn(bioMarkerDataWith3FiledIdName,patientIdColName,filedIdColName,filedValueColName)
result=result.join(inter,Seq(patientIdColName),"inner")
}
result.show(1, false)
//Sort the output column so the Value/Missing_Value_Type/Imputed_Value of a marker are together
val sortedColumnName=Array("Patient")++result.columns.sorted.filter(!_.equals("Patient"))
println(s"The sorted Column Name is ${sortedColumnName.mkString(";")}")
result=result.select(sortedColumnName.head,sortedColumnName.tail:_*)
return result
}
/**
    * This method transforms the raw data of scenario 1 into columns with time point
    *
    * @author <NAME>
    * @version 1.0
    * @since 2018-12-20
    * @param df The source data frame in which the transformation will take place.
    * @param valueColumns the list of value column names; each one is spread over new time-point-specific columns
    * @param utilityColumns the utility columns (patient id and time point) used to identify each value
* @return a data frame which contains the result of the transformation
* */
def BuildColumnsWithTimePointS1(df:DataFrame,valueColumns:Array[String],utilityColumns:Array[String]):DataFrame={
/* change row to column with time_point in consideration
* here is the origin data frame
*
+-------+----------+---------------
|Patient|Time_Point|CBD_Cardio_SOFA|
+-------+----------+---------------+
| 1004| D03-D04| 1|
| 1004| D05-D07| 1|
| 1004| D28| null|
| 1004| D01-D02| 4|
| 1007| ...
*
* Step.1 . Build filedId column
*
|Patient|CBD_Cardio_SOFA_ID|CBD_Cardio_SOFA_Value|
+-------+----------+---------------+
| 1004| CBD_Cardio_SOFA.D03-D04| 1 |
| 1004| CBD_Cardio_SOFA.D05-D07| 1 |
| 1004| CBD_Cardio_SOFA.D28| null|
| 1004| CBD_Cardio_SOFA.D01-D02| 4 |
| 1007| ...
* */
val spark=df.sparkSession
import spark.implicits._
/*Step1. if filedId column does not exit, create filedId column */
//Get all filed value column name
val allColumns=valueColumns.union(utilityColumns)
println(s"allColumns ${allColumns.mkString(";")}")
//Get all time point
val allColumnData=df.select(allColumns.head,allColumns.tail:_*).dropDuplicates().orderBy(asc(patientIdColName))
allColumnData.show(10)
/* no need to get the array of timePoint
val timePoint=sofa_TPData.select("TP_Class").distinct().collect().map(_.get(0))*/
var tmp=allColumnData
for(valueColumn<-valueColumns){
tmp=tmp.withColumn("tmp",lit(valueColumn))
/* do not put . in the column name, spark will think you want to access an attribute of the columne*/
.withColumn(valueColumn+"_Id",concat($"tmp",lit(separator),col(timePointColName)))
.drop("tmp")
}
tmp.show(10)
// tmp.printSchema()
/* Here we need to loop over all elements in value column,*/
var result=tmp.select(patientIdColName).distinct().sort(asc(patientIdColName))
result.show(5)
for(filedValueColumn<-valueColumns){
val filedColumnId=filedValueColumn+"_Id"
val inter=RowToColumn(tmp,patientIdColName,filedColumnId,filedValueColumn)
result=result.join(inter,Seq(patientIdColName),"inner")
result.show(10)
}
return result
}
/**
   * This method transforms multiple rows of an object into columns; after the transformation, each object has only
   * one row in the data frame. To perform the transformation, this method casts all columns to type string. As we
   * don't do arithmetic operations here, this is not a problem; you can change the type back to Int or Double after
   * the transformation.
   * @author <NAME>
   * @version 1.0
   * @since 2018-12-20
   * @param rawDf The source data frame in which the transformation will take place.
   * @param objectIdColumnName The column which identifies each object (e.g. the patient id).
   * @param targetIdColumnName The column in the data frame which contains the name of the field. Each distinct value
   *                           becomes the name of a newly created column.
   * @param targetValueColumnName The column in the data frame which contains the value of the field. Each row
   *                              becomes a cell in the corresponding field column.
* @return a data frame which contains the result of the transformation
*
* */
def RowToColumn(rawDf: DataFrame, objectIdColumnName:String,targetIdColumnName: String,targetValueColumnName:String): DataFrame = {
val spark = rawDf.sparkSession
import spark.implicits._
    /* Step0. If we simply dropped all rows containing nulls, we could end up with an empty dataframe (a dataframe
     * without rows), and we can't build columns from no rows. Instead, we fill the nulls with a placeholder value
     * that will not cause a null pointer exception: we cast all columns to string type and fill the null cells with
     * the pre-defined nullValue string */
val df = rawDf.select(rawDf.columns.map(c => col(c).cast(StringType)) : _*).na.fill(nullValue)
/* Step1. Get all possible filedIDs, which will be become the column name of each filed */
val filedIDs = df.select(targetIdColumnName).distinct().orderBy(df(targetIdColumnName).asc)
filedIDs.show(10,false)
// convert the column in the data frame which contains the filed Ids to an Array of the filed Ids.
val filedIDsArray: Array[String] = filedIDs.collect().map(_.get(0).toString)
/* Step2. Build the (filedId,filedValue) <key,value> map for each row. */
/* We have two solutions to do this.
* Solution 1 : build a user define function which build a map
* Solution 2 : Spark provide map function which can build a map based on two columns
* Here we choose Solution 2 , spark native function is always better than udf.*/
// Solution 1: If we don't fill null value before, here we need to use Option type to avoid null pointer
/*def buildFiledMap(filedName:String,filedValue:String):Map[String,Option[String]]={
if(filedValue.isEmpty) Map(filedName->None)
else Map(filedName->Option(filedValue))
}
spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2))
val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})"))*/
/* def buildFiledMap(filedName:String,filedValue:String):Map[String,String]={
if(filedValue.isEmpty) Map(filedName->"null")
else Map(filedName->filedValue)
}
spark.udf.register("buildFiledMap",(arg1:String,arg2:String)=>buildFiledMap(arg1,arg2))
val filedIdValueMap=df.withColumn("filed_map",expr(s"buildFiledMap(${targetIdColumnName},${targetValueColumnName})"))
*/
/* Solution 2 : The spark native map function
     * The map function does not handle null values by default, so if we have null values in either column we will
     * get entries like x-> (missing value) or ->y (missing key), and any function applied to these null values will
     * throw a null pointer exception. The solution is to fill the null values with the string "null" beforehand,
**/
val filedIdValueMap = df.withColumn("filed_map", map(df(targetIdColumnName), df(targetValueColumnName)))
filedIdValueMap.show(5,false)
/* Step3. Group the (filedId,filedValue) map for each distinct subject which may have multiple rows. Each row has
     * a map. After grouping, we concatenate all maps of a subject into one single map. Here we use collect_list; a
     * similar function, collect_set, also exists. collect_list returns an ordered sequence of elements (with
     * duplicates), while collect_set returns an unordered set of distinct elements. Since we know a subject cannot
     * have duplicate field ids, collect_list is sufficient and we prefer it.*/
val groupedFiledIdValueMap = filedIdValueMap.groupBy(objectIdColumnName)
.agg(collect_list("filed_map")) // return a list of map
.as[(String, Seq[Map[String, String]])] // <-- leave Rows for typed pairs
.map { case (id, list) => (id, list.reduce(_ ++ _)) } // <-- concatenate all maps to a single map
.toDF(objectIdColumnName, "filed_map")
groupedFiledIdValueMap.show(10, false)
/* Step 4. Create column for each fieldId based on the complete fieldId list, with the getFiledValue function,
* */
val bFiledIDsArray: Broadcast[Array[String]] = spark.sparkContext.broadcast(filedIDsArray)
def getFiledValue(filedId: String, filedMap: Map[String, String]): String = {
//you can replace the empty (null) value as you want, here I tried empty string "", "null" and "."
if(filedMap.isEmpty||filedId.isEmpty){nullValue}
else {
filedMap.getOrElse(filedId, nullValue)
}
}
//spark.udf.register("getFiledValue", (arg1: String, arg2: Map[String, String]) => getFiledValue(arg1, arg2))
spark.udf.register("getFiledValue", getFiledValue(_:String, _: Map[String, String]))
var tmpDf = groupedFiledIdValueMap
(0 until bFiledIDsArray.value.length).map { i =>
val filedId: String = bFiledIDsArray.value(i)
tmpDf = tmpDf.withColumn("current_id", lit(filedId))
.withColumn(filedId, expr("getFiledValue(current_id,filed_map)"))
.drop("current_id")
// Calling the function directly with a plain Scala variable and a column does not work: a UDF invoked through the
// DataFrame API only accepts Column arguments, hence the temporary "current_id" column above.
// See the commented sketch after this loop for an alternative that passes the field id as a literal Column.
//tmpDf=tmpDf.withColumn(filedId,getFiledValue(filedId,filed_map)))
}
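/* A hedged alternative sketch (not part of the original pipeline): instead of registering a SQL UDF and injecting
 * the field id through a temporary "current_id" column, the field id can be passed directly as a literal Column to
 * a Scala udf() wrapper. Kept commented out; it assumes org.apache.spark.sql.functions.{udf, lit, col} are in
 * scope, as they are elsewhere in this file.
 *
 * val getFiledValueUdf = udf(getFiledValue _)
 * (0 until bFiledIDsArray.value.length).foreach { i =>
 *   val filedId: String = bFiledIDsArray.value(i)
 *   tmpDf = tmpDf.withColumn(filedId, getFiledValueUdf(lit(filedId), col("filed_map")))
 * }
 */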
val result=tmpDf.drop("filed_map")
result.show(5,false)
result
}
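/* A minimal usage sketch for RowToColumn (hypothetical toy data, kept as a comment): given a long-format frame with
 * one row per (subject, fieldId, fieldValue) triple, the function returns one row per subject with one column per
 * fieldId, missing combinations being filled with nullValue.
 *
 * val longDf = Seq(
 *   ("P1", "Age", "42"), ("P1", "Sex", "M"),
 *   ("P2", "Age", "35")            // P2 has no Sex row -> its Sex column is filled with nullValue
 * ).toDF("Patient", "fieldId", "fieldValue")
 * val wideDf = RowToColumn(longDf, "Patient", "fieldId", "fieldValue")
 * // wideDf columns: Patient, Age, Sex
 */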
/*************************************************************************************************************
 * *************************** 17.5.1 Prepare non-time-related data and export them ***************************
 * ******************************************************************************************************/
/******************** 17.5.1.1 Prepare demographic data and export them ******************/
def ExportDemographicData(df:DataFrame):Unit={
val spark=df.sparkSession;
import spark.implicits._
// prepare demographicColumns; since the demographic columns are not time-point related, dropping duplicates
// leaves one row per patient
val demographicColumns=Array("Patient","Subgroup","DD_Gender","DD_Calculated_Age","DD_Height","DD_Weight","DD_BMI")
val demographicData=df.select(demographicColumns.head, demographicColumns.tail: _*).dropDuplicates().orderBy($"Patient".asc)
demographicData.show(10)
// column rename map
val nameMap=Map(("DD_Gender","Sex"),("DD_Calculated_Age","Age"),("DD_Height","Height"),("DD_Weight","Weight"),("DD_BMI","BMI"))
/* Step 1 : normalize data for transmart format*/
val demoForTransmart=NormalizeColNameForTransmart(demographicData,demographicColumns)
demoForTransmart.show(10,false)
/* Step 2 : change column name*/
val demoRenamedDf=ChangeColName(demoForTransmart,nameMap)
demoRenamedDf.show(10,false)
/* Step 3 : check null value */
countNullValue(demoRenamedDf)
/* The null value count of the column STUDY_ID is 0
The null value count of the column SUBJ_ID is 0
The null value count of the column Subgroup is 0
The null value count of the column Sex is 0
The null value count of the column Age is 0
The null value count of the column Height is 6
The null value count of the column Weight is 3
The null value count of the column BMI is 6*/
/* Step 4 : fill null value with transmart required value (. for digit, Not Available for string)*/
/* We know Height, Weight and BMI are all digit columns, so we replace their nulls with . */
val demoFinalData=fillTransmartNullForDigitCol(demoRenamedDf,Array("Height","Weight","BMI"),nullValue)
countNullValue(demoFinalData)
/* Step 5 : output data to disk */
WriteDataToDisk(demoFinalData,"/tmp/Realism","demographic_data")
// To get a single line per patient we have two solutions: use .groupBy().agg(first(...),...),
// or simply keep one row by eliminating all duplicate rows (as done above with dropDuplicates).
//finalDemographData.show(5)
// If you want the max/min (first) value of a column, remember that groupBy does not guarantee any order by default;
// you have to sort before using first.
// Example with groupBy and agg(first) below. Be careful: if the rows are not completely identical, the result may be
// unreliable, because groupBy does not guarantee order; sort before first to always get the same first line.
/* //Get the first element of each column
val DemographDataFirst=demographicData.groupBy("Patient").agg(first("DD_Gender").as("Gender"),
first("DD_Calculated_Age").as("Age"),
first("DD_Height").as("Height"),
first("DD_Weight").as("Weight"),
first("DD_BMI").as("BMI")
)
DemographDataFirst.show(5)*/
}
/******************** 17.5.1.2 Prepare Severity Risk Factor data and export them ******************/
def ExportSeverityRiskFactor(df:DataFrame):Unit={
val spark=df.sparkSession;
import spark.implicits._
/********************** prepare the History_and_comorbidity/Security_and_risk_factor columns**********************/
val severityRiskFactor=Array("Patient","SRF_ASA","SRF_ASA_NA","SRF_CEI","SRF_Coma","SRF_Diag_Cat","SRF_Inhalation","SRF_Maccabe",
"SRF_Pulmo_Contusion","SRF_Statins","SRF_CMV_Ab","SRF_HSV1_Ab","SRF_SAPSII")
val severityRiskFactorData=df.select(severityRiskFactor.head,severityRiskFactor.tail: _*).dropDuplicates().orderBy($"Patient".asc)
severityRiskFactorData.show(5)
/*Step1: normalize data for transmart format*/
val srfData=NormalizeColNameForTransmart(severityRiskFactorData,severityRiskFactor)
srfData.show(3,false)
/*Step2 : change col name*/
/*Step3 : check null col*/
//countNullValue(srfData)
/*Step4 : replace null with transmart required null value
* For digit columns -> .
* For String columns -> Not Available
* In the column "SRF_CMV_Ab","SRF_HSV1_Ab" (digit columns), we have special value such as Neg, we also want to
* replace it by 0.
* */
/* String columns : */
val strColumns=Array("SRF_ASA_NA","SRF_CEI","SRF_Coma","SRF_Diag_Cat","SRF_Inhalation","SRF_Maccabe","SRF_Pulmo_Contusion","SRF_Statins")
// fill string null value
val fillStr=fillTransmartNullForStrCol(srfData,strColumns,nullValue)
/* Digit columns : */
val digitColumns=Array("SRF_ASA","SRF_CMV_Ab","SRF_HSV1_Ab","SRF_SAPSII")
// fill digit null value
val fillDigit=fillTransmartNullForDigitCol(fillStr,digitColumns,nullValue)
// Replace Neg with 0 for col SRF_CMV_Ab and SRF_HSV1_Ab
val finalSrfData=replaceSpecValue(fillDigit,Array("SRF_CMV_Ab","SRF_HSV1_Ab"),"Neg", "0")
/* Step5 : output data to disk*/
finalSrfData.show(5,false)
WriteDataToDisk(finalSrfData,"/tmp/Realism","SRF_data")
/*// Check SRF_CMV_Ab column . number = the number of null or not, we get 195 which is a match
val checkColName="SRF_CMV_Ab"
finalSrfData.select(checkColName).distinct().orderBy(finalSrfData(checkColName).desc).show(10,false)
val SRF_CMV_count=finalSrfData.filter(finalSrfData(checkColName)===".").distinct().count()*/
/*// The count shows no more null in all columns
countNullValue(finalSrfData)*/
}
/********************** 17.5.1.4 prepare the sofa D1 Columns data and export them as csv **********************/
def ExportSofaD1Data(df:DataFrame):Unit={
val spark=df.sparkSession;
import spark.implicits._
/* sofa_D1 has all the column which are not time point related*/
val sofaD1=Array("Patient","CBD_Cardio_SOFA_Theoretical_D1","CBD_Coag_SOFA_Theoretical_D1",
"CBD_Dobut_SOFA_Theoretical_D1","CBD_Hepat_SOFA_Theoretical_D1","CBD_Neuro_SOFA_Theoretical_D1","CBD_Renal_SOFA_Theoretical_D1",
"CBD_Resp_SOFA_Theoretical_D1","CBD_SOFA_Theoretical_D1")
val sofaD1Data=df.select(sofaD1.head,sofaD1.tail:_*).dropDuplicates().orderBy($"Patient".asc)
/*Step1: normalize data for transmart format*/
val sofaD1NormData=NormalizeColNameForTransmart(sofaD1Data,sofaD1)
sofaD1NormData.show(3,false)
/*Step2 : change col name*/
/*Step3 : check null col*/
//countNullValue(sofaD1NormData)
/*Step4 : replace null with transmart required null value*/
/*String column: */
val strColumns=Array("CBD_Dobut_SOFA_Theoretical_D1")
val fillStr=fillTransmartNullForStrCol(sofaD1NormData,strColumns,nullValue)
/*Digit column: */
val digitColumns=Array("CBD_Cardio_SOFA_Theoretical_D1","CBD_Coag_SOFA_Theoretical_D1",
"CBD_Hepat_SOFA_Theoretical_D1","CBD_Neuro_SOFA_Theoretical_D1","CBD_Renal_SOFA_Theoretical_D1",
"CBD_Resp_SOFA_Theoretical_D1","CBD_SOFA_Theoretical_D1")
val fillDigit=fillTransmartNullForDigitCol(fillStr,digitColumns,nullValue)
/*check distinct value of each column, no special value found, so final data= fillDigit*/
//getDistinctValueOfColumns(fillDigit,strColumns,10)
// getDistinctValueOfColumns(fillDigit,digitColumns,10)
val finalSofaD1Data=fillDigit
/*Step5 : output data to disk*/
WriteDataToDisk(finalSofaD1Data,"/tmp/Realism","SofaD1_data")
}
/******************* 17.5.1.5 prepare the sofa with time point Columns data and export them as csv *****************/
def ExportSofaTPData(df:DataFrame):Unit={
val spark=df.sparkSession;
import spark.implicits._
val sofaValueColumns=Array("CBD_Cardio_SOFA","CBD_Coag_SOFA",
"CBD_Dobut_SOFA","CBD_Hepat_SOFA",
"CBD_Neuro_SOFA","CBD_Renal_SOFA",
"CBD_Resp_SOFA","CBD_SOFA_NA","CBD_SOFA")
val utilityColumns=Array(patientIdColName,timePointColName)
val allColumns=utilityColumns++sofaValueColumns
val sofaRawData=df.select(allColumns.head,allColumns.tail:_*).dropDuplicates().orderBy($"Patient".asc)
/* For the time points D14, D28 and D60 all the value columns are null, so we decide to remove these rows. */
/* The refine step checks out: there are 981 all-null rows across D14, D28 and D60; before refining we had 2452
 * rows, afterwards 1471. */
val sofaRefinedData=removeRowsWithSpecValues(sofaRawData,"Time_Point",Array("D14","D28","D60"))
sofaRefinedData.show(3,false)
sofaRefinedData.cache()
/*Step0: transform multi rows to columns*/
val sofaTPData=BuildColumnsWithTimePointS1(sofaRefinedData,sofaValueColumns,utilityColumns)
/*Step1: normalize data for transmart format*/
val sofaTPNormData=NormalizeColNameForTransmart(sofaTPData,sofaTPData.columns.toArray)
sofaTPNormData.show(3,false)
/*Step2 : change col name*/
/*Step3 : check null col, all value columns has null values, so we need to do
* fill null on all columns */
//countNullValue(sofaTPNormData)
/*Step4 : replace null with transmart required null value*/
/* string columns*/
val strColumns=Array("CBD_Dobut_SOFA_D00","CBD_Dobut_SOFA_D01","CBD_Dobut_SOFA_D01-D02","CBD_Dobut_SOFA_D02","CBD_Dobut_SOFA_D03-D04","CBD_Dobut_SOFA_D05-D07",
"CBD_SOFA_NA_D00","CBD_SOFA_NA_D01","CBD_SOFA_NA_D01-D02","CBD_SOFA_NA_D02","CBD_SOFA_NA_D03-D04","CBD_SOFA_NA_D05-D07")
val fillStr=fillTransmartNullForStrCol(sofaTPNormData,strColumns,nullValue)
/* digit columns*/
val digitColumns=Array("CBD_Cardio_SOFA_D00","CBD_Cardio_SOFA_D01","CBD_Cardio_SOFA_D01-D02","CBD_Cardio_SOFA_D02","CBD_Cardio_SOFA_D03-D04","CBD_Cardio_SOFA_D05-D07",
"CBD_Coag_SOFA_D00","CBD_Coag_SOFA_D01","CBD_Coag_SOFA_D01-D02","CBD_Coag_SOFA_D02","CBD_Coag_SOFA_D03-D04","CBD_Coag_SOFA_D05-D07",
"CBD_Hepat_SOFA_D00","CBD_Hepat_SOFA_D01","CBD_Hepat_SOFA_D01-D02","CBD_Hepat_SOFA_D02","CBD_Hepat_SOFA_D03-D04","CBD_Hepat_SOFA_D05-D07",
"CBD_Neuro_SOFA_D00","CBD_Neuro_SOFA_D01","CBD_Neuro_SOFA_D01-D02","CBD_Neuro_SOFA_D02","CBD_Neuro_SOFA_D03-D04","CBD_Neuro_SOFA_D05-D07",
"CBD_Renal_SOFA_D00","CBD_Renal_SOFA_D01","CBD_Renal_SOFA_D01-D02","CBD_Renal_SOFA_D02","CBD_Renal_SOFA_D03-D04","CBD_Renal_SOFA_D05-D07",
"CBD_Resp_SOFA_D00","CBD_Resp_SOFA_D01","CBD_Resp_SOFA_D01-D02","CBD_Resp_SOFA_D02","CBD_Resp_SOFA_D03-D04","CBD_Resp_SOFA_D05-D07",
"CBD_SOFA_D00","CBD_SOFA_D01","CBD_SOFA_D01-D02","CBD_SOFA_D02","CBD_SOFA_D03-D04","CBD_SOFA_D05-D07")
val fillDigit=fillTransmartNullForDigitCol(fillStr,digitColumns,nullValue)
/*check distinct value of each column, no special value found, so final data= fillDigit*/
//getDistinctValueOfColumns(fillDigit,strColumns,10)
// getDistinctValueOfColumns(fillDigit,digitColumns,10)
val finalSofaTPData=fillDigit
/*Step5 : output data to disk*/
WriteDataToDisk(finalSofaTPData,"/tmp/Realism","SofaTP_data")
}
def ExportDataWithoutTP(df:DataFrame):Unit={
val spark=df.sparkSession;
import spark.implicits._
val demographicColumns=Array("Patient","Subgroup","DD_Gender","DD_Calculated_Age","DD_Height","DD_Weight","DD_BMI")
val severityRiskFactor=Array("Patient","SRF_ASA","SRF_ASA_NA","SRF_CEI","SRF_Coma","SRF_Diag_Cat","SRF_Inhalation","SRF_Maccabe",
"SRF_Pulmo_Contusion","SRF_Statins","SRF_CMV_Ab","SRF_HSV1_Ab","SRF_SAPSII")
/************************ prepare the History_and_comorbidity/Charlson columns ***************************/
val charlson=Array("Patient","Charlson_AIDS","Charlson_Cerebrovasc_Disease","Charlson_Chronic_Pulm_Disease",
"Charlson_Congest_Heart_Failure","Charlson_Connect_Tissue_Disease","Charlson_Dementia","Charlson_Hemiplegia",
"Charlson_Kidney_Disease","Charlson_Leukemia","Charlson_Liver_Disease","Charlson_Malign_Lymphoma",
"Charlson_Mellitus_Diabetes", "Charlson_Myocardial_Inf","Charlson_Peptic_Ulcer_Disease",
"Charlson_Periph_Vascular_Disease","Charlson_Solid_Tumor","Charlson_Score")
val charlsonData=df.select(charlson.head,charlson.tail:_*)
// charlsonData.show(5)
// charlsonData.dropDuplicates().show()
/************************** Prepare the History_and_comorbidity/SOFA, SOFA is time point related.***************/
val sofa=Array("Patient","TP_Class","CBD_Cardio_SOFA","CBD_Cardio_SOFA_Theoretical_D1","CBD_Coag_SOFA","CBD_Coag_SOFA_Theoretical_D1",
"CBD_Dobut_SOFA","CBD_Dobut_SOFA_Theoretical_D1","CBD_Hepat_SOFA","CBD_Hepat_SOFA_Theoretical_D1",
"CBD_Neuro_SOFA","CBD_Neuro_SOFA_Theoretical_D1","CBD_Renal_SOFA","CBD_Renal_SOFA_Theoretical_D1",
"CBD_Resp_SOFA","CBD_Resp_SOFA_Theoretical_D1","CBD_SOFA_NA","CBD_SOFA","CBD_SOFA_Theoretical_D1")
val sofaData=df.select(sofa.head,sofa.tail:_*)
// sofaData.show(5)
// sofaData.dropDuplicates().show()
/*************************** prepare the History_and_comorbidity/Clinical_biological_data ************************/
/* CBD is time related too */
val clinicalBiological=Array("Patient","TP_Class","CBD_AdreN","CBD_AdreN_Theoretical_D1","CBD_AdreN_Dose","CBD_AdreN_Dose_Theoretical_D1",
"CBD_NorAdreN","CBD_NorAdreN_Theoretical_D1","CBD_NorAdreN_Dose","CBD_NorAdreN_Dose_Theoretical_D1",
"CBD_Dobut_Dose","CBD_Dobut_Dose_Theoretical_D1","CBD_ALAT","CBD_ALAT_Theoretical_D1","CBD_ASAT",
"CBD_ASAT_Theoretical_D1","CBD_Bilirub","CBD_Bilirub_Theoretical_D1","CBD_Creat","CBD_Creat_Theoretical_D1",
"CBD_Diuresis","CBD_Diuresis_Theoretical_D1","CBD_Eosino","CBD_Eosino_Theoretical_D1","CBD_Leuco",
"CBD_Leuco_Theoretical_D1","CBD_Lympho","CBD_Lympho_Theoretical_D1","CBD_Mono","CBD_Mono_Theoretical_D1",
"CBD_Neutro","CBD_Neutro_Theoretical_D1","CBD_FiO2","CBD_FiO2_Theoretical_D1", "CBD_PaO2FiO2",
"CBD_PaO2FiO2_Theoretical_D1","CBD_MAP","CBD_MAP_Theoretical_D1","CBD_PAL","CBD_PAL_Theoretical_D1",
"CBD_Plat","CBD_Plat_Theoretical_D1","CBD_Hb","CBD_Hb_Theoretical_D1","CBD_Lactate","CBD_Lactate_Theoretical_D1",
"CBD_pH","CBD_pH_Theoretical_D1","CBD_Glasgow","CBD_Glasgow_Theoretical_D1","CBD_Presence_Mecha_Ventil",
"CBD_Presence_Mecha_Ventil_Theoretical_D1","CBD_PCT","CBD_PCT_Theoretical_D1")
val clinicalBiologicalData=df.select(clinicalBiological.head,clinicalBiological.tail:_*)
//clinicalBiologicalData.show(5)
//clinicalBiologicalData.dropDuplicates().show(5)
/************************* prepare History_and_comorbidity/Administrated cares **************************/
val administeredCares=Array("Patient","AC_Blood_Derivated_Products","AC_Mass_Blood_Transf","AC_Fresh_Frozen_Plasma","AC_Catechol",
"AC_Catechol_Duration","AC_Catechol_D30FD","AC_HCHS","AC_HCHS_Duration","AC_HCHS_D30FD","AC_Corticotherapy_Other",
"AC_Corticotherapy_Other_Duration","AC_Corticotherapy_Other_D30FD","AC_Continuous_RRT","AC_InterM_Hemodialysis",
"AC_InterM_Hemodialysis_Sessions_Number","AC_RRT","AC_RRT_Duration","AC_RRT_D30FD","AC_Infection_Source_Controlled",
"AC_Surgical_Intervention","AC_Surgical_Intervention_Nb")
val administeredCaresData=df.select(administeredCares.head,administeredCares.tail:_*)
//administeredCaresData.show(5)
//administeredCaresData.dropDuplicates().show(5)
/************************* prepare History_and_comorbidity/Invasive devices **************************/
val invasiveDevices=Array("Patient","ID_Intub_Tracheo","ID_Intub_Duration","ID_Intub_D30FD","ID_Reintub","ID_Mechanical_Ventilation",
"ID_Mechanical_Ventilation_Duration","ID_Mechanical_Ventilation_D30FD","ID_Urin_Cath","ID_Urin_Cath_Duration",
"ID_Urin_Cath_D30FD","ID_Venous_Cath","ID_Venous_Cath_Duration","ID_Venous_Cath_D30FD")
val invasiveDevicesData=df.select(invasiveDevices.head,invasiveDevices.tail: _*)
//invasiveDevicesData.show(5)
//invasiveDevicesData.dropDuplicates().show(5)
/*
Clinical follow up (no time related variables)
├─ Follow up
├─ Hospital Acquired Infection
HAI_number (integer)
HAI_Time_To_HAI1_All (integer)
HAI_Time_To_HAI1_Definite (integer)
HAI_Time_To_HAI1_Likely (integer)
├─ Group specific follow up
├─ Septic shock
├─ Severe Trauma
├─ Severe Burn
└─ Major Surgery
├─ EQ5D
└─ End of study
*/
/***************************** Prepare clinical follow up/follow up *****************************************/
val followUp=Array("Patient","FUD_ICU_Disch_Dest","FUD_ICU_Disch_Status","FUD_ICU_LOS","FUD_ICU_D30FD","FUD_Hosp_Disch_Dest",
"FUD_Hosp_Disch_Status","FUD_Hosp_LOS","FUD_Hosp_D30FD","FUD_D30_Survival_Time","FUD_D14_Status","FUD_D28_Status",
"FUD_D60_Status","FUD_D90_Status","FUD_Anti_Inf_D14","FUD_Anti_Inf_D28","FUD_Anti_Inf_D60","FUD_Anti_Inf_D90",
"FUD_Chemo_D28","FUD_Chemo_D60","FUD_Chemo_D90")
val followUpData=df.select(followUp.head,followUp.tail:_*)
//followUpData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Hospital_Acquired_Infection **************************/
val hospitalAcquiredInfection=Array("Patient","HAI_Number","HAI_Time_To_HAI1_All","HAI_Time_To_HAI1_Definite","HAI_Time_To_HAI1_Likely",
"HAI_D30_All_Status","HAI_D30_All_Time","HAI_D30_Definite_Status","HAI_D30_Definite_Time")
val hospitalAcquiredInfectionData=df.select(hospitalAcquiredInfection.head,hospitalAcquiredInfection.tail:_*)
//hospitalAcquiredInfectionData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/Septic_shock *****************/
/* Possible typo at "SS_Bacteremie_Germ2"*/
val groupFollowUPSepsis=Array("Patient", "SS_Septic_Shock_At_Inclusion","SS_Inf_Localization","SS_Inf_Localization_Clarif","SS_Inf_Type",
"SS_Inf_Acq_Type","SS_Germ1","SS_Germ1_Cat_Standard","SS_Germ1_Cat_Detailed","SS_Germ2","SS_Germ2_Cat_Standard",
"SS_Germ2_Cat_Detailed","SS_Bacteremia","SS_Bacteremia_Germ1","SS_Bacteremie_Germ2")
val groupFollowUpSepsisData=df.select(groupFollowUPSepsis.head,groupFollowUPSepsis.tail:_*)
//groupFollowUpSepsisData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/Severe_Trauma *****************/
val groupFollowUPTrauma=Array("Patient","ST_ISS","ST_Prophyl_Antibio_Admin")
val groupFollowUPTraumaData=df.select(groupFollowUPTrauma.head,groupFollowUPTrauma.tail:_*)
//groupFollowUPTraumaData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/Severe_Burn *****************/
val groupFollowUPBurn=Array("Patient","SB_Body_Surface_Burn_Perc","SB_Revised_Baux_Score","SB_Smoke_Inhalation","SB_Burn_Prophyl_Antibio_Admin")
val groupFollowUPBurnData=df.select(groupFollowUPBurn.head,groupFollowUPBurn.tail:_*)
//groupFollowUPBurnData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/Major_Surgery *****************/
val groupFollowUPSurgery=Array("Patient","MS_Intervention_Type","MS_Prophyl_ATB")
val groupFollowUPSurgeryData=df.select(groupFollowUPSurgery.head,groupFollowUPSurgery.tail:_*)
// groupFollowUPSurgeryData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/EQ5D *****************/
val groupFollowUPEQ5D=Array("Patient","EQ5D_Anxiety_Depression_D9","EQ5D_Form_Completed_D90","EQ5D_Mobility_D90",
"EQ5D_Mobility_D90","EQ5D_Pain_Discomfort_D90","EQ5D_Self_Care_D90","EQ5D_Usual_Activities_D90")
val groupFollowUPEQ5DData=df.select(groupFollowUPEQ5D.head,groupFollowUPEQ5D.tail:_*)
//groupFollowUPEQ5DData.dropDuplicates().show(5)
/***************************** Prepare clinical follow up/Group_specific_follow_up/End_of_Study *****************/
val groupFollowUPEnd=Array("Patient","ES_Prema_End","ES_Prema_Reason_End","ES_Other_Reason_Prema_End",
"ES_Exclusion","ES_Specimen_Destruction")
val groupFollowUPEndData=df.select(groupFollowUPEnd.head,groupFollowUPEnd.tail:_*)
// groupFollowUPEndData.dropDuplicates().show(5)
/******************************* Join dataframe test *************************/
// The following code would produce a dataframe with two Patient columns
/* val joinTest=groupFollowUPEQ5DData.join(groupFollowUPEndData,groupFollowUPEQ5DData("Patient")===groupFollowUPEndData("Patient"),"inner")
joinTest.show(5)
*/
/* With the following code, we only have one Patient column*/
//val joinTest=groupFollowUPEQ5DData.join(groupFollowUPEndData,Seq("Patient"),"inner")
//joinTest.show(5)
/******************************* join the column and do a new select ******/
// use column union to get all data without time point
val allColumnsWtihoutTP=demographicColumns
.union(severityRiskFactor)
.union(charlson)
.union(administeredCares)
.union(invasiveDevices)
.union(followUp)
.union(hospitalAcquiredInfection)
.union(groupFollowUPSepsis)
.union(groupFollowUPTrauma)
.union(groupFollowUPBurn)
.union(groupFollowUPSurgery)
.union(groupFollowUPEQ5D)
.union(groupFollowUPEnd).distinct
val columnUnionTest=df.select(allColumnsWtihoutTP.head,allColumnsWtihoutTP.tail:_*).dropDuplicates()
//columnUnionTest.count()
// 552 row (Patient) in total
val columnNumWithoutTP=columnUnionTest.columns.length
//columnUnionTest.orderBy($"Patient".asc).show(2)
}
/*********************************** 17.3 Change date value for easier sorting *******************************/
def ModifyTimePoint(df:DataFrame):DataFrame={
val spark=df.sparkSession
spark.udf.register("changeTimePoint",(timePoint:String)=>changeTimePoint(timePoint))
val dfWithNewTimePoint=df.withColumn("Time_Point",expr("changeTimePoint(TP_Class)"))
dfWithNewTimePoint.select("TP_Class","Time_Point").distinct().show(10)
/*dfWithNewTimePoint.coalesce(1).write.mode(SaveMode.Overwrite)
.option("header","true")
.option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of crc files
.option("encoding", "UTF-8")
.csv(outputPath+"/TimePoint")*/
return dfWithNewTimePoint
}
def changeTimePoint(timePoint:String):String={
timePoint match {
case "D0" => "D00"
case "D1" => "D01"
case "D1-D2" => "D01-D02"
case "D2" => "D02"
case "D3-D4" => "D03-D04"
case "D5-D7" => "D05-D07"
case "D14" => "D14"
case "D28" => "D28"
case "D60" => "D60"
case "HV" => "D00"
case _=>null
}
}
def getStatsOfEachSubGroup(df:DataFrame,groupName:String):DataFrame={
val subGroup=df.filter(df("Subgroup")===groupName)
val subGroupPatientRows=subGroup.groupBy("Patient").count().select("count").distinct().orderBy(asc("count"))
println(s"**************************** All possible patient rows of sub group ${groupName}*******************")
subGroupPatientRows.show()
println(s"**************************** All possible time point of sub group ${groupName}*******************")
val subGroupTimePoint=subGroup.select("TP_Class").distinct().orderBy(asc("TP_Class"))
subGroupTimePoint.show(10)
return subGroup
}
def WriteDataToDisk(df:DataFrame,outputPath:String,fileName:String): Unit ={
df.coalesce(1).write.mode(SaveMode.Overwrite)
.option("header","true")
.option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of crc files
.option("encoding", "UTF-8")
.option("delimiter", outputCsvDelimiter) // set tab as delimiter, required by tranSMART
.csv(outputPath+"/"+fileName)
}
def NormalizeColNameForTransmart(df:DataFrame,colNames:Array[String]):DataFrame={
val spark=df.sparkSession
import spark.implicits._
//Add STUDY id
val dfWithStudyID=df.withColumn("STUDY_ID",lit(studyID))
//change Patient to SUBJ_ID
val dfWithSub=dfWithStudyID.withColumnRenamed("Patient",subjID)
val colNameWithOrder=Array("STUDY_ID",subjID)++colNames.filter(!_.equals(patientIdColName))
val result=dfWithSub.select(colNameWithOrder.head,colNameWithOrder.tail:_*)
return result
}
def ChangeColName(df:DataFrame,nameMap:Map[String,String]):DataFrame={
val oldNames=nameMap.keySet.toArray
var result=df
for(oldName<-oldNames){
result=result.withColumnRenamed(oldName,nameMap.getOrElse(oldName,"No_keys"))
}
return result
}
def countNullValue(df:DataFrame):Unit={
val spark=df.sparkSession
import spark.implicits._
for(colName<-df.columns){
val nullCount=df.filter(df(colName).isNull||df(colName).isNaN||df(colName)===""||df(colName)===nullValue).count()
println(s"The null value count of the column $colName is $nullCount")
}
}
def fillTransmartNullForDigitCol(rawDf:DataFrame,colNames:Array[String],userDefinedNull:String):DataFrame={
val digitNull="."
/*Step 0 : cast all column to string*/
val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*)
//df.show(5)
/*Step 1 : fill na with digitNull to the given column*/
val naFill=df.na.fill(digitNull,colNames)
//naFill.show(5)
/*Step 2: fill user defined null with digitNull*/
val result=replaceSpecValue(naFill,colNames,userDefinedNull,digitNull)
result
}
def fillTransmartNullForStrCol(rawDf:DataFrame,colNames:Array[String],userDefinedNull:String):DataFrame={
val strNull="Not Available"
/*Step 0 : cast all column to string*/
val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*)
// df.show(5)
/*Step 1 : fill na with digitNull to the given column*/
val naFill=df.na.fill(strNull,colNames)
// naFill.show(5)
/*Step 2: fill user defined null with digitNull*/
val result=replaceSpecValue(naFill,colNames,userDefinedNull,strNull)
result
}
def replaceSpecValue(rawDf:DataFrame,colNames:Array[String],specValue:String,newValue:String):DataFrame={
/*Step 0 : cast all column to string*/
val spark=rawDf.sparkSession
import spark.implicits._
val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*)
/*Step 1 : transform spec value to null*/
var result=df
for(colName<-colNames){
val newColName=colName+"_tmp"
result=result.withColumn(newColName, when(result(colName) === specValue, newValue).otherwise(result(colName))) //create a tmp col with digitnull
.drop(colName) //drop the old column
.withColumnRenamed(newColName,colName) // rename the tmp to colName
}
result
}
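/* A hedged side note: for a plain value substitution on string columns, Spark's built-in DataFrameNaFunctions can
 * achieve the same effect in a single call (sketch only, assuming the columns have already been cast to string):
 *
 * rawDf.na.replace(colNames.toSeq, Map(specValue -> newValue))
 */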
def getDistinctValueOfColumns(df:DataFrame,colNames:Array[String],showRange:Int):Unit={
for(colName<-colNames){
df.select(colName).distinct().show(showRange,false)
}
}
def removeRowsWithSpecValues(df:DataFrame,colName:String,specValues:Array[String]):DataFrame={
var result=df
for(specValue<-specValues){
result=result.filter(!(result(colName)===specValue))
}
result
}
def getColumnNumNameMapping(df:DataFrame):scala.collection.immutable.ListMap[Int,String]={
val columns=df.columns
var i=1
var colNameNumMap=Map[Int,String]()
for(col<-columns){
colNameNumMap+=(i->col)
i=i+1
}
ListMap(colNameNumMap.toSeq.sortWith(_._1 < _._1):_*)
}
/***********************************************************************************************************
* ************************************** Annexe *******************************************
* ******************************************************************************************************/
/*
*
* the :_* syntax means "expand this sequence into individual varargs arguments". Otherwise, your sequence of n items
* will be treated as a single argument, i.e. a Seq containing one item (your whole sequence).
*
* val seq = List(1, 2, 3)
funcWhichTakesSeq(seq) //1: Array(List(1, 2, 3)) -i.e. a Seq with one entry
funcWhichTakesSeq(seq: _*) //3: List(1, 2, 3)
* def funcWhichTakesSeq(seq: Any*) = println(seq.length + ": " + seq)
* */
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/WordCount.scala | package org.pengfei.spark
import org.apache.spark.{SparkConf, SparkContext}
object WordCount {
def main(args: Array[String]): Unit ={
val inputFile = "file:///tmp/word.txt"
val conf = new SparkConf().setAppName("WordCount").setMaster("local")
val sc = new SparkContext(conf)
val textFile = sc.textFile(inputFile)
val wordCount = textFile.flatMap(line=>line.split(" ")).map(word=>(word,1)).reduceByKey((a,b)=>a+b)
wordCount.foreach(println)
wordCount.saveAsTextFile("file:///tmp/wordCount")
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson17_Analyze_Clinical_Data/Lesson17_Row_To_Column_To_Row.scala | package org.pengfei.Lesson17_Analyze_Clinical_Data
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.functions._
object Lesson17_Row_To_Column_To_Row {
/* implicit class NullOccludingMap[K, V](private val underlying: Map[K, V]) extends AnyVal {
def getNonNullOrElse(key: K, default: V): V = {
underlying.get(key) match {
case Some(value) if value != null => value
case _ => default
}
}
}*/
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson10_Spark_Application_ETL").getOrCreate()
import spark.implicits._
/* Here, we reuse the data from Lesson10. In Lesson10 we generated the stats of the data with the code below;
 * to speed things up, we wrote those stats to a file named stats.csv and simply read that file here instead of
 * regenerating it each time. It can be generated with the following code: */
/* val filePath="/DATA/data_set/spark/basics/Lesson10_Spark_Application_ETL/hospital_data"
val outputPath="/DATA/data_set/spark/basics/Lesson17_Analyse_Clinical_Data"
val block1Name="/block_1.csv"
val df=spark.read.option("header","true").option("nullValue","?").option("inferSchema","true").csv(filePath+block1Name)
val stats=df.describe()
stats.show(5)
stats.coalesce(1).write.mode(SaveMode.Overwrite)
.option("header","true")
.option("encoding","UTF-8")
.option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating of job success files
.csv(outputPath+"/row_to_Column")*/
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val statsFilePath = s"${path}/spark_lessons/Lesson17_Analyse_Clinical_Data/row_to_Column/stats.csv"
val stats = spark.read.option("header", "true").option("nullValue", "?").option("inferSchema", "true").csv(statsFilePath)
stats.show(5)
//stats.printSchema()
/** ************************************************************************************************************
* *******************************1. Column to Row *********************************************************
* ************************************************************************************************************/
/* In the first part we want to transform columns to rows,
origin df (stats)
|summary| id_1| id_2| cmp_fname_c1| cmp_fname_c2| cmp_lname_c1| ... |
+-------+------------------+-----------------+------------------+------------------+-------------------+
| count| 574913| 574913| 574811| 10325| 574913|
| mean|33271.962171667714| 66564.6636865056|0.7127592938252765|0.8977586763518972|0.31557245780987964|
| stddev| 23622.66942593358|23642.00230967225|0.3889286452463553|0.2742577520430534| 0.3342494687554251|
| min| 1| 6| 0.0| 0.0| 0.0|
| max| 99894| 100000| 1.0| 1.0| 1.0|
+-------+------------------+-----------------+------------------+------------------+
*
* to the following form statsInRow
*
* +------+------------+-------------------+
|metric| field| value|
+------+------------+-------------------+
| count| id_1| 5749132.0|
| count| id_2| 5749132.0|
| count|cmp_fname_c1| 5748125.0|
...
| count| cmp_by| 5748337.0|
| count| cmp_plz| 5736289.0|
| mean| id_1| 33324.48559643438|
| mean| id_2| 66587.43558331935|*/
val columns = stats.columns
val statsInRow: Dataset[(String, String, Double)] = stats.flatMap(row => {
// The first column is the metric id, which is also the first column of origin df
val metric = row.getAs[String](0)
// The second column is the filed name, which is the column name of origin df
// The third column is the filed value, which is the row value of each column of the origin df.
// each row of the origin df has 11 columns, which need to be transformed into 11 rows in the new df
(1 until row.size).map(i =>
(metric, columns(i), row.getDouble(i))
)
})
val statsInRowDf = statsInRow.toDF("metric", "filedID", "filedValue")
statsInRowDf.show(5)
/** ************************************************************************************************************
* *******************************2. Row to Column *********************************************************
* ************************************************************************************************************/
/* Row to Column has two scenarios:
* - 1. the row count for each subject is the same and the fields appear in the same order (as in this example); we
*      can use the pivot function to do the job easily
* - 2. the row count differs between subjects; the pivot function can no longer be used.
*
* In this Lesson, we will show how to work with the two scenarios*/
/** *********************************** Scenario 1 **************************************************************/
val statsInColumn = statsInRowDf.groupBy("metric")
.pivot("filedID")
.agg(expr("coalesce(first(filedValue), \"pending\")"))
statsInColumn.show(10)
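/* A hedged performance note: the distinct pivot values are already known here (they are simply the original column
 * names), so they could be passed to pivot explicitly and Spark would skip the extra job that computes them:
 *
 * statsInRowDf.groupBy("metric")
 *   .pivot("filedID", columns.drop(1).toSeq)
 *   .agg(expr("coalesce(first(filedValue), \"pending\")"))
 */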
/** ***********************************Scenario 2 ***********************************************************/
/* Pivot won't work in Scenario 2, because the number and order of rows differ between subjects.
* Solution 1: add null rows in the right order for each subject - too complex to be practical.
* Solution 2: Step1. get all possible field names,
*             Step2. convert the rows of the same subject (metric) into a single map of (key,value) pairs,
*                    putting null as the value when no row exists for a given field name,
*             Step3. create one column per (key,value) pair. */
val statsInColumn2=RowToColumn(statsInRowDf,"metric","filedID","filedValue")
statsInColumn2.show(10)
}
def RowToColumn(df: DataFrame, objectIdColumnName:String,targetIdColumnName: String,targetValueColumnName:String): DataFrame = {
val spark = df.sparkSession
import spark.implicits._
/* Step1. Get all filedIDs,*/
val filedIDs = df.select(targetIdColumnName).distinct().orderBy(df(targetIdColumnName).asc)
//filedIDs.show(10)
// make the Array of the filed Ids.
val filedIDsArray: Array[String] = filedIDs.collect().map(_.get(0).toString)
/*for(filedId<-filedIDsArray){
println(s"filedId value is ${filedId}")
}
*/
/* Step2. Build the <key,value> map for each subject. */
/* //Spark provide map function which do the same thing as the following code
def buildFiledMap(filedName:String,filedValue:Double):Map[String,Double]={
Map(filedName->filedValue)
}
spark.udf.register("buildFiledMap",(arg1:String,arg2:Double)=>buildFiledMap(arg1,arg2))
val statsMap=statsInRowDf.withColumn("filed_map",expr("buildFiledMap(filedID,filedValue)"))
statsMap.show(5)
*/
val filedIdValueMap = df.withColumn("filed_map", map(df(targetIdColumnName), df(targetValueColumnName)))
//filedIdValueMap.show(5)
/* Step3. Group the filed map for each distinct subject */
val groupedStats = filedIdValueMap.groupBy(objectIdColumnName)
.agg(collect_list("filed_map"))
.as[(String, Seq[Map[String, Double]])] // <-- leave Rows for typed pairs
.map { case (id, list) => (id, list.reduce(_ ++ _)) } // <-- collect all entries under one map
.toDF(objectIdColumnName, "filed_map")
// groupedStats.show(10, false)
/* Step 4. Complete filed map for missing filed*/
val bFiledIDsArray: Broadcast[Array[String]] = spark.sparkContext.broadcast(filedIDsArray)
/*val completeStats = groupedStats.map(row => {
val metric = row.getString(0)
var currentFiledMap = scala.collection.mutable.Map(row.getAs[Map[String, Double]](1).toSeq: _*)
(0 until bFiledIDsArray.value.length).map { i =>
val filedId: String = bFiledIDsArray.value(i)
if (!currentFiledMap.contains(filedId)) {
currentFiledMap += (filedId -> 12345.54321)
}
}
(metric, currentFiledMap)
})
val completeStatsDf = groupedStats.toDF("metric", "filed_map")
completeStatsDf.show(10, false)
*/
/* Step 5. Create column for each field, with the getFiledValue function, the step 4 may be omitted*/
def getFiledValue(filedId: String, filedMap: Map[String, Double]): String = {
filedMap.getOrElse(filedId, "").toString
}
spark.udf.register("getFiledValue", (arg1: String, arg2: Map[String, Double]) => getFiledValue(arg1, arg2))
var tmpDf = groupedStats
(0 until bFiledIDsArray.value.length).map { i =>
val filedId: String = bFiledIDsArray.value(i)
tmpDf = tmpDf.withColumn("current_id", lit(filedId))
.withColumn(filedId, expr("getFiledValue(current_id,filed_map)"))
.drop("current_id")
}
val result=tmpDf.drop("filed_map")
return result
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_5_2_2_MLlib_API_Models.scala | package org.pengfei.Lesson05_Spark_ML
import java.util
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.ml.clustering.GaussianMixture
import org.apache.spark.mllib.classification.SVMWithSGD
import org.apache.spark.mllib.clustering.{GaussianMixtureModel, KMeans, KMeansModel, PowerIterationClustering}
import org.apache.spark.mllib.evaluation.{BinaryClassificationMetrics, MulticlassMetrics, RegressionMetrics}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionModel, LinearRegressionWithSGD}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.mllib.tree.{DecisionTree, GradientBoostedTrees, RandomForest}
object Lesson05_5_2_2_MLlib_API_Models {
/***********************************************************************************************************
* ************************************5.5.2.2 MLlib API models *********************************************
* *****************************************************************************************************/
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson5_5_2_2_MLlib_API_Models").getOrCreate()
/*
* In this lesson, we describe some of MLlib's abstractions for representing machine learning algorithms and models.
* As said before, MLlib keeps growing, so the models presented here reflect the state of MLlib as of 2017.
*
* An algorithm describes how a model is built to solve the ML problem; a model is the set of all parameters (weights,
* intercepts, etc.) that result from applying an algorithm to a dataset. The model can then be used to predict
* results for unseen data.
*
* A model in MLlib is represented by a class. MLlib provides different classes for representing models
* trained with different machine learning algorithms.
*
* Similarly, a machine learning algorithm is represented by a class. MLlib also generally provides a companion
* singleton object with the same name for each machine learning algorithm class. It is more convenient to
* train a model using a singleton object representing a machine learning algorithm.
*
* Training and using a model generally involves two key methods: train and predict. The train method
* is provided by the singleton objects representing machine learning algorithms. It trains a model with a given
* dataset and returns an instance of an algorithm-specific model class. The predict method is provided by the
* classes representing models. It returns a label for a given set of features.
*
* Spark comes prepackaged with sample datasets that can be used to experiment with the MLlib API. For
* simplicity, in this lesson, the examples use those sample datasets.
*
* Some of the data files are in the LIBSVM format. Each line stores an observation. The first column is a
* label. It is followed by the features, which are represented as offset:value, where offset is the index into the
* feature vector, and value is the value of a feature.
*
* The MLlib library provides helper functions that create RDD[LabeledPoint] from files containing
* labeled data in the LIBSVM format. These methods are provided by the MLUtils object, which is available in
* the org.apache.spark.mllib.util package.
* */
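/* A hedged illustration of the LIBSVM loading step (the file path is a placeholder; the splitData helper used in
 * the examples below presumably wraps something similar):
 *
 * // a LIBSVM line looks like:  1.0 1:0.5 3:2.1  -> label 1.0, feature 1 = 0.5, feature 3 = 2.1
 * val data: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(spark.sparkContext, "/path/to/data_libsvm.txt")
 * val Array(train, test) = data.randomSplit(Array(0.8, 0.2), seed = 12345L)
 */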
/***********************************5.5.2.2.1 MLlib API Regression models **********************************/
// LinearRegressionWithSGDExample(spark)
// RandomForestExample(spark)
/***********************************5.5.2.2.2 MLlib API Classification models **********************************/
// SVMWithSGDExample(spark)
// DecisionTreeExample(spark)
/***********************************5.5.2.2.3 MLlib API Clustering models **********************************/
// KMeansExample(spark)
// PowerIterationExample(spark)
/***********************************5.5.2.2.4 MLlib API Recommendation models **********************************/
ALSExample(spark)
}
/***********************************************************************************************************
* ************************************5.5.2.2.1 MLlib API Regression models ******************************
* *****************************************************************************************************/
/*
* The list of MLlib classes representing different regression algorithms includes
* - LinearRegressionWithSGD (deprecated since spark 2.0.0)
* - RidgeRegressionWithSGD
* - LassoWithSGD
* - ElasticNetRegression
* - IsotonicRegression
* - DecisionTree
* - GradientBoostedTrees
* - RandomForest.
* MLlib also provides companion singleton objects with the same names. These classes and objects provide methods
* for training regression models.*/
/*
* All examples in this Lesson use singleton objects for training a model. There are two most important methods in
* these singleton objects of ML algo:
* - train
* - predict
*
* The train method of a regression algorithm object trains or fits a linear regression model with a dataset
* provided to it as input. It takes an RDD of LabeledPoints as an argument and returns an algorithm-specific
* regression model.
*
* For example, the train method in the LinearRegressionWithSGD object returns an
* instance of the LinearRegressionModel class. Similarly, the train method in the DecisionTree object
* returns an instance of the DecisionTreeModel class.
*
* The train method also takes a few additional algorithm-specific hyperparameters as arguments. For
* example, the train method in the RidgeRegressionWithSGD object takes as arguments the number of
* iterations of gradient descent to run, step size for each gradient descent iteration, regularization parameter,
* fraction of data to use in each iteration, and initial set of weights. Similarly, the train method in the
* GradientBoostedTrees object takes a boosting strategy as an argument.*/
def LinearRegressionWithSGDExample(spark:SparkSession):Unit={
/******************************LinearRegressionWithSGD*******************************************/
/* linear regression model with no regularization using Stochastic Gradient Descent. This solves the least
* squares regression formulation f(weights) = 1/n ||A weights-y||^2 (which is the mean squared error).
* */
/* Step 1. prepare data
 * org.apache.spark.mllib.util.MLUtils provides a method which reads the LIBSVM format and returns an
 * RDD[LabeledPoint]. Then we split the RDD into train and test sets with a 0.8 / 0.2 ratio. */
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sample_regression_data.txt"
val splited=splitData(spark,filePath)
val train=splited._1
val test=splited._2
/* Step 2. Define hyper parameters for the model (LineraRegressionWithSGD). In our case, it's the number
* of iterations. We choose 100 for speed, it's not optimal for accuracy.
* */
val numIterations = 100
// since spark 2.0, the below is deprecated. Use ml.regression.LinearRegression or LBFGS
val lrModel=LinearRegressionWithSGD.train(train,numIterations)
// check the model parameters after training, In a linear model Y=a1X1+a2X2+...+C
// Y is the label
// X1, X2, ... are the features, a1,a2,... are the weights of these labels
// C is the intercept (constant) which is the expected mean value of Y when all features=0
val intercept=lrModel.intercept
val weights=lrModel.weights
println(s"The intercept has value: ${intercept}, weights has value: ${weights}")
/* Step 3. predict with the test data*/
val predictResult=lrModel.predict(test.map{ob=>ob.features})
println(s"test result ${predictResult.toString()}")
/* Step 4. validation/Evaluation of the model*/
//Create a rdd paire of actual and predict value of the observation
val actualAndPredict=test.map{ob=>(ob.label,lrModel.predict(ob.features))}
// create an instance of the RegressionMetrics class
val regressionMetrics=new RegressionMetrics(actualAndPredict)
// check the various evaluation metrics
val mse=regressionMetrics.meanSquaredError
val rmse=regressionMetrics.rootMeanSquaredError
val mae=regressionMetrics.meanAbsoluteError
println(s"The mse value is ${mse}, The rmse value is ${rmse}, The mae value is ${mae}")
/* Step 5. model persistence*/
/* We can persist a trained model to disk using the save method. It takes a SparkContext and a path as
 * arguments and saves the model to the given path. */
val modelPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/models"
// lrModel.save(spark.sparkContext,modelPath)
/* The load method is defined in the companion model objects. It generates an instance of model from a previous
* saved model. It takes a sparkContext and the path of the saved model as arguments and returns an instance of
* a model class*/
val loadedModel=LinearRegressionModel.load(spark.sparkContext,modelPath)
/**********************************Export Model to Predictive Model Markup language (PMML)*****************/
/* PMML is an XML-based format for describing and serializing models generated by machine learning algorithms. It
* enables different applications to share models. With PMML, you can train a model in one application and
* use it from another application.
*
* */
val pmml=lrModel.toPMML()
println(s"pmml content is : ${pmml}")
}
def RandomForestExample(spark:SparkSession):Unit={
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sample_regression_data.txt"
val splited=splitData(spark,filePath)
val train=splited._1
val test=splited._2
/*********************************specify hyperparameters*******************************/
/* categoricalFeaturesInfo is a Map storing the arity of categorical features. A Map entry (n -> k) indicates
 * that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1} */
val categoricalFeaturesInfo=Map[Int,Int]()
// specifies number of trees in the random forest
val numTrees = 3
/* featureSubsetStrategy specifies the number of features to consider for splits at each node. MLlib supports:
 * - auto : the model chooses a value based on numTrees; if numTrees == 1 it is set to "all", otherwise
 * it is set to "onethird".
* - all
* - sqrt
* - log2
* - onethird*/
val featureSubsetStrategy = "auto"
/* impurity specifies the criterion used for information gain calculation. Supported values: "variance" */
val impurity = "variance"
/* maxDepth specifies the maximum depth of the tree. Depth 0 means 1 leaf node; depth 1 means 1
* internal node + 2 leaf nodes. Max leafNumber = 2^depth, Max total node number= 2^0+2^1+...2^depth
* Suggested value: 4*/
val maxDepth = 4
/* maxBins specifies the maximum number of bins to use for splitting features. Suggested value: 100*/
val maxBins = 32
/****************************************Tain model*****************************************************/
/* As Tree models(DecisionTree,RandomForest,gradienBoosted, etc.) can both resolve regression and calssification
* problem, so we use trainRegressor or trainClassifier instead of train */
val rfModel=RandomForest.trainRegressor(train,categoricalFeaturesInfo,numTrees,featureSubsetStrategy,impurity,maxDepth,maxBins,12345)
/****************************************Predict***********************************************/
/* The predict method of a regression model returns a numerical label for a given set of features. It takes a Vector
 * or an RDD[Vector] as argument and returns a value of type Double or an RDD of Double.
 *
 * Thus, it can be used to predict a label either for a single observation or for a whole dataset. */
val predictResult=rfModel.predict(test.map{ob=>ob.features})
/******************************************validation*********************************************/
val actualAndPredictLabel=test.map{
ob=>(ob.label,rfModel.predict(ob.features))
}
val regressionMetrics=new RegressionMetrics(actualAndPredictLabel)
val mse=regressionMetrics.meanSquaredError
val rmse=regressionMetrics.rootMeanSquaredError
val mae=regressionMetrics.meanAbsoluteError
println(s"The mse value is ${mse}, The rmse value is ${rmse}, The mae value is ${mae}")
}
/***********************************************************************************************************
* ************************************5.5.2.2.2 MLlib API Classification algorithms ******************************
* *****************************************************************************************************/
/*
* The list of MLlib classes representing different classification algorithms includes:
* - LogisticRegressionWithSGD
* - LogisticRegressionWithLBFGS
* - SVMWithSGD
* - NaiveBayes
* - DecisionTree,
* - GradientBoostedTrees
* - RandomForest
* MLlib also provides companion singleton objects with the same names. These classes and objects provide methods
* for training classification models, which are also referred to as classifiers.
*
* In this lesson, all examples uses singleton objects and their method to represent classification algorithms
* for training a model.*/
def SVMWithSGDExample(spark:SparkSession)={
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sample_classification_libsvm_data.txt"
val splited=splitData(spark,filePath)
val train=splited._1
val test=splited._2
val labels=test.map(ob=>ob.label).distinct().collect().toArray
println(s"distinct label value of test : ${labels.mkString(";")}")
/********************************specify hyperparameters***********************************/
val numIterations= 20
/********************************train model***********************/
val svmModel=SVMWithSGD.train(train,numIterations)
/**********************************predict******************************/
val predictResult=svmModel.predict(test.map(ob=>ob.features))
/************************************validation***************************/
//Build the pair RDD of (predicted score, actual label); BinaryClassificationMetrics expects the score first
val predictedAndActualLabels = test.map{
ob=>(svmModel.predict(ob.features),ob.label)
}
// Create an instance of the BinaryClassificationMetric class
val metrics = new BinaryClassificationMetrics(predictedAndActualLabels)
// precision by threshold
val precision=metrics.precisionByThreshold()
precision.foreach{case(t,p)=>println(s"Threshold : $t, Precision: $p")}
// recall by threshold
val recall=metrics.recallByThreshold()
recall.foreach{case(t,r)=> println(s"Threshold : $t, Recall: $r")}
//precision-recall curve
val prc=metrics.pr()
println(s"precison recall curve : $prc")
// F-measure, the beta arg=1.0
val f1Score=metrics.fMeasureByThreshold(1.0)
f1Score.foreach { case (t, f) =>
println(s"Threshold: $t, F-score: $f, Beta = 1.0")
}
// beta =0.5
val fScore=metrics.fMeasureByThreshold(0.5)
fScore.foreach { case (t, f) =>
println(s"Threshold: $t, F-score: $f, Beta = 0.5")
}
// get area under curve metric
val auRoc=metrics.areaUnderROC()
val auPrc=metrics.areaUnderPR()
println(s"auRoc value is ${auRoc}, auPrc value is ${auPrc}")
}
def DecisionTreeExample(spark:SparkSession):Unit={
val irisPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/iris_libsvm.txt"
val splited=splitData(spark,irisPath)
val train=splited._1
val test=splited._2
val labels=test.map(ob=>ob.label).distinct().collect().toArray
println(s"distinct label value ${labels.mkString(";")}")
//hyperParameters
val numClasses=8
val categoricalFeaturesInfo=Map[Int,Int]()
// The valide value for impurity are gini, entropy
val impurity = "gini"
val maxDepth = 4
val maxBins =16
// train model
val gbtModel=DecisionTree.trainClassifier(train,numClasses,categoricalFeaturesInfo,impurity,maxDepth,maxBins)
// predict result
val predictAndActual=test.map{ob=>(gbtModel.predict(ob.features),ob.label)}
// validation, there are three possible labels in iris dataset. so we need to use MultiClassMetrics
val metrics=new MulticlassMetrics(predictAndActual)
//recall by label; we can't use metrics inside a foreach or map over an RDD or DataFrame, because it's not serializable
// val arrayLabels=Array(0.0,1.0,2.0)
labels.foreach(l=>println(s"Recall of label ${l} is ${metrics.recall(l)}"))
//precision by label
labels.foreach(l=>println(s"Precision of label ${l} is ${metrics.precision(l)}"))
//false positive rate by label
labels.foreach(l=>println(s"FPR of label ${l} is ${metrics.falsePositiveRate(l)}"))
//F-measure by label
labels.foreach(l=>println(s"F1 score of label ${l} is ${metrics.fMeasure(l)}"))
// Confusion matrix
println(s"Confusion matrix : ${metrics.confusionMatrix}")
// Overall statistics
println(s"Summary statistics accuracy= : ${metrics.accuracy}")
}
/***********************************************************************************************************
* ************************************5.5.2.2.3 MLlib API Clustering algorithms ******************************
* *****************************************************************************************************/
/*
* The latest doc https://spark.apache.org/docs/latest/mllib-clustering.html
* The list of MLlib classes representing different clustering algorithms includes
* - KMeans
* - StreamingKMeans
* - Bisecting k-means
* - GaussianMixture
* - Latent Dirichlet allocation (LDA)
* - PowerIterationClustering (PIC)
* MLlib also provides companion singleton objects with the same names.
* The methods provided for training clustering models are briefly described next.
*
* As we explained in previous class, clustering algo works on unknow data, which means no labels. So to train a
* clustering model, we need an RDD of Vector and hyperparameters
*
* The hyperparameter arguments and the type of the returned model depend on the clustering algorithm.
* For example, the hyperparameters accepted by the train method in the KMeans object include the number
* of clusters, maximum number of iterations in each run, number of parallel runs, initialization modes, and
* random seed value for cluster initialization. It returns an instance of the KMeansModel class.*/
/****************************************K-means*****************************************************/
def KMeansExample(spark:SparkSession):Unit={
// get data from file
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/kmeans_data.txt"
val vectors=getClusterData(spark,filePath)
// println(s"vectors value: ${vectors.first().toString}")
//define hyperParameters
val numClusters = 2
val numIterations = 200
// train the kmean Model
val kMeanModel=KMeans.train(vectors,numClusters,numIterations)
// Evaluate clustering by computing within set sum of squared errors
/* The computeCost method returns the sum of the squared distances of the observations from their nearest
* cluster centers. It can be used to evaluate a KMeans model.*/
val WSSSE=kMeanModel.computeCost(vectors)
println(s"Within set sum of squared errors = ${WSSSE}")
// predict can return a cluster index for a given observation. It takes a Vector of features and return a Int
val obs1=Vectors.dense(0.0, 0.0, 0.0)
val obs2=Vectors.dense(9.0, 8.8, 9.9)
val obs3=Vectors.dense(5.5, 5.6, 5.0)
val index1=kMeanModel.predict(obs1)
val index2=kMeanModel.predict(obs2)
val index3=kMeanModel.predict(obs3)
// predict can take also RDD[Vector] as argument
println(s"obs1 in cluster ${index1}, obs2 in cluster ${index2},obs3 in cluster ${index3}")
/********************************model persistence*******************************************/
val modelSavePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/models/kmean-model"
kMeanModel.save(spark.sparkContext, modelSavePath)
val loadKMeanModel=KMeansModel.load(spark.sparkContext,modelSavePath)
val kMeansPMML=kMeanModel.toPMML()
println(s"kMeans model pmml value: ${kMeansPMML}")
}
/************************************PowerIteration***********************************************/
def PowerIterationExample(spark:SparkSession):Unit={
// get data from file
val circlesRdd = generateCirclesRdd(spark.sparkContext, 2, 100)
    /* PowerIterationClustering does not take an RDD[Vector] as argument; it takes an RDD[(Long, Long, Double)]
     * of (srcId, dstId, similarity) tuples */
val model = new PowerIterationClustering()
.setK(2)
.setMaxIterations(20)
.setInitializationMode("degree")
.run(circlesRdd)
val clusters = model.assignments.collect().groupBy(_.cluster).mapValues(_.map(_.id))
val assignments = clusters.toList.sortBy { case (k, v) => v.length }
val assignmentsStr = assignments
.map { case (k, v) =>
s"$k -> ${v.sorted.mkString("[", ",", "]")}"
}.mkString(", ")
val sizesStr = assignments.map {
_._2.length
}.sorted.mkString("(", ",", ")")
println(s"Cluster assignments: $assignmentsStr\ncluster sizes: $sizesStr")
}
/***********************************************************************************************************
* ****************************5.5.2.2.4 MLlib API Recommendation algorithms ******************************
* *****************************************************************************************************/
/* MLlib supports collaborative filtering, which learns latent factors describing users and products from
* a dataset containing only user identifiers, product identifiers, and ratings. Collaborative filtering-based
* recommendation system can be developed in MLlib using the ALS (alternating least squares) algorithm.
* MLlib provides a class named ALS, which implements Alternating Least Squares matrix factorization. It also
* provides a companion singleton object with the same name.
*
* The latest doc(spark2.3.1) https://spark.apache.org/docs/latest/mllib-collaborative-filtering.html
*
* MLlib supports both ratings and implicit feedbacks. A rating is available when a user explicitly rates a
* product. For example, users rate movies and shows on Netflix. Similarly, users rate songs on iTunes, Spotify,
* Pandora, and other music services. However, sometimes an explicit rating is not available, but an implicit
* preference can be determined from user activities. For example, purchase of a product by a user conveys
   * the user’s implicit feedback for that product. Similarly, a user provides implicit feedback for a product by clicking
* a like or share button.
*
* The methods provided by the ALS object for training a recommendation model are briefly described next.*/
def ALSExample(spark:SparkSession):Unit={
//Read and parse rating data
val filePath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/sample_recommendation_data.txt"
val rating_data=spark.sparkContext.textFile(filePath).map{line=>line.split(",") match {
case Array(user,item,rate)=>Rating(user.toInt,item.toInt,rate.toDouble)
}
}
//specify hyperparameters
val rank=10
    // this value must be small in local mode; a large value can cause a stack overflow exception
val numIteration=10
    // build the recommendation model using ALS
/* The train method of the ALS object trains or fits a MatrixFactorizationModel model with an RDD of Rating.
* It takes an RDD of Rating and ALS-specific hyperparameters as arguments and returns an instance of the
* MatrixFactorizationModel class. The hyperparameters for ALS include the number of latent features,
* number of iterations, regularization factor, level of parallelism, and random seed. The last three are optional.*/
val model = ALS.train(rating_data,rank,numIteration,0.01)
//Evaluate the model on rating data
val usersProducts=rating_data.map{case Rating(user,product,rate)=> (user,product)}
val predictRate=model.predict(usersProducts).map{
case Rating(user,product,rate)=>((user,product),rate)
}
val actualAndPredsRates = rating_data.map{case Rating(user,product,rate)=>((user,product),rate)}.join(predictRate)
    //calculate the mean squared error between the actual and predicted rating values
val MSE = actualAndPredsRates.map{case ((user,product),(actual,predict)) =>
val err=(actual-predict)
err*err
}.mean()
println(s"Mean squared Error =${MSE}")
/*********************************Model persistence*************************************/
val alsModelPath="/home/pliu/data_set/spark_data_set/spark_lessons/Lesson5_Spark_ML/models/ALS"
// model.save(spark.sparkContext,alsModelPath)
val loadedModel=MatrixFactorizationModel.load(spark.sparkContext,alsModelPath)
//ALS does not have toPMML() method
/***********************************TrainImplicit***********************************/
/* The trainImplicit method can be used when only implicit user feedback for a product is available. Similar
* to the train method, it takes an RDD of Rating and ALS-specific hyperparameters as arguments and returns
* an instance of the MatrixFactorizationModel class.
* The following code snippet uses implicit feedbacks and the ALS object to train a recommendation model.*/
//confidence parameter
val alpha=0.01
//regularization parameter
val lambda = 0.01
val implicitModel=ALS.trainImplicit(rating_data,rank,numIteration,lambda,alpha)
/*******************************************Recommendation *********************************************/
/**************predict rating between user and product*************************/
val uId=1
val pId=1
val predictedRate=model.predict(uId,pId)
println(s"predicted rate value is : ${predictedRate}")
/*The predict method also takes a RDD[(uid,pid)]*/
    /******************recommend products for users *********************************/
/* recommendProducts method takes a uid and productNum as arguments and returns an Array of Rating. Each Rating
* object includes the given user id, product id and a predicted rating score. The returned Array is sorted by
* rating score in descending order. A high rating score indicates a strong recommendation.*/
val numProducts=2
val recommendedProducts=implicitModel.recommendProducts(uId,numProducts)
println(s"recommended products for user ${uId} is ${recommendedProducts.toArray.mkString(";")} ")
/*
* The recommendProductsForUsers method recommends the specified number of top products for all
* users. It takes the number of products to recommend as an argument and returns an RDD of users and
* corresponding top recommended products.*/
    val recommendedProductsForAllUsers=implicitModel.recommendProductsForUsers(numProducts)
    println(s"recommended products for all users ${recommendedProductsForAllUsers.take(4).toArray.mkString(";")}")
    /******************recommend users for a product *********************************/
/* The recommendUsers method recommends the specified number of users for a given product. This method
* returns a list of users who are most likely to be interested in a given product. It takes a product id and number
* of users to recommend as arguments and returns an Array of Rating. Each Rating object includes a user id,
* the given product id and a score in the rating field. The array is sorted by rating score in descending order*/
val usersNum=2
val recommendedUsers=implicitModel.recommendUsers(pId,usersNum)
println(s"The recommended users for product ${pId} are ${recommendedUsers.toArray.mkString(";")}")
/* The recommendUsersForProducts method recommends the specified number of users for all products.
* It takes the number of users to recommend as an argument and returns an RDD of products and
* corresponding top recommended users.*/
val recommendedUsersForAllProducts=implicitModel.recommendUsersForProducts(usersNum)
println(s"Recommeded Users for all products ${recommendedUsersForAllProducts.take(3).toArray.mkString(";")}")
}
/***********************************************************************************************************
* ****************************5.5.2.2.5 MLlib API Model Evaluation ******************************
* *****************************************************************************************************/
/*
* As mentioned in each model example, evaluating a machine learning model before it is used with new data is an
* important step. We often use quantitative metrics to evaluate the effectiveness of a model.
*
* MLlib comes prepackaged with classes that make it easy to evaluate models. These classes are available in the
* org.apache.spark.mllib.evaluation package. The list of model evaluation related classes includes:
* - BinaryClassificationMetrics
* - MulticlassMetrics
* - MultilabelMetrics
* - RankingMetrics
* - RegressionMetrics
*
* You can find doc of spark 2.3.1 https://spark.apache.org/docs/2.3.1/mllib-evaluation-metrics.html*/
/*************************************** Regression metrics ***********************************************/
/* The RegressionMetrics class can be used for evaluating models generated by regression algorithms. It provides
* methods for calculating the following metrics
* - mean squared error
* - root mean squared error
* - mean absolute error
* - R2
* - etc.
*
* You can find the example code in line 145 of section 5.5.2.2.1 MLlib api regression model
* */
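  /* A minimal hedged sketch of RegressionMetrics usage (this is not the original line-145 example): it assumes an
   * RDD of (prediction, label) pairs produced by some regression model on labeled test data. Fully qualified class
   * names are used so no extra imports are assumed. */
  def regressionMetricsSketch(predictionAndLabels: RDD[(Double, Double)]): Unit = {
    val regMetrics = new org.apache.spark.mllib.evaluation.RegressionMetrics(predictionAndLabels)
    println(s"MSE = ${regMetrics.meanSquaredError}")
    println(s"RMSE = ${regMetrics.rootMeanSquaredError}")
    println(s"MAE = ${regMetrics.meanAbsoluteError}")
    println(s"R2 = ${regMetrics.r2}")
  }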
/**************************************Binary Classification metrics******************************************/
/* The BinaryClassificationMetrics class can be used for evaluating binary classifiers. It provides
* methods for calculating receiver operating characteristic (ROC) curve, area under the receiver operating
* characteristic (AUC) curve, and other metrics.
*
* The example demonstrates how to use an instance of the BinaryClassificationMetrics
* class to evaluate a binary classifier can be found in line 272*/
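  /* A minimal hedged sketch of BinaryClassificationMetrics usage (not the original line-272 example): it assumes
   * an RDD of (score, label) pairs produced by some binary classifier. Fully qualified class names are used so no
   * extra imports are assumed. */
  def binaryClassificationMetricsSketch(scoreAndLabels: RDD[(Double, Double)]): Unit = {
    val binMetrics = new org.apache.spark.mllib.evaluation.BinaryClassificationMetrics(scoreAndLabels)
    println(s"Area under ROC = ${binMetrics.areaUnderROC()}")
    println(s"Area under precision-recall curve = ${binMetrics.areaUnderPR()}")
    // roc() returns an RDD of (false positive rate, true positive rate) points of the ROC curve
    binMetrics.roc().take(5).foreach(println)
  }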
/***********************************Multiclass Classification Metrics ***********************************/
/* The MulticlassMetrics class can be used for evaluating multi-class or multi-nominal classifiers. In a
* multi-class classification task, a label is not binary. An observation can take one of many possible labels.
* For example, a model that recognizes the images of animals is a multi-class classifier. An image can have the
* label cat, dog, lion, elephant, or some other label.
*
* For evaluating a multi-nominal classifier, the MulticlassMetrics class provides methods for
* calculating precision, recall, F-measure, and other metrics.
*
* The example code can be found in line 333 .*/
/*************************************** Multilabel Classification Metrics**********************************/
/* The MultilabelMetrics class can be used for evaluating multi-label classifiers. In a multi-label classification
   * task, an observation can have more than one label. The difference between a multi-label and
* multi-class dataset is that labels are not mutually exclusive in a multi-label classification task, whereas labels
* are mutually exclusive in a multi-class classification task. An observation in a multi-class classification task
* can take on only one of many labels.
*
* An example of a multi-label classifier is a model that classifies an animal into different categories such
* as mammal, reptile, fish, bird, aquatic, terrestrial, or amphibian. An animal can belong to two categories;
* a whale is a mammal and an aquatic animal.*/
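  /* A minimal hedged sketch of MultilabelMetrics usage. It assumes an RDD of (predicted labels, actual labels)
   * pairs, each represented as an Array[Double]; the values below are purely illustrative. A fully qualified class
   * name is used so no extra import is assumed. */
  def multilabelMetricsSketch(spark: SparkSession): Unit = {
    val predictionAndLabels = spark.sparkContext.parallelize(Seq(
      (Array(0.0, 1.0), Array(0.0, 2.0)),
      (Array(0.0, 2.0), Array(0.0, 1.0)),
      (Array.empty[Double], Array(0.0))
    ))
    val mlMetrics = new org.apache.spark.mllib.evaluation.MultilabelMetrics(predictionAndLabels)
    println(s"Accuracy = ${mlMetrics.accuracy}")
    println(s"Hamming loss = ${mlMetrics.hammingLoss}")
    println(s"Micro precision = ${mlMetrics.microPrecision}")
    println(s"Micro recall = ${mlMetrics.microRecall}")
  }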
/*************************************** Recommendation Metrics ***************************************/
/* The RankingMetrics class can be used for evaluating recommendation models. It provides methods for
* quantifying the predictive effectiveness of a recommendation model.
*
* The metrics supported by the RankingMetrics class include
* - mean average precision
* - normalized discounted cumulative gain
* - precision at k.
* You can read details about these metrics in the paper titled, "IR evaluation methods for retrieving highly
* relevant documents" by <NAME> and <NAME>.*/
  /* This function reads a LibSVM file and returns training data as RDD[LabeledPoint], test features as RDD[Vector], and test labels as RDD[Double] */
def getSplitedData(spark:SparkSession,filePath:String): (RDD[LabeledPoint], RDD[Vector],RDD[Double]) ={
val all:RDD[LabeledPoint]=MLUtils.loadLibSVMFile(spark.sparkContext,filePath)
all.cache()
val splited=all.randomSplit(Array(0.8,0.2),123456)
val train=splited(0)
    // remove the label from the test data, keeping only the features
val test=splited(1).map{
case observation:LabeledPoint=>observation.features
}
// remove the features
val testResult=splited(1).map{
case observation:LabeledPoint=>observation.label
}
/*println(s"training data has : ${train.count()} elements")
println(s"training data sample : ${train.first().toString()}")
println(s"test data has : ${test.count()} elements")
println(s"test data sample : ${test.first().toString()}")*/
return Tuple3(train,test,testResult)
}
  /* This function reads a LibSVM file and returns training data and test data, both as RDD[LabeledPoint] */
def splitData(spark:SparkSession,filePath:String): (RDD[LabeledPoint], RDD[LabeledPoint]) ={
val all:RDD[LabeledPoint]=MLUtils.loadLibSVMFile(spark.sparkContext,filePath)
all.cache()
val splited=all.randomSplit(Array(0.8,0.2),123456)
val train=splited(0)
val test=splited(1)
/*println(s"training data has : ${train.count()} elements")
println(s"training data sample : ${train.first().toString()}")
println(s"test data has : ${test.count()} elements")
println(s"test data sample : ${test.first().toString()}")*/
return Tuple2(train,test)
}
def getClusterData(spark:SparkSession,filePath:String):RDD[Vector]={
// read the file and split each line into array of doubles
    val arrayOfDoubles=spark.sparkContext.textFile(filePath).map{
line=>line.split(" ").map(_.toDouble)
}
//convert array of doubles into dense vector
    val vectors=arrayOfDoubles.map{a=>Vectors.dense(a)}.cache()
return vectors
}
  /************************Begin of helper methods for generating data for the PowerIterationClustering example************************/
  //nCircles is the number of clusters (circles); circle i contains i * nPoints points
def generateCirclesRdd(sc: SparkContext, nCircles: Int, nPoints: Int): RDD[(Long, Long, Double)] = {
val points = (1 to nCircles).flatMap { i =>
generateCircle(i, i * nPoints)
}.zipWithIndex
val rdd = sc.parallelize(points)
val distancesRdd = rdd.cartesian(rdd).flatMap { case (((x0, y0), i0), ((x1, y1), i1)) =>
if (i0 < i1) {
Some((i0.toLong, i1.toLong, gaussianSimilarity((x0, y0), (x1, y1))))
} else {
None
}
}
distancesRdd
}
def generateCircle(radius: Double, n: Int): Seq[(Double, Double)] = {
Seq.tabulate(n) { i =>
val theta = 2.0 * math.Pi * i / n
(radius * math.cos(theta), radius * math.sin(theta))
}
}
/**
* Gaussian Similarity: http://en.wikipedia.org/wiki/Radial_basis_function_kernel
*/
def gaussianSimilarity(p1: (Double, Double), p2: (Double, Double)): Double = {
val ssquares = (p1._1 - p2._1) * (p1._1 - p2._1) + (p1._2 - p2._2) * (p1._2 - p2._2)
math.exp(-ssquares / 2.0)
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_5_2_1_MLlib_API_Data_Types.scala |
package org.pengfei.Lesson05_Spark_ML
import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.linalg.distributed._
import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.linalg.{Matrices, Matrix, Vector, Vectors}
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
object Lesson05_5_2_1_MLlib_API_Data_Types {
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson5_5_2_MLlib_API").getOrCreate()
/***********************************************************************************************************
* ************************************5.5.2 MLlib API*************************************************
* *****************************************************************************************************/
/*
* The MLlib library can be used with applications developed in Scala, Java, Python, or R. This Lesson covers
* the Scala version of the MLlib API. The classes and singleton objects provided by MLlib are available under
* the org.apache.spark.mllib package.*/
/**************************************5.5.2.1 Basic Data Types******************************************/
/*
* MLlib’s primary data abstractions are
* - Local Vector
* - LabeledPoint
* - Local matrix
* - Distributed matrix
* -- Row matrix
* -- Indexed Row matrix
* -- Coordinate matrix
* -- Block matrix
* - Rating.
* The machine learning algorithms and statistical utilities in MLlib operate on data represented by these
* abstractions.*/
//LocalMatrixExample(spark)
DistributedMatrixExample(spark)
}
/*****************************************************************************************************
**************************************5.5.2.1.1 Local Vector ****************************************
* **********************************************************************************************/
def VectorExample(spark:SparkSession):Unit={
/* The Local Vector type represents an indexed collection of Double-type values with zero-based index of type Int.
* It is generally used for representing the features of an observation in a dataset. Conceptually, a Vector
* of length n represents an observation with n features. In other words, it represents an element in an
* n-dimensional space.
*
* The Vector type provided by MLlib should not be confused with the Vector type in the Scala collection
* library. They are different. The MLlib Vector type implements the concept of numeric vector from linear
* algebra. An application must import org.apache.spark.mllib.linalg.Vector to use the Vector trait
* provided by MLlib.
*
* The MLlib library supports two types of vectors:
* - dense
* - sparse
*
* The MLlib Vector type is defined as a trait, so an application cannot directly create an instance of Vector.
* Instead, it should use the factory methods provided by MLlib to create either an instance of the DenseVector
* or SparseVector class. These two classes implement the Vector trait. The factory methods for creating an
* instance of the DenseVector or SparseVector class are defined in the Vectors object.*/
/******************************************** Dense Vector **************************************/
/* An instance of the DenseVector class stores a double-type value at each index position. It is backed by an
* array. A dense vector is generally used if a dataset does not have too many zero values. It can be created, as
* shown here.*/
val denseVec:Vector=Vectors.dense(1.0,0.0,3.0)
/* The dense method creates an instance of the DenseVector class from the values provided to it as
* arguments. A variant of the dense method takes an Array of Double type as an argument and returns an
* instance of the DenseVector class. */
val denseVec1:Vector=Vectors.dense(Array(1.0,0.0,3.0))
/****************************************** Sparse Vector *******************************************/
/*
* The SparseVector class represents a sparse vector, which stores only non-zero values. It is an efficient data
* type for storing a large dataset with many zero values. An instance of the SparseVector class is backed by
* two arrays; one stores the indices for non-zero values and the other stores the non-zero values.*/
    //For example, to create a sparse vector of (1.0,0.0,3.0), the sparse method takes three arguments: the 1st is the
    //size of the vector (3 in our case), the 2nd is the array of indices of the non-zero values, and the 3rd is the
    //array of the non-zero values.
val sparseVec1=Vectors.sparse(3,Array(0,2),Array(1.0,3.0))
//You can also replace two arrays by a Seq of (index,value),
val sparseVec2=Vectors.sparse(3,Seq((0,1.0),(2,3.0)))
}
/*****************************************************************************************************
**************************************5.5.2.1.2 Labeled point ****************************************
* **********************************************************************************************/
def LabeledPointExample(spark:SparkSession):Unit={
/* The LabeledPoint type represents an observation in a labeled dataset. It contains both the label (dependent
* variable) and features (independent variables) of an observation. The label is stored as a Double-type value
* and the features are stored as a Vector type.
*
* An RDD of LabeledPoints is MLlib’s primary abstraction for representing a labeled dataset. Both
* regression and classification algorithms provided by MLlib operate only on RDD of LabeledPoints.
* Therefore, a dataset must be transformed to an RDD of LabeledPoints before it can be used to train a model.
*
* Since the label field in a LabeledPoint is of type Double, it can represent both numerical and categorical
* labels. When used with a regression algorithm, the label in a LabeledPoint stores a numerical value. For
* binary classification, a label must be either 0 or 1. 0 represents a negative label and 1 represents a positive
* label. For multi-class classification, labels should be class indices starting from zero: 0, 1, 2, ....
* */
/* The below code represent two observations with 1 label and 3 features, one pos and one neg, */
val positiveRow = LabeledPoint(1.0,Vectors.dense(10.0,30.0,20.0))
val negativeRow = LabeledPoint(0.0,Vectors.sparse(3,Array(0,2),Array(200.0,300.0)))
/* It is very common in practice to have sparse training data. MLlib supports reading training examples stored in
* LIBSVM format, which is the default format used by LIBSVM and LIBLINEAR. It is a text format in which each line
* represents a labeled sparse feature vector using the following format:
*
* label index1:value1 index2:value2 ...
*
* where the indices are one-based and in ascending order. After loading, the feature indices are converted
* to zero-based.
* */
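    /* A minimal hedged sketch of loading a LIBSVM file into an RDD[LabeledPoint]. The path below is only a
     * placeholder, so the lines are left commented out; the fully qualified MLUtils name is used so no extra
     * import is assumed. */
    // val libsvmPath = "/path/to/sample_libsvm_data.txt"
    // val labeledData: RDD[LabeledPoint] =
    //   org.apache.spark.mllib.util.MLUtils.loadLibSVMFile(spark.sparkContext, libsvmPath)
    // println(s"number of labeled observations: ${labeledData.count()}")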
}
/*****************************************************************************************************
**************************************5.5.2.1.3 Local matrix ****************************************
* **********************************************************************************************/
def LocalMatrixExample(spark:SparkSession):Unit={
/* A local matrix has integer-typed row and column indices and double-typed values, stored on a "single machine!!!".
* MLlib supports dense matrices, whose entry values are stored in a single double array in column-major order,
* and sparse matrices, whose non-zero entry values are stored in the Compressed Sparse Column (CSC) format in
* column-major order. For example, the following dense matrix
*
* 1.0 2.0
* 3.0 4.0
* 5.0 6.0
    * is stored in a one-dimensional array [1.0,3.0,5.0,2.0,4.0,6.0] with the matrix size (3,2). Notice that
    * the first three elements are column 1 and the next three elements are column 2.
* */
/*Create a matrix:
* 9.0 0.0
* 0.0 8.0
* 0.0 6.0
*
* */
val denseMatrix:Matrix=Matrices.dense(3,2,Array(9.0,0.0,0.0,0.0,8.0,6.0))
    /* A sparse matrix is stored in compressed sparse column (CSC) format. The sparse method takes 5 arguments:
    * the 1st is the number of rows, the 2nd is the number of columns, the 3rd is the array of column pointers,
    * the 4th is the array of row indices, and the 5th is the array of non-zero values.
    * https://medium.com/@rickynguyen/getting-started-with-spark-day-5-36b62a6d13bf explains well how to
    * calculate the CSC representation */
    val sparseMatrix:Matrix=Matrices.sparse(3,2,Array(0, 1, 3), Array(0, 1, 2), Array(9.0, 8.0, 6.0))
println(sparseMatrix.toString())
}
/*****************************************************************************************************
**************************************5.5.2.1.4 Distributed matrix *********************************
* **********************************************************************************************/
def DistributedMatrixExample(spark:SparkSession):Unit= {
/* A distributed matrix has long-typed row and column indices and double-typed values, stored distributively
* in one or more RDDs. It is very important to choose the right format to store large and distributed matrices.
* Converting a distributed matrix to a different format may require a global shuffle, which is quite expensive.
* Four types of distributed matrices have been implemented so far:
* - RowMatrix
* - IndexedRowMatrix
* - CoordinateMatrix
* - BlockMatrix*/
/****************************************RowMatrix**********************************************/
/* A RowMatrix is a row-oriented distributed matrix without meaningful row indices, e.g., a collection of
* feature vectors. It is backed by an RDD of its rows, where each row is a local vector. We assume that the
* number of columns is not huge for a RowMatrix so that a single local vector can be reasonably communicated
* to the driver and can also be stored / operated on using a single node.
*
* A RowMatrix can be created from an RDD[Vector] instance. Then we can compute its column summary
* statistics and decompositions. QR decomposition is of the form A = QR where Q is an orthogonal matrix
* and R is an upper triangular matrix. For singular value decomposition (SVD) and principal component
* analysis (PCA), please refer to Dimensionality reduction.
*
*
*/
val rows=Array(Vectors.dense(1.0,2.0,3.0),
Vectors.dense(1.0,2.0,3.0),
Vectors.dense(1.0,2.0,3.0),
Vectors.dense(1.0,2.0,3.0))
//Build RDD[Vector]
val vecRDD=spark.sparkContext.parallelize(rows)
//Build Matrix
val mat:RowMatrix=new RowMatrix(vecRDD)
//Get its size
val m=mat.numRows()
val n=mat.numCols()
//QR decomposition
val qrResult=mat.tallSkinnyQR(true)
println(s"matrix has ${m} rows, ${n} columns, qrResult is ${qrResult.toString}")
/***********************************************IndexedRow Matrix********************************************/
/* An IndexedRowMatrix can be created from an RDD[IndexedRow] instance, where IndexedRow is a wrapper over
* (Long, Vector). An IndexedRowMatrix can be converted to a RowMatrix by dropping its row indices.*/
val indexedRows=Array(IndexedRow(0,Vectors.dense(1.0,2.0,3.0)),
IndexedRow(1,Vectors.dense(1.0,2.0,3.0)),
IndexedRow(2,Vectors.dense(1.0,2.0,3.0)),
IndexedRow(3,Vectors.dense(1.0,2.0,3.0)))
val indexedRowRDD=spark.sparkContext.parallelize(indexedRows)
val indexMat:IndexedRowMatrix=new IndexedRowMatrix(indexedRowRDD)
// Get its size.
val indexM = indexMat.numRows()
val indexN = indexMat.numCols()
println(s"matrix has ${indexM} rows, ${indexN} columns")
    //Convert it to a RowMatrix, which means dropping its row indices
val rowMat:RowMatrix=indexMat.toRowMatrix()
/*********************************************CoordinateMatrix******************************************************/
/* A CoordinateMatrix is a distributed matrix backed by an RDD of its entries. Each entry is a tuple of
* (i: Long, j: Long, value: Double), where i is the row index, j is the column index, and value is the entry value.
* A CoordinateMatrix should be used only when both dimensions of the matrix are huge and the matrix is very sparse.*/
val matrixEntries=Array(MatrixEntry(0,1,1.0),
MatrixEntry(10,1,2.0),
MatrixEntry(100,2,3.0)
)
val matrixRDD:RDD[MatrixEntry]=spark.sparkContext.parallelize(matrixEntries)
val coordinateMat:CoordinateMatrix=new CoordinateMatrix(matrixRDD)
//get its size
val coordinateM = coordinateMat.numRows()
val coordinateN = coordinateMat.numCols()
println(s"matrix has ${coordinateM} rows, ${coordinateN} columns")
/*********************************************BlockMatrix******************************************************/
/* A BlockMatrix is a distributed matrix backed by an RDD of MatrixBlocks, where a MatrixBlock is a tuple of
* ((Int, Int), Matrix), where the (Int, Int) is the index of the block, and Matrix is the sub-matrix at the
* given index with size rowsPerBlock x colsPerBlock. BlockMatrix supports methods such as add and multiply
* with another BlockMatrix. BlockMatrix also has a helper function validate which can be used to check
* whether the BlockMatrix is set up properly.*/
//We can convert a coordinateMatrix to a BlockMatrix
val blockMat:BlockMatrix=coordinateMat.toBlockMatrix().cache()
// Validate whether the BlockMatrix is set up properly. Throws an Exception when it is not valid.
// Nothing happens if it is valid.
blockMat.validate()
// Calculate A transpose(invert col and row index) times A.
val ata = blockMat.transpose.multiply(blockMat)
println(s"ata result ${ata.toString}")
}
/*****************************************************************************************************
**************************************5.5.2.1.5 Rating *********************************************
* **********************************************************************************************/
def RatingExample(spark:SparkSession):Unit={
/*
* The Rating type is used with recommendation algorithms. It represents a user’s rating for a product
* or item. A training dataset must be transformed to an RDD of Ratings before it can be used to train a
* recommendation model.
*
* Rating is defined as a case class consisting of three fields. The first field is named user, which is of type
* Int. It represents a user identifier. The second field is named product, which is also of type Int. It represents
* a product or item identifier. The third field is named rating, which of type Double.*/
val rating=Rating(100,10,3.0)
/* This code creates an instance of the Rating class. This instance represents a rating of 3.0 given by a user
* with identifier 100 to a product with identifier 10.*/
}
} |
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/SteamingSource.scala |
package org.pengfei.Lesson06_Spark_Streaming
import java.io.PrintWriter
import java.net.ServerSocket
import scala.io.Source
object SteamingSource {
def index(n:Int)=scala.util.Random.nextInt(n)
def main(args:Array[String]): Unit ={
// This object main takes three arguments, 1st is filePath, 2nd port number, 3rd is timeInterval
// verify args length
if(args.length !=3){
System.err.println("Usage: <fileName> <port> <millisecond>")
System.exit(1)
}
// assign args
val fileName= args(0)
val lines= Source.fromFile(fileName).getLines().toList
val fileRowNum=lines.length
val port:Int=args(1).toInt
val timeInterval:Long=args(2).toLong
// set up server socket with the given port
val listener = new ServerSocket(port)
//always running and waiting for new client connection
while(true){
// When a new client connected, starts a new thread to treat client request
val socket=listener.accept()
new Thread(){
override def run={
// get client info
println("Got client connected from: " + socket.getInetAddress)
val out = new PrintWriter(socket.getOutputStream(), true)
while(true){
Thread.sleep(timeInterval)
//get a random line of the file and send it to client
val content=lines(index(fileRowNum))
println("-------------------------------------------")
println(s"Time: ${System.currentTimeMillis()}")
println("-------------------------------------------")
println(content)
out.write(content + '\n')
out.flush()
}
socket.close()
}
}.start()
}
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/Sanfransico911.scala | package org.pengfei.spark.application.example
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
//dependencies for timestamp functions (e.g. year, totimestamp)
import org.apache.spark.sql.functions._
object Sanfransico911 {
def main (args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local"). //spark://10.70.3.48:7077 remote
appName("Sanfransico911").
getOrCreate()
import spark.implicits._
/*
* read data from csv file
* */
//val inputFile="hdfs://hadoop-nn.bioaster.org:9000/test_data/iot_devices.json"
val inputFile="hdfs://localhost:9000/test_data/Fire_Department_Calls_for_Service.csv"
val fireSchema = StructType(Array(
StructField("CallNumber", IntegerType, true),
StructField("UnitID", StringType, true),
StructField("IncidentNumber", IntegerType, true),
StructField("CallType", StringType, true),
StructField("CallDate", StringType, true),
StructField("WatchDate", StringType, true),
StructField("ReceivedDtTm", StringType, true),
StructField("EntryDtTm", StringType, true),
StructField("DispatchDtTm", StringType, true),
StructField("ResponseDtTm", StringType, true),
StructField("OnSceneDtTm", StringType, true),
StructField("TransportDtTm", StringType, true),
StructField("HospitalDtTm", StringType, true),
StructField("CallFinalDisposition", StringType, true),
StructField("AvailableDtTm", StringType, true),
StructField("Address", StringType, true),
StructField("City", StringType, true),
StructField("ZipcodeofIncident", IntegerType, true),
StructField("Battalion", StringType, true),
StructField("StationArea", StringType, true),
StructField("Box", StringType, true),
StructField("OriginalPriority", StringType, true),
StructField("Priority", StringType, true),
StructField("FinalPriority", IntegerType, true),
StructField("ALSUnit", BooleanType, true),
StructField("CallTypeGroup", StringType, true),
StructField("NumberofAlarms", IntegerType, true),
StructField("UnitType", StringType, true),
StructField("Unitsequenceincalldispatch", IntegerType, true),
StructField("FirePreventionDistrict", StringType, true),
StructField("SupervisorDistrict", StringType, true),
StructField("NeighborhoodDistrict", StringType, true),
StructField("Location", StringType, true),
StructField("RowID", StringType, true)))
val df = spark.read.format("com.databricks.spark.csv").option("header", "true").schema(fireSchema).load(inputFile)
//df.show(5)
//df.printSchema()
//println(df.getClass.getName)
//Q1.How many different types of calls were made to the Fire Department
//val calltype=df.select(df("CallType")).distinct()
//println(calltype.count)
//calltype.show(5)
df.createOrReplaceTempView("911_call")
//Q2. How many incidents of each call type were there?
    val callNumOfType=df.select(df("CallType"),df("RowID")).groupBy("CallType").count()
    //callNumOfType.orderBy($"count".desc).show(5)
val call=spark.sql("select CallType, count(distinct RowID) as call_num from 911_call group by CallType order by call_num desc limit 100")
//call.show(5)
//Q-3) How many years of Fire Service Calls is in the data file?
val call_year=df.select(df("CallDate"))
    //Notice that the dates are in string format; we need to convert them into timestamps
//call_year.show(5)
val from_pattern1 = "MM/dd/yyyy"
val to_pattern1 = "yyyy-MM-dd"
val from_pattern2 = "MM/dd/yyyy hh:mm:ss aa"
val to_pattern2 = "MM/dd/yyyy hh:mm:ss aa"
val dateDf = df.withColumn("CallDateTS", to_timestamp($"CallDate", from_pattern1)).drop("CallDate")
.withColumn("WatchDateTS", to_timestamp($"WatchDate", from_pattern1)).drop("WatchDate")
.withColumn("ReceivedDtTmTS", to_timestamp($"ReceivedDtTm", from_pattern2)).drop("ReceivedDtTm")
.withColumn("EntryDtTmTS", to_timestamp($"EntryDtTm", from_pattern2)).drop("EntryDtTm")
.withColumn("DispatchDtTmTS", to_timestamp($"DispatchDtTm", from_pattern2)).drop("DispatchDtTm")
.withColumn("ResponseDtTmTS", to_timestamp($"ResponseDtTm", from_pattern2)).drop("ResponseDtTm")
.withColumn("OnSceneDtTmTS", to_timestamp($"OnSceneDtTm", from_pattern2)).drop("OnSceneDtTm")
.withColumn("TransportDtTmTS", to_timestamp($"TransportDtTm", from_pattern2)).drop("TransportDtTm")
.withColumn("HospitalDtTmTS", to_timestamp($"HospitalDtTm", from_pattern2)).drop("HospitalDtTm")
.withColumn("AvailableDtTmTS", to_timestamp($"AvailableDtTm", from_pattern2)).drop("AvailableDtTm")
// dateDf.select(year($"CallDateTS")).distinct().orderBy(year($"CallDateTS").desc).show()
//dateDf.printSchema()
// Q-4) How many service calls were logged in the past 7 days?
    // Suppose that today is July 6th, which is the 187th day of the year. Filter the DF down to just 2016 and days of year between 180 and 187:
val last7day=dateDf.filter(year($"CallDateTS")===2016).filter(dayofyear($"CallDateTS")>=180).filter(dayofyear($"CallDateTS")<=187).select($"CallDateTS").distinct().orderBy($"CallDateTS".desc)
last7day.show(10)
}
}
/*
*|-- CallNumber: integer (nullable = true)
|-- UnitID: string (nullable = true)
|-- IncidentNumber: integer (nullable = true)
|-- CallType: string (nullable = true)
|-- CallDate: string (nullable = true)
|-- WatchDate: string (nullable = true)
|-- ReceivedDtTm: string (nullable = true)
|-- EntryDtTm: string (nullable = true)
|-- DispatchDtTm: string (nullable = true)
|-- ResponseDtTm: string (nullable = true)
|-- OnSceneDtTm: string (nullable = true)
|-- TransportDtTm: string (nullable = true)
|-- HospitalDtTm: string (nullable = true)
|-- CallFinalDisposition: string (nullable = true)
|-- AvailableDtTm: string (nullable = true)
|-- Address: string (nullable = true)
|-- City: string (nullable = true)
|-- ZipcodeofIncident: integer (nullable = true)
|-- Battalion: string (nullable = true)
|-- StationArea: string (nullable = true)
|-- Box: string (nullable = true)
|-- OriginalPriority: string (nullable = true)
|-- Priority: string (nullable = true)
|-- FinalPriority: integer (nullable = true)
|-- ALSUnit: boolean (nullable = true)
|-- CallTypeGroup: string (nullable = true)
|-- NumberofAlarms: integer (nullable = true)
|-- UnitType: string (nullable = true)
|-- Unitsequenceincalldispatch: integer (nullable = true)
|-- FirePreventionDistrict: string (nullable = true)
|-- SupervisorDistrict: string (nullable = true)
|-- NeighborhoodDistrict: string (nullable = true)
|-- Location: string (nullable = true)
|-- RowID: string (nullable = true)
*
* */ |
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/feature/select/FeatureSelection_ChiSqSelector.scala |
package org.pengfei.spark.ml.feature.select
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.ChiSqSelector
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession
object FeatureSelection_ChiSqSelector {
def main(args:Array[String])={
/*
    * Feature selection is a process which finds the most useful features and eliminates useless (irrelevant) ones.
* There are many feature selection methods available such as mutual information, information gain, and
* chi square test.
*
* Just as machine learning, there are supervised and unsupervised feature selection method
*
* Chi square test is a common supervised feature selection method which we will use in this tutorial
*
* */
/*
* Problem Statement
*
* Suppose there are N instances, and two classes: positive and negative. Given a feature X, we can use Chi Square
* Test to evaluate its importance to distinguish the class.
*
* By calculating the Chi square scores for all the features, we can rank the features by the chi square scores,
* then choose the top ranked features for model training.
*
* This method can be easily applied for text mining, where terms or words or N-grams are features.
* After applying chi square test, we can select the top ranked terms as the features to build a text mining model.
* */
/*
* Understand Chi Square Test
*
* Chi Square Test is used in statistics to test the independence of two events. Given dataset about two events,
* we can get the observed count O and the expected count E. Chi Square Score measures how much the expected counts
* E and observed Count O derivate from each other.
*
* In feature selection, the two events are occurrence of the feature and occurrence of the class.
*
* In other words, we want to test whether the occurrence of a specific feature and the occurrence of a specific
* class are independent.
*
* If the two events are dependent, we can use the occurrence of the feature to predict the occurrence of the class.
* We aim to select the features, of which the occurrence is highly dependent on the occurrence of the class.
*
* When the two events are independent, the observed count is close to the expected count, thus a small chi square
* score. So a high value of \chi^2 indicates that the hypothesis of independence is incorrect. In other words,
    * the higher the \chi^2 score, the more likely the feature is correlated with the class, thus it
* should be selected for model training.
* */
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("FeatureSelection_ChiSqSelector").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val df = spark.createDataFrame(Seq(
(1, Vectors.dense(0.0, 0.0, 18.0, 1.0), 1),
(2, Vectors.dense(0.0, 1.0, 12.0, 0.0), 0),
(3, Vectors.dense(1.0, 0.0, 15.0, 0.1), 0)
)).toDF("id","features","label")
    df.show()
val selector = new ChiSqSelector()
.setNumTopFeatures(1)
.setFeaturesCol("features")
.setLabelCol("label")
.setOutputCol("selected-feature")
val selector_model = selector.fit(df)
val result= selector_model.transform(df)
    result.show(false)
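    /* A hedged variant, reusing the same df: in recent Spark 2.x versions ChiSqSelector also supports other
     * selector types, e.g. keeping a percentage of features instead of a fixed number. The 50% value below is
     * only illustrative. */
    val percentileSelector = new ChiSqSelector()
      .setSelectorType("percentile")
      .setPercentile(0.5)
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setOutputCol("selected-feature")
    percentileSelector.fit(df).transform(df).show(false)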
}
}
|
pengfei99/Spark | common_utils/scala/data_validation.scala | /**
* This function counts the null cell number
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
 * @param colName name of the column in which null cells are counted
 * @return Long, It returns the number of null cells
**/
def getNullCount(df: DataFrame, colName: String): Long = {
df.select(colName).filter(col(colName).isNull).count()
}
/**
* This function counts the null cell number for all columns of the source data frame
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame, It returns a data frame which contains three columns ("column_name","null_cell_count","null_cell_percentage")
**/
def getNullCountForAllCols(df: DataFrame): DataFrame = {
val spark = df.sparkSession
val totalLineNb = df.count()
import spark.implicits._
val buf = scala.collection.mutable.ListBuffer.empty[(String, Long, Double)]
for (colName <- df.columns) {
val nulLineNb = getNullCount(df, colName)
val nullPercentage: Double = (nulLineNb.toDouble / totalLineNb.toDouble) * 100
buf.append((colName, nulLineNb, nullPercentage))
}
val result = buf.toList.toDF("column_name", "null_cell_count", "null_cell_percentage")
return result
}
/**
* This function counts the not a number cell number
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
 * @param colName name of the column in which NaN cells are counted
 * @return Long, It returns the number of NaN cells
**/
def getNaNCount(df: DataFrame, colName: String): Long = {
df.select(colName).filter(col(colName).isNaN).count()
}
/**
* This function uses regular expression to check if a string value is a digit or not.
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param value A string value
* @return Boolean
**/
def isDigit(value: String): Boolean = {
if (value ==null) return true
if (value.equals(" ")) return false
else {
// we use regular expression,
// ^-? : It means it may starts with -
// [0-9]+ : followed by at least one digit between 0 and 9
// (\.|,) : It can be separated by . or , we need protect . with \. because its a key word in regular expression.
// [0-9]+ : followed by at least one digit.
// ((\.|,)[0-9]+)? : means this is optional.
return value.matches("^-?[0-9]+((\\.|,)[0-9]+)?$")
}
}
/**
* This function returns a new data frame which contains a new column which indicates the target column contains
* digits or not
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @param colName the column name which we need to check if it contains no digit number or not
* @return DataFrame
**/
def getIsDigitDF(df: DataFrame, colName: String): DataFrame = {
val spark=df.sparkSession
import spark.implicits._
//register a udf for isDigit method
spark.udf.register("isDigit",(arg:String)=>isDigit(arg))
//create column colName_isDigit,
df.withColumn(s"${colName}_isDigit", expr(s"isDigit($colName)"))
}
/**
* This function is the overload version of getIsDigitDF, it takes a list of column names, and returns a new data
* frame which contains a new column for each target column which indicates the target column contains
* digits or not
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @param colNames A list of the column name which we need to check if it contains no digit number or not
* @return DataFrame
**/
def getIsDigitDF(df: DataFrame, colNames: List[String]): DataFrame = {
var result=df
for(colName<-colNames){
result=getIsDigitDF(result,colName)
}
return result
}
/**
* This function takes a data frame produced by getIsDigitDF, it counts the cell that is not a digit
* and calculates a percentage based on the total number, then returns these information as a data frame
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame
**/
def getIsNotDigitCount(df: DataFrame): DataFrame = {
val spark = df.sparkSession
import spark.implicits._
// get column names as an array
val colNames = df.columns.toArray
val totalCount = df.count()
// create a buffer to store result before converting to data frame
val buf = scala.collection.mutable.ListBuffer.empty[(String, Long, Double)]
for (colName <- colNames) {
val index = colName.lastIndexOf("_isDigit")
if (index > 1) {
val sourceColName = colName.substring(0, index)
val noDigitCount = df.filter(col(colName) === false).count()
val percentage: Double = (noDigitCount.toDouble / totalCount.toDouble) * 100
buf.append((sourceColName, noDigitCount, percentage))
}
}
buf.toList.toDF("column_name", "isNotDigit_cell_count", "isNotDigit_cell_percentage")
}
/**
* This function takes a data frame produced by getIsDigitDF, it shows distinct values of the cell that is not a digit
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @param colNames list of column names that contains cell which is not digit
* @return DataFrame
**/
def showNotDigitValues(df:DataFrame,colNames:List[String])={
for(colName<-colNames){
val badValues= df.filter(col(s"${colName}_isDigit")===false).select(colName).distinct()
badValues.show(badValues.count().toInt,false)
}
}
/**
* This function takes a data frame produced by getIsDigitDF, it returns a new data frame which contains only the
* lines with bad values(String value in a digit column).
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame
**/
def showLinesWithBadValue(df:DataFrame):DataFrame={
val spark=df.sparkSession
// get column names as an Array
val colNames=df.columns.toArray
// get schema of the data frame
val schema=df.schema
// to create an empty data frame with a specific schema, we need to use the sparkContext to create an empty RDD
val sc=spark.sparkContext
var result:DataFrame=spark.createDataFrame(sc.emptyRDD[Row], schema)
for(colName<-colNames){
if(colName.contains("_isDigit")){
result=result.union(df.filter(col(colName)===false))
}
}
return result
}
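/**
 * A hedged usage sketch (not part of the original utilities): it chains the functions above to profile a data
 * frame. The column names passed in are whatever digit-like columns the caller wants to check.
 *
 * @param df source data frame
 * @param digitCols columns expected to contain only digit values
 **/
def profileDataFrame(df: DataFrame, digitCols: List[String]): Unit = {
  // null statistics for every column
  getNullCountForAllCols(df).show(false)
  // flag cells that are not digits in the chosen columns, then summarise and display them
  val flagged = getIsDigitDF(df, digitCols)
  getIsNotDigitCount(flagged).show(false)
  showNotDigitValues(flagged, digitCols)
  showLinesWithBadValue(flagged).show(false)
}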
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_5_1_Spark_Structure_Streaming.scala |
package org.pengfei.Lesson06_Spark_Streaming
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.StructType
object Lesson06_5_1_Spark_Structure_Streaming {
/*****************************************************************************************************************
* ********************************** 6.5.1 Structure Streaming *************************************************
* ***************************************************************************************************************/
/*
* Spark official doc: https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#programming-model
* Structured Streaming is a scalable and fault-tolerant stream processing engine built on the ""Spark SQL engine"".
* You can express your streaming computation the same way you would express a batch computation on static data.
* The Spark SQL engine will take care of running it incrementally and continuously and updating the final result
* as streaming data continues to arrive. You can use the Dataset/DataFrame API in Scala, Java, Python or R to
* express streaming aggregations, event-time windows, stream-to-batch joins, etc. The computation is executed
* on the same optimized Spark SQL engine. Finally, the system ensures end-to-end exactly-once fault-tolerance
* guarantees through checkpointing and Write Ahead Logs. In short, Structured Streaming provides fast, scalable,
* fault-tolerant, end-to-end exactly-once stream processing without the user having to reason about streaming.
*
* Internally, by default, Structured Streaming queries are processed using a micro-batch processing engine, which
* processes data streams as a series of small batch jobs thereby achieving end-to-end latencies as low as
* 100 milliseconds and exactly-once fault-tolerance guarantees. However, since Spark 2.3, we have introduced
* a new low-latency processing mode called Continuous Processing, which can achieve end-to-end latencies as low as
* 1 millisecond with at-least-once guarantees. Without changing the Dataset/DataFrame operations in your queries,
* you will be able to choose the mode based on your application requirements.
*
* In this guide, we are going to walk you through the programming model and the APIs. We are going to explain
* the concepts mostly using the default micro-batch processing model, and then later discuss Continuous Processing
* model. */
/**/
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder.appName("Lesson6_5_Spark_Structure_Streaming").master("local[2]").getOrCreate()
/********************************************First Example********************************/
// FirstExample(spark)
/*******************************************Input source Example ***********************************/
FileInputSourceExample(spark)
}
/*****************************************************************************************************************
* ********************************** 6.5.2 First Example *************************************************
* ***************************************************************************************************************/
def FirstExample(spark:SparkSession):Unit={
/*
* Let’s say you want to maintain a running word count of text data received from a data server listening on a TCP
* socket. Let’s see how you can express this using Structured Streaming. First, we have to import the necessary
* classes and create a local SparkSession, the starting point of all functionalities related to Spark.
* */
import spark.implicits._
val host="localhost"
val port=9999
val lines=spark.readStream.format("socket").option("host",host).option("port",port).load()
//spark.readStream returns a untyped dataframe.
println(s"${lines.getClass.getName}")
lines.isStreaming // Returns True for DataFrames that have streaming sources
lines.printSchema
    // The DataFrame is untyped, meaning that its schema is not checked at compile time, only at runtime when
    // the query is submitted. Some operations like map, flatMap, etc. need the type to be known at compile time.
    // In the example below, to use flatMap, we convert the DataFrame to a Dataset of String using .as[String]
val words=lines.as[String].flatMap(_.split(" "))
val wordCounts = words.groupBy("value").count()
    /* The lines DataFrame represents an unbounded table containing the streaming text data. This table contains
* one column of strings named “value”, and each line in the streaming text data becomes a row in the table. Note,
* that this is not currently receiving any data as we are just setting up the transformation, and have not yet
* started it. flatMap operation to split each line into multiple words. The resultant words Dataset contains all the words.
* Finally, we have defined the wordCounts DataFrame by grouping by the unique values in the Dataset and counting
* them. Note that this is a streaming DataFrame which represents the running word counts of the stream.
* */
    /* We now have the streaming data processing logic. Next we need to start receiving data and apply the above logic.
* The below code shows us how to print the complete set of counts (outputMode("complete")) to the console every
* time they are updated. And the start method start the streaming computation*/
val query=wordCounts.writeStream.outputMode("complete").format("console").start()
query.awaitTermination()
}
/*****************************************************************************************************************
* ********************************** 6.5.3 Structure Streaming Concept *****************************************
* ***************************************************************************************************************/
/* The key idea in Structured Streaming is to treat a live data stream as a table that is being continuously appended.
* This leads to a new stream processing model that is very similar to a batch processing model. You will express your
* streaming computation as standard batch-like query as on a static table, and Spark runs it as an incremental query
* on the unbounded input table. Let’s understand this model in more detail.*/
/*****************************************6.5.3.1 Basic Concepts************************************************/
/* Consider the input data stream as the "Input table". Every data item that is arriving on the stream is like a
* new row being appended to the input table.
*
* A query on the input will generate the "Result Table". Every trigger interval (for example, every 1 second),
* new rows get appended to the input table. Which eventually updates the Result table. Whenever the result table
* gets updated, we would want to write the changed result rows to an example sink.
*
* The "output" can have 3 different mode:
* - Complete Mode : The entire updated Result Table will be written to the external storage. It is up to the storage
* connector to decide how to handle writing of the entire table. Often used in query with
* aggregation computation.
* - Append Mode (default) : Only the new rows appended in the Result Table since the last trigger will be written to the
* external storage. This is applicable only on the queries where existing rows in the Result Table
* are not expected to change. Hence this mode guarantees that each row will be output only once
* (assuming fault-tolerant sink)
* - Update Mode : Only the rows that we updated in the Result Table since the last trigger will be written to the
* external storage (available since Spark 2.1.1). Note that this is different from the complete Mode
* in that this mode only outputs the rows that have changed since the last trigger. If the query
* does not contain aggregations. It will be equivalent to Append mode.
*
* Note that Structured Streaming does not materialize the entire table. It reads the latest available data from
* the streaming data source, processes it incrementally to update the result, and then discards the source data.
* It only keeps around the minimal intermediate state data as required to update the result
* (e.g. intermediate counts in the earlier example).
*
* This model is significantly different from many other stream processing engines. Many streaming systems require
* the user to maintain running aggregations themselves, thus having to reason about fault-tolerance, and data
* consistency (at-least-once, or at-most-once, or exactly-once). In this model, Spark is responsible for updating
* the Result Table when there is new data, thus relieving the users from reasoning about it.
*
*
   * Different types of streaming queries support different output modes; see the compatibility matrix in the official documentation.
*
* */
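  /* A minimal hedged sketch of the output modes described above, reusing an aggregated streaming DataFrame such
   * as the wordCounts built in FirstExample. Only complete mode is started here; update and append are noted as
   * alternatives. A fully qualified DataFrame type is used so no extra import is assumed. */
  def outputModeSketch(wordCounts: org.apache.spark.sql.DataFrame): Unit = {
    // Complete mode: the whole result table is rewritten to the sink on every trigger
    val query = wordCounts.writeStream.outputMode("complete").format("console").start()
    // Update mode alternative (only rows changed since the last trigger are written):
    // wordCounts.writeStream.outputMode("update").format("console").start()
    // Append mode would only work here with a watermark, because wordCounts is an aggregation.
    query.awaitTermination()
  }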
/******************************** 6.5.3.2 Handling Event-time and Late Data ****************************************/
/* Event-time is the time embedded in the data itself. For many applications, you may want to operate on this
* event-time. For example, if you want to get the number of events generated by IoT devices every minute, then you
* probably want to use the time when the data was generated (that is, event-time in the data), rather than the time
* Spark receives them. This event-time is very naturally expressed in this model – each event from the devices is
* a row in the table, and event-time is a column value in the row. This allows window-based aggregations
* (e.g. number of events every minute) to be just a special type of grouping and aggregation on the event-time column
* – each time window is a group and each row can belong to multiple windows/groups. Therefore, such
* event-time-window-based aggregation queries can be defined consistently on both a static dataset
* (e.g. from collected device events logs) as well as on a data stream, making the life of the user much easier.
*
* Furthermore, this model naturally handles data that has arrived later than expected based on its event-time.
* Since Spark is updating the Result Table, it has full control over updating old aggregates when there is late data,
* as well as cleaning up old aggregates to limit the size of intermediate state data. Since Spark 2.1, we have support
* for watermarking which allows the user to specify the threshold of late data, and allows the engine to accordingly
* clean up old state. These are explained later in more detail in the Window Operations section.*/
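  /* A minimal sketch (using the testing rate source, not part of the original lesson) of event-time windowing with a
   * watermark: rows arriving more than 10 minutes later than the current max event-time are dropped and the
   * corresponding old window state can be cleaned up. */
  def WatermarkSketchExample(spark:SparkSession):Unit={
    import org.apache.spark.sql.functions.window
    // The rate source generates (timestamp, value) rows, which is enough to demonstrate event-time grouping
    val events=spark.readStream.format("rate").option("rowsPerSecond","5").load()
    val counts=events
      .withWatermark("timestamp","10 minutes")             // threshold of tolerated lateness
      .groupBy(window(events("timestamp"),"1 minute"))     // tumbling 1-minute event-time windows
      .count()
    val query=counts.writeStream.outputMode("update").format("console").start()
    query.awaitTermination()
  }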
/******************************** 6.5.3.3 Fault Tolerance Semantics ****************************************/
/* Delivering end-to-end exactly-once semantics was one of the key goals behind the design of Structured Streaming.
* To achieve that, we have designed:
* - Structured Streaming sources
* - sinks
* - execution engine
* to reliably track the exact progress of the processing so that it can handle any kind of failure by restarting
* and/or reprocessing. Every streaming source is assumed to have offsets (similar to Kafka offsets, or Kinesis
* sequence numbers) to track the read position in the stream. The engine uses checkpointing and write ahead logs
* to record the offset range of the data being processed in each trigger. The streaming sinks are designed to be
* idempotent for handling reprocessing. Together, using replayable sources and idempotent sinks, Structured Streaming
* can ensure end-to-end exactly-once semantics under any failure.*/
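  /* A minimal sketch (placeholder paths) of the fault-tolerance machinery described above: the checkpointLocation
   * option stores the processed offset ranges and state in a write-ahead log, so the query can be restarted after a
   * failure, while the parquet file sink handles reprocessing without duplicating output. */
  def CheckpointSketchExample(spark:SparkSession):Unit={
    val userSchema=new StructType().add("name","string").add("age","integer")
    val userDf=spark.readStream.option("sep",";").schema(userSchema).csv("/tmp/streaming/csv_input")   // placeholder input dir
    val query=userDf.writeStream
      .outputMode("append")
      .option("checkpointLocation","/tmp/streaming/checkpoints")   // placeholder checkpoint dir
      .format("parquet")
      .option("path","/tmp/streaming/parquet_output")              // placeholder output dir
      .start()
    query.awaitTermination()
  }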
/***************************************** 6.5.3.4 Data types ********************************************/
/* Since Spark 2.0, DataFrames and Datasets can represent static, bounded data, as well as streaming, unbounded data.
* Similar to static Datasets/DataFrames, you can use the common entry point SparkSession to create streaming
* DataFrames/Datasets from streaming sources, and apply the same operations on them as static DataFrames/Datasets.
* If you are not familiar with Datasets/DataFrames, you can revisit Lesson4 spark sql*/
/*****************************************************************************************************************
* ***************************** 6.5.4 Creating streaming DataFrames and DataSets *******************************
* ***************************************************************************************************************/
/* Streaming DataFrames can be created through the DataStreamReader interface (Scala/Java/Python) returned by
* SparkSession.readStream(). In R, with the read.stream() method. Similar to the read interface for creating
* static DataFrame, you can specify the details of the source – data format, schema, options, etc.*/
/***************************** 6.5.4.1 Input Sources **********************************************/
/* There are a few built-in sources :
 * - File source : Reads files written in a directory as a stream of data. Supported file formats are text, csv, json,
* orc, parquet. See the following link for more details :
* https://spark.apache.org/docs/2.3.1/api/java/org/apache/spark/sql/streaming/DataStreamReader.html
* Note that the files must be atomically placed in the given directory, which in most file systems,
* can be achieved by the file move operations.
*
* - Kafka source : Reads data from Kafka. It's compatible with Kafka broker version 0.10.0 or higher. See the Kafka
* integration doc (https://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html)
*
* - Socket source (For testing): Reads UTF8 text data from a socket connection. The listening server socket is at the
* driver. Note that this should be used only for testing as this does not provide
* end-to-end fault-tolerance guarantees.
*
 * - Rate source (for testing) : Generates data at the specified number of rows per second; each output row contains a
 *                               timestamp and a value, where timestamp is a Timestamp type containing the time of message
 *                               dispatch, and value is of Long type containing the message count, starting from 0 for
 *                               the first row. This source is intended for testing and benchmarking.
*
 * Note that the socket source is not fault-tolerant, because it does not guarantee that data can be replayed using
* checkpointed offsets after a failure.
* */
def FileInputSourceExample(spark:SparkSession):Unit={
/* Note that the file input source works like the streaming context file source. Updates to the content of an existing
 * file will not be picked up by the stream; only newly created files are picked up.*/
/**************************************CSV file input source *********************************************/
CSVInputSourceExample(spark)
}
/* This example reads csv files and returns a DataFrame which contains users older than 23. */
def CSVInputSourceExample(spark:SparkSession):Unit={
val path="/DATA/data_set/spark/basics/Lesson6_Spark_Streaming/structrued_streaming"
val userSchema = new StructType().add("name","string").add("age","integer")
val userDf=spark.readStream.option("sep",";").schema(userSchema).csv(path)
val oldDf=userDf.filter(userDf("age")>23)
val query=oldDf.writeStream.outputMode("update").format("console").start()
query.awaitTermination()
}
/*************************************Kafka input source ***********************************************/
/* See the section 6.5.6 Kafka integration*/
/****************** 6.5.4.2 Schema inference and partition of streaming DataFrames/DataSets **********************/
/* By default, Structured Streaming from file based sources requires you to specify the schema, rather than rely on
* Spark to infer it automatically. This restriction ensures a consistent schema will be used for the streaming query,
 * even in the case of failures. For ad-hoc use cases, you can re-enable schema inference by setting
 * spark.sql.streaming.schemaInference to true (a small sketch is given at the end of this section).
*
* Partition discovery does occur when subdirectories that are named /key=value/ are present and listing will
* automatically recurse into these directories. If these columns appear in the user provided schema, they will be
* filled in by Spark based on the path of the file being read. The directories that make up the partitioning scheme
* must be present when the query starts and must remain static. For example, it is okay to add /data/year=2016/ when
* /data/year=2015/ was present, but it is invalid to change the partitioning column (i.e. by creating the
* directory /data/date=2016-04-17/).
* */
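  /* A minimal sketch (placeholder json directory) of re-enabling schema inference for ad-hoc, file-based streaming
   * reads instead of passing an explicit schema as in the CSV example above. */
  def SchemaInferenceSketchExample(spark:SparkSession):Unit={
    // Off by default for streaming file sources; turning it on lets Spark infer the schema from existing files
    spark.conf.set("spark.sql.streaming.schemaInference","true")
    val df=spark.readStream.json("/tmp/streaming/json_input")   // placeholder input dir
    val query=df.writeStream.outputMode("append").format("console").start()
    query.awaitTermination()
  }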
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson16_Analyzing_Geospatial_Temporal_Data/Lesson16_Analyzing_Geo_Temporal_Data.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson16_Analyzing_Geospatial_Temporal_Data/Lesson16_Analyzing_Geo_Temporal_Data.scala
package org.pengfei.Lesson16_Analyzing_Geospatial_Temporal_Data
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import spray.json.{JsObject, JsString, JsValue, RootJsonFormat}
object Lesson16_Analyzing_Geo_Temporal_Data {
/**********************************************************************************************************
* *************************************16.1 Introduction ********************************************
* **************************************************************************************************/
  /* In this lesson, we will use New York taxi trip data to illustrate how to work with temporal data
   * (dates and times) and geospatial information (points of longitude and latitude). The data can be downloaded from
   * http://www.andresmh.com/nyctaxitrips. The trip_data.7z requires 3.8GB of space.
   *
   * Each row of the file after the header represents a single taxi ride in CSV format. We have some attributes of the
   * cab (a hashed version of the medallion number) as well as the driver (a hashed version of the hack license,
   * which is what licenses to drive taxis are called), some temporal information about when the trip started and ended,
   * and the longitude/latitude coordinates for where the passengers were picked up and dropped off.*/
/**********************************************************************************************************
* *************************************16.2 Working with Geospatial and Temporal data *********************
* **************************************************************************************************/
  /* Temporal data treatment became easier in Java 8 thanks to the java.time package, whose design is based on the
   * highly successful JodaTime library (a small parsing sketch is given at the end of this section).
   *
   * Geospatial data analysis isn't nearly so simple: there are many different libraries and tools that have different
   * functions, states of development, and maturity levels, so there is not a dominant Java library for all geospatial
* use cases.
*
   * The first thing you must consider when choosing a library is to determine what kind of geospatial data you will work
* with. There are two major kinds:
* - Vector
* - Raster
*
* There are different tools for working with each type. In our case, we have latitude and longitude for our taxi
* trip records, and vector data stored in the Geo-JSON format that represents the boundaries of the different boroughs
   * of New York. So we need a library that can parse GeoJSON data and can handle spatial relationships, like detecting
   * whether a given longitude/latitude pair is contained inside a polygon that represents the boundaries of a particular
* borough.
*
* We use the Esri Geometry API for Java, which has few dependencies and can analyze spatial relationships but can
* only parse a subset of the GeoJSON standard, so it won’t be able to parse the GeoJSON data we downloaded without
* us doing some preliminary data munging.
*
* For a data analyst, this lack of tooling might be an insurmountable problem. But we are data scientists: if our
* tools don’t allow us to solve a problem, we build new tools. In this case, we will add Scala functionality for
* parsing all of the GeoJSON data, including the bits that aren’t handled by the Esri Geometry API, by leveraging
* one of the many Scala projects that support parsing JSON data. The original code of the book can be found
* https://github.com/sryza/aas/tree/master/ch08-geotime . The code of geo-json treatment has also been made available
* as a standalone library on GitHub (https://github.com/jwills/geojson), where it can be used for any kind of
* geospatial analysis project in Scala.
*
*
* */
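  /* A minimal sketch (assumed "yyyy-MM-dd HH:mm:ss" pattern and UTC zone) of parsing the pickup/dropoff times with
   * java.time, i.e. the kind of conversion needed to fill the Long time fields of the Trip case class defined below. */
  def parseTaxiTimeSketch(raw: String): Long = {
    import java.time.{LocalDateTime, ZoneOffset}
    import java.time.format.DateTimeFormatter
    val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
    // Parse the local date-time, interpret it as UTC and return epoch milliseconds
    LocalDateTime.parse(raw, formatter).toInstant(ZoneOffset.UTC).toEpochMilli
  }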
/* ***************************** 16.2.1 Exploring the Esri Geometry API ***************************************/
  /* The core data type of the Esri library is the Geometry object. A Geometry describes a shape, accompanied by a
   * geolocation where that shape resides. The library contains a set of spatial operations that allows analyzing
   * geometries and their relationships. These operations can do things like tell us the area of a geometry and
   * whether two geometries overlap, or compute the geometry formed by the union of two geometries.
*
* In our case, we'll have Geometry objects representing dropoff points for cab rides, and Geometry objects that
* represent the boundaries of a borough in NYC. The spatial relationship we're interested in is containment: is
* a given point in space located inside one of the polygons associated with a borough of Manhattan?
*
* The Esri API provides a convenience class called GeometryEngine that contains static methods for performing all
   * of the spatial relationship operations, including a contains operation. The contains method takes three arguments:
   * two Geometry objects, and one instance of the SpatialReference class, which represents the coordinate system used
* to perform the geospatial calculations.
*
* For maximum precision, we need to analyze spatial relationships relative to a coordinate plane that maps each
* point on the misshapen spheroid that is planet Earth into a 2D coordinate system. Geospatial engineers have a
* standard set of well-known identifiers (referred to as WKIDs) that can be used to reference the most commonly
* used coordinate systems. For our purposes, we will be using WKID 4326, which is the standard coordinate
* system used by GPS.
* */
  /* We create a wrapper class, RichGeometry, that encapsulates an Esri Geometry object with some useful helper
   * methods. We also declare a companion object for RichGeometry that provides support for implicitly converting
   * instances of the Geometry class into RichGeometry instances.
* To be able to take advantage of this conversion, we need to import the implicit function definition into the Scala
* environment, like import RichGeometry._
*
* The full code is under RichGeometry.scala*/
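  /* A simplified, self-contained sketch of the wrapper pattern described above (the complete version is in
   * RichGeometry.scala); it assumes the Esri Geometry API (com.esri.core.geometry) is on the classpath. */
  import com.esri.core.geometry.{Geometry, GeometryEngine, SpatialReference}
  import scala.language.implicitConversions
  class RichGeometrySketch(val geometry: Geometry,
                           val csr: SpatialReference = SpatialReference.create(4326)) {
    // Delegate the containment test to the static GeometryEngine helper, using WKID 4326 (the GPS coordinate system)
    def contains(other: Geometry): Boolean = GeometryEngine.contains(geometry, other, csr)
  }
  object RichGeometrySketch {
    // Importing RichGeometrySketch._ brings this implicit conversion into scope, so any Esri Geometry
    // can be used as if it had the helper methods of the wrapper
    implicit def wrapGeometrySketch(g: Geometry): RichGeometrySketch = new RichGeometrySketch(g)
  }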
/*************************************** 16.2.2 Intro to GeoJSON API ******************************************/
/* The data we'll use for the boundaries of boroughs in New York city comes written in a format called GeoJSON.
* The core object in the json file is called a feature, which is made up of a geometry instance and a set of
* key-value pairs called properties. A geometry is a shape like a point, line, or polygon. A set of features is
* called a FeatureCollection. Let's pull down the GeoJSON data for the NYC borough maps and take a look at its
* structure.
*
* You can download data from https://nycdatastables.s3.amazonaws.com/2013-08-19T18:15:35.172Z/nyc-borough-boundaries-polygon.geojson
* Rename the download file to nyc-boroughs.geojson
*
* head of the file looks like this
*
* { "type": "FeatureCollection",
* "features": [ { "type": "Feature", "id": 0, "properties": { "boroughCode": 5, "borough": "Staten Island",
* "@id": "http:\/\/nyc.pediacities.com\/Resource\/Borough\/Staten_Island" }, "geometry": { "type": "Polygon",
* "coordinates": [ [ [ -74.050508064032471, 40.566422034160816 ], [ -74.049983525625748, 40.566395924928273 ],
* [ -74.049316403620878, 40.565887747780437 ], [ -74.049236298420453, 40.565362736368101 ], [ -74.050026201586434,
* 40.565318180621134 ], [ -74.050906017050892, 40.566094342130597 ], [ -74.050679167486138, 40.566310845736403 ],
* [ -74.05107159803778, 40.566722493397798 ], [ -74.050508064032471, 40.566422034160816 ] ] ] } }
*
* We can find the name of borough "staten Island" and other related information.
*
* */
/* The Esri Geometry API will help us parse the Geometry JSON inside each feature but won't help us parse the id or
* the properties fields, which can be arbitrary JSON objects. To parse these, we need to use a Scala JSON lib.
*
* We choose Spray, an open source toolkit for building web services with Scala, provides a JSON library that is up
* to the task. Spray-json allows us to convert any Scala object to a corresponding JsValue by calling an implicit
   * toJson method. It also allows us to convert any String that contains JSON to a parsed intermediate form by calling
* parseJson, and then converting it to a Scala type T by calling convertTo[T] on the intermediate type. Spray comes
* with built-in conversion implementations for the common Scala primitive types as well as tuples and collection types
* and it also has a formatting library that allows us to declare the rules for converting custom types like our
* RichGeometry class to and from JSON.*/
// The code of case class such as Feature, FeatureCollection can be found in GeoJson
/* After we have defined the case classes for representing the GeoJSON data, we need to define the formats that tell
* Spray how to convert between our domain objects (RichGeometry, Feature, and FeatureCollection) and a corresponding
* JsValue instance. To do this we need to create Scala singleton objects that extend the RootJsonFormat[T] trait,
   * which defines the abstract methods read(jsv: JsValue): T and write(t: T): JsValue.
*
* For the RichGeometry class, we can delegate most of the parsing and formatting logic to the Esri Geometry API,
* particularly the geometryToGeoJson and geometryFromGeoJson methods on the GeometryEngine class. But for our case
   * classes, we need to write the formatting code ourselves. The formatting code for the Feature case class, including
   * some special logic to handle the optional id field, is given on page 176 of the original book; a simplified sketch
   * follows this comment.*/
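  /* A hedged sketch (not the book's exact code) of such a format, using a simplified Feature that only carries an
   * optional id and a properties map; the real Feature in GeoJson.scala also wraps a RichGeometry. */
  case class SimpleFeature(id: Option[JsValue], properties: Map[String, JsValue])
  implicit object SimpleFeatureJsonFormat extends RootJsonFormat[SimpleFeature] {
    def write(f: SimpleFeature): JsValue = {
      val base: Map[String, JsValue] = Map("type" -> JsString("Feature"), "properties" -> JsObject(f.properties))
      // Only emit the id field when it is present
      JsObject(f.id.map(v => base + ("id" -> v)).getOrElse(base))
    }
    def read(value: JsValue): SimpleFeature = {
      val obj = value.asJsObject
      SimpleFeature(obj.fields.get("id"), obj.fields("properties").asJsObject.fields)
    }
  }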
/**********************************************************************************************************
* *************************************16.4 Preparing Taxi trip data ******************************
* **************************************************************************************************/
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson16_Analyzing_Geo_Temporal_Data").master("local[2]").getOrCreate()
import spark.implicits._
// read config file
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/spark_lessons/Lesson16_Analyzing_Geo_Temporal_Data/trip_data_sample.csv"
val taxiRaw=spark.read.option("header","true").csv(filePath)
taxiRaw.show(1)
}
case class Trip(
license: String,
pickupTime: Long,
dropoffTime: Long,
pickupX: Double,
pickupY: Double,
dropoffX: Double,
dropoffY: Double)
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/classification/LogisticRegressionClassification.scala | <filename>WordCount/src/main/java/org/pengfei/spark/ml/classification/LogisticRegressionClassification.scala
package org.pengfei.spark.ml.classification
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.{Row, SparkSession}
object LogisticRegressionClassification {
def main(args: Array[String]): Unit ={
val spark = SparkSession.builder().
master("local").
appName("LogisticRegressionClassification").
getOrCreate()
val training_data = spark.createDataFrame(Seq(
(0L,"a b c d e spark", 1.0),
(1L,"a b c", 0.0),
(2L,"spark h h d g ", 1.0),
(3L,"hah ho ", 0.0),
(4L,"toto spark", 1.0),
(5L,"a b c", 0.0),
(6L,"spark me to", 1.0),
(7L,"lol no way", 0.0),
(8L,"z spark", 1.0),
(9L,"x y z", 0.0),
(10L,"spark lol", 1.0),
(11L,"I love u", 0.0)
)).toDF("id","text","label")
    // Split each text into words
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    // Hash the words into a fixed-size (1000) term-frequency feature vector
    val hashingTF = new HashingTF().setNumFeatures(1000).setInputCol(tokenizer.getOutputCol).setOutputCol("features")
    // Logistic regression classifier with at most 10 iterations and regularization parameter 0.01
    val lr = new LogisticRegression().setMaxIter(10).setRegParam(0.01)
    // Chain the three stages into a single pipeline and fit it on the labeled training data
    val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, lr))
    val model = pipeline.fit(training_data)
val test_data = spark.createDataFrame(Seq(
(0L, "spark i j k"),
(1L, "l m n"),
(2L, "spark a"),
(3L, "apache hadoop")
)).toDF("id","text")
    model.transform(test_data)
      .select("id", "text", "probability", "prediction")
      .collect()
      .foreach { case Row(id: Long, text: String, prob: Vector, prediction: Double) =>
        println(s"($id, $text) --> prob=$prob, prediction=$prediction")
      }
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_1_Spark_ML_Intro.scala | <reponame>pengfei99/Spark<filename>LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_1_Spark_ML_Intro.scala
package org.pengfei.Lesson05_Spark_ML
object Lesson05_1_Spark_ML_Intro {
def main(args:Array[String])={
/******************************************************************************************************************
* *****************************************5.1 ML Introduction *************************************************
* **************************************************************************************************************/
/*
    * In simple words, a machine learning algorithm infers patterns and relationships between different
* variables in a dataset. It then uses that knowledge to generalize beyond the training dataset. In other words,
* a machine learning algorithm learns to predict from data.*/
/******************************************************************************************************************
* *****************************************5.1.1 ML Terminologies *************************************************
* **************************************************************************************************************/
/****************************************************Features************************************************
*
* A feature represents an attribute or a property of an observation. It is also called a variable. To be more
* specific, a feature represents an independent variable.
*
* In a tabular dataset, a row represents an observation and column represents a feature. For example,
* consider a tabular dataset containing user profiles, which includes fields such as age, gender, profession,
* city, and income. Each field in this dataset is a feature in the context of machine learning. Each row
* containing a user profile is an observation.
*
* Features are also collectively referred to as dimensions. Thus, a dataset with high dimensionality has a
* large number of features.*/
/****************************************Categorical Features**************************************************
* A categorical feature or variable is a descriptive feature. It can take on one of a fixed number of discrete
* values. It represents a qualitative value, which is a name or a label.
*
* The values of a categorical feature have no ordering. For example, in the user profile dataset mentioned
* earlier, gender is a categorical feature. It can take on only one of two values, each of which is a label.
* In the same dataset, profession is also a categorical variable, but it can take on one of several hundred values.
* */
/***************************************Numerical Features******************************************************
* A numerical feature or variable is a quantitative variable that can take on any numerical value. It describes
* a measurable quantity as a number. The values in a numerical feature have mathematical ordering. For example,
* in the user profile dataset mentioned earlier, income is a numerical feature.
*
* Numerical features can be further classified into discrete and continuous features. A discrete numerical
* feature can take on only certain values. For example, the number of bedrooms in a home is a discrete
* numerical feature. A continuous numerical feature can take on any value within a finite or infinite interval.
* An example of a continuous numerical feature is temperature.
* */
/**************************************************Labels*******************************************************
* A label is a variable that a machine learning system learns to predict. It is the dependent variable in a
* dataset. Labels can be a classified into two broad categories: categorical and numerical.
*
* A categorical label represents a class or category. For example, for a machine learning application
* that classifies news articles into different categories such as politics, business, technology, sports, or
* entertainment, the category of a news article is a categorical label.
*
* A numerical label is a numerical dependent variable. For example, for a machine learning application
* that predicts the price of a house, price is a numerical label.*/
/*************************************************Models*******************************************************
* A model is a mathematical construct for capturing patterns within a dataset. It estimates the relationship
* between the dependent and independent variables in a dataset. It has predictive capability. Given the values
* of the independent variables, it can calculate or predict the value for the dependent variable. For example,
* consider an application that forecasts quarterly sales for a company. The independent variables are number
* of sales people, historical sales, macro-economic conditions, and other factors. Using machine learning, a
* model can be trained to predict quarterly sales for any given combination of these factors.
*
* A model is basically a mathematical function that takes features as input and outputs a value. It can be
* represented in software in numerous ways. For example, it can be represented by an instance of a class.
* We will see a few concrete examples later in this Lesson.
*
* A model along with a machine learning algorithm forms the heart of a machine learning system. A machine
* learning algorithm trains a model with data; it fits a model over a dataset, so that the model can predict
* the label for a new observation.
*
* Training a model is a compute intensive task, while using it is not as compute intensive. A model is
* generally saved to disk, so that it can be used in future without having to go through the compute intensive
* training step again. A serialized model can also be shared with other applications. For example, a machine
* learning system may consist of two applications, one that trains a model and another that uses a model.
* */
/************************************************Training Data*************************************************
* The data used by a machine learning algorithm to train a model is called training data or training set. It is
* historical or known data. For example, a spam filtering algorithm uses a known set of spam and non-spam emails.
*
* The training data can be classified into two categories: labeled and unlabeled.
* */
/************************************************Labeled*********************************************************
* A labeled dataset has a label for each observation. One of the columns in the dataset contains the labels.
* For example, a database of homes sold in the last ten years is a labeled dataset for a machine learning
* application that predicts the price of a home. The label in this case is home price, which is known for homes
* sold in the past. Similarly, a spam filtering application is trained with a large dataset of emails, some of
* which are labeled as spam and others as non-spam.*/
/***********************************************Unlabeled********************************************************
* An unlabeled dataset does not have a column that can be used as a label. For example, consider a transaction
* database for an e-commerce site. It records all the online purchases made through that site. This database
* does not have a column that indicates whether a transaction was normal or fraudulent. So for fraud detection
* purposes, this is an unlabeled dataset.
* */
/**********************************************Test data*********************************************************
* The data used for evaluating the predictive performance of a model is called test data or test set. After a
* model has been trained, its predictive capabilities should be tested on a known dataset before it is used on
* new data.
*
* Test data should be set aside before training a model. It should not be used at all during the training
* phase; it should not be used for training or optimizing a model. In fact, it should not influence the training
* phase in any manner; do not even look at it during the training phase. A corollary to this is that a model
* should not be tested with the training dataset. It will perform very well on the observations from the training
* set. It should be tested on data that was not used in training it.
*
* Generally, a small proportion of a dataset is held out for testing before training a model. The exact
* percentage depends on a number of factors such as the size of a dataset and the number of independent
* variables. A general rule of thumb is to use 80% of data for training a model and set aside 20% as test data.
* */
/******************************************************************************************************************
* *****************************************5.1.2 ML Applications *************************************************
* **************************************************************************************************************/
/* Machine learning is used for a variety of tasks in different fields. A large number of applications use machine
* learning, and that number is increasing every day. The machine learning tasks can be broadly grouped into
* the following categories:
* • Classification
* • Regression
* • Clustering
* • Anomaly detection
* • Recommendation
* • Dimensionality reduction
*
* */
/*******************************************5.1.2.1 Classification ************************************************/
/*
* The goal while solving a classification problem is to predict a class or category for an observation. A class
* is represented by a label. The labels for the observations in the training dataset are known, and the goal is
* to train a model that predicts the label for a new unlabeled observation. Mathematically, in a classification
* task, a model predicts the value of a categorical variable.
*
* Classification is a common task in many fields. For example, spam filtering is a classification task. The
* goal of a spam filtering system is to classify an email as a spam or not. Similarly, tumor diagnosis can be
* treated as a classification problem. A tumor can be benign or cancerous. The goal in this case is to predict
* whether a tumor is benign or cancerous. Another example of a classification task is determining credit risk
* of a borrower. Using information such as an individual’s income, outstanding debt, and net worth, a credit
* rating is assigned to an individual.
*
* Machine learning can be used for both binary and multi-class classification. The previous paragraph
* described a few examples of binary classification. In binary classification, the observations in a dataset can
* be grouped into two mutually exclusive classes. Each observation or sample is either a positive or negative
* example.
*
* In multi-class classification, the observations in a dataset can be grouped into more than two classes.
* For example, handwritten zip-code recognition is a multi-class classification problem with ten classes.
* In this case, the goal is to detect whether a handwritten character is one of the digits between 0-9. Each
* digit represents a class. Similarly, image recognition is a multi-class classification task, which has many
* applications. One of the well-known applications is a self-driving or driver-less car. Another application is
* Xbox Kinect360, which infers body parts and position using machine learning.
* */
/*******************************************5.1.2.2 Regression ************************************************/
/* The goal while solving a regression problem is to predict a numerical label for an unlabeled observation.
* The numerical labels are known for the observations in the training dataset and a model is trained to predict
* the label for a new observation.
*
* Examples of regression tasks include home valuation, asset trading, and forecasting. In home valuation,
* the value of a home is the numerical variable that a model predicts. In asset trading, regression techniques
* are used to predict the value of an asset such as a stock, bond, or currency. Similarly, sales or inventory
* forecasting is a regression task.
*
* */
/*******************************************5.1.2.3 Clustering ************************************************/
/*
* In clustering, a dataset is split into a specified number of clusters or segments. Elements in the same cluster
* are more similar to each other than to those in other clusters. The number of clusters depends on the
* application. For example, an insurance company may segment its customers into three clusters: low-risk,
* medium-risk and high-risk. On the other hand, an application may segment users on a social network into
* 10 communities for research purposes.
*
* Some people find clustering confusingly similar to classification. They are different. In a classification
* task, a machine learning algorithm trains a model with a labeled dataset. Clustering is used with unlabeled
* datasets. In addition, although a clustering algorithm splits a dataset into a specified number of clusters, it
* does not assign a label to any cluster. A user has to determine what each cluster represents.
*
* A popular example of clustering is customer segmentation. Organizations use clustering as a data-driven
* technique for creating customer segments, which can be targeted with different marketing programs.
* */
/*******************************************5.1.2.4 Anomaly Detection *****************************************/
/*
* In anomaly detection, the goal is to find outliers in a dataset. The underlying assumption is that an outlier
* represents an anomalous observation. Anomaly detection algorithms are used with unlabeled data.
*
* Anomaly detection has many applications in different fields. In manufacturing, it is used for
* automatically finding defective products. In data centers, it is used for detecting bad systems. Websites use it
* for fraud detection. Another common use-case is detecting security attacks. Network traffic associated with a
* security attack is unlike normal network traffic. Similarly, hacker activity on a machine will be different from
* a normal user activity.
* */
/*******************************************5.1.2.5 Recommendation ************************************************/
/*
* The goal of a recommendation system, also known as recommender system, is to recommend a product to a user.
* It learns from users’ past behavior to determine user preferences. A user rates different products, and
* over time, a recommendation system learns this user’s preferences. In some cases, a user may not explicitly
* rate a product but provide implicit feedback through actions such as purchase, click, view, like, or share.
*
* A recommendation system is one of the well-known examples of machine learning. It is getting
* embedded in more and more applications. Recommendation systems are used to recommend news
* articles, movies, TV shows, songs, books, and other products. For example, Netflix uses recommender
* systems to recommend movies and shows to its subscribers. Similarly, Spotify, Pandora, and Apple use
* recommendation systems to recommend songs to their subscribers.
*
* The two commonly used techniques for building recommendation systems are collaborative filtering
* and content-based recommendation. In collaborative filtering, the properties of a product or user
* preferences are not explicitly programmed. The algorithm assumes that the user preferences and products
* have latent features, which it automatically learns from ratings of different products by different users.
* The input dataset is in a tabular format, where each row contains only a user id, product id, and rating.
* Collaborative filtering learns latent user and product feature just from these three fields. It learns users
* with similar preferences and products with similar properties. The trained model can then be used to
* recommend products to a user. The products recommended to a user are those rated highly by other users
* with similar preferences.
*
* A content-based recommendation system uses explicitly specified product properties to determine
* product similarity and make recommendations. For example, a movie has properties such as genre, lead
* actor, director, and year released. In a content-based system, every movie in a movie database will have
* these properties recorded. For a user who mostly watches comedy movies, a content-based system will
* recommend a movie having genre as comedy.
* */
/*******************************************5.1.2.6 Dimensionality Reduction***************************************/
/*
* Dimensionality reduction is a useful technique for reducing the cost and time it takes to train a machine
* learning system. Machine learning is a compute intensive task. The computation complexity and cost increases
* with the number of features or dimensions in a dataset. The goal in dimensionality reduction is to reduce the
* number of features in a dataset without significantly impacting the predictive performance of a model.
*
* A dataset may have so many dimensions that it is prohibitively expensive to use it for machine learning.
* For example, a dataset may have several thousand features. It may take days or weeks to train a system with
* this dataset. With dimensionality reduction techniques, it can be used to train a machine learning system in
* a more reasonable time.
*
* The basic idea behind dimensionality reduction is that a dataset may have several features that have low
* or zero predictive power. A dimensionality reduction algorithm automatically eliminates these features from
* a dataset. Only the features with most predictive power are used for machine learning. Thus, dimensionality
* reduction techniques reduce the computational complexity and cost of machine learning.*/
/******************************************************************************************************************
* *****************************************5.2 ML Algorithms *************************************************
* **************************************************************************************************************/
/* Machine learning algorithms use data to train a model. The process of training a model is also referred to as
* fitting a model with data. In other words, a machine learning algorithm fits a model on a training dataset.
* Depending on the type of the training data, machine learning algorithms are broadly grouped into two
* categories:
* - supervised machine learning
* - unsupervised machine learning.*/
/******************************************************************************************************************
* *****************************************5.2.1 Supervised ML Algorithms *****************************************
* **************************************************************************************************************/
/*
* A supervised machine learning algorithm trains a model with a labeled dataset. It can be used only with
* labeled training datasets.
*
* Each observation in the training dataset has a set of features and a label. The dependent variable, also
* known as the response variable, represents the label. The independent variables, also known as explanatory
* or predictor variables, represent the features. A supervised machine learning algorithm learns from data to
* estimate or approximate the relationship between a response variable and one or more predictor variables.
*
* The labels in a training dataset may be generated manually or sourced from another system. For
* example, for spam filtering, a large sample of emails are collected and manually labeled as spam or not.
* On the other hand, for sales forecasting, label will be historical sales, which can be sourced from a sales
* database.
*
* Supervised machine learning algorithms can be broadly grouped into two categories:
* - Regression
* - Classification .*/
/*****************************************5.2.1.1 Regression Algorithms *****************************************/
/*
* A regression algorithm trains a model with a dataset that has a numerical label. The trained model can then
* predict numerical labels for new unlabeled observations.
*
* Depending on the number of predictor and response variables, regression tasks can be grouped in three categories:
* - simple regression : involves one response and one predictor variable
* - multiple regression : involves one response and multiple predictor variables
* - multivariate regression : involves several response and several predictor variables.
*
* The commonly used supervised machine learning algorithms for regression tasks include:
* - linear regression
* - decision trees
* - ensembles of trees(Random forest).*/
/* You can get the details of each regression algorithm in separate files Lesson5_2_1_1_Regression_Algo*/
/*****************************************5.2.1.2 Classification Algorithms *****************************************/
/* You can get the details of each classification algorithm in separate files Lesson5_2_1_2_Classification_Algo*/
/******************************************************************************************************************
* *****************************************5.2.2 Unsupervised ML Algorithms *****************************************
* **************************************************************************************************************/
/* An unsupervised machine learning algorithm is used when a dataset is unlabeled. It draws inferences from
* unlabeled datasets. Generally, the goal is to find hidden structure in unlabeled data. Unsupervised machine
* learning algorithms are generally used for
* - clustering
* - anomaly detection
* - dimensionality reduction.
* The list of commonly used unsupervised machine learning algorithms includes k-means, Principal
* Component Analysis, and Singular Value Decomposition (SVD).*/
/*****************************************5.2.2.1 Clustering Algorithms *****************************************/
/* You can get the details of each clustering algorithm in separate files Lesson5_2_2_1_Clustering_Algo*/
/*****************************************5.2.2.2 Anomaly detection Algorithms *************************************/
/* You can get the details of each anomaly detection algorithm in separate files Lesson5_2_2_2_Anomaly_Detection_Algo*/
/*****************************************5.2.2.3 Dimensionality reduction Algorithms *********************************/
/* You can get the details of each clustering algorithm in separate files Lesson5_2_2_3_Dimensionality_reduction_Algo*/
/******************************************************************************************************************
* *****************************************5.2.3 ML Algorithms Hyperparameter ***********************************
* **************************************************************************************************************/
/* Many machine learning algorithms also require a few input parameters that determine the training time and
* predictive effectiveness of a trained model. These parameters, which are not directly learnt, but provided
   * as inputs to a learning algorithm, are known as hyperparameters. A good data scientist can find the best
   * hyperparameters to improve model accuracy (a small tuning sketch follows this comment).*/
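    /* A minimal sketch (hypothetical training DataFrame with "label" and "features" columns, assuming spark-mllib is
     * on the classpath) of searching hyperparameters with Spark ML's CrossValidator: the grid lists candidate values
     * and the best combination is selected on held-out folds. */
    def hyperparameterTuningSketch(training: org.apache.spark.sql.DataFrame): Unit = {
      import org.apache.spark.ml.classification.LogisticRegression
      import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
      import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
      val lr = new LogisticRegression()
      // regParam and maxIter are hyperparameters: they are not learnt from the data but given to the algorithm
      val grid = new ParamGridBuilder()
        .addGrid(lr.regParam, Array(0.01, 0.1))
        .addGrid(lr.maxIter, Array(10, 50))
        .build()
      val cv = new CrossValidator()
        .setEstimator(lr)
        .setEvaluator(new BinaryClassificationEvaluator())
        .setEstimatorParamMaps(grid)
        .setNumFolds(3)
      val bestModel = cv.fit(training)
      println(bestModel.avgMetrics.mkString(", "))
    }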
/******************************************************************************************************************
* *****************************************5.3 ML Model Validation ***********************************
* **************************************************************************************************************/
/* After a model is trained, it is important to evaluate it on a test dataset. The predictive effectiveness or
* quality of a model can be evaluated using a few different metrics. Generally, the evaluation metric depends
* on the machine learning task. Different metrics are used for linear regression, classification, clustering,
* and recommendation.
*
* A simple model evaluation metric is accuracy. It is defined as the percentage of the labels correctly
* predicted by a model. For example, if a test dataset has 100 observations and a model correctly predicts the
* labels for 90 observations, its accuracy is 90%.
*
* However, accuracy can be a misleading metric. For example, consider a tumors database, where each
* row has data about either a malignant or a benign tumor. In the context of machine learning, a malignant
* tumor is considered a positive sample and a benign tumor is considered a negative sample. Suppose we
* train a model that predicts whether a tumor is malignant (positive) or non-cancerous benign (negative). Is it
* a good model if it has 90% accuracy?
*
* It depends on the test dataset. If the test dataset has 50% positive and 50% negative samples, our model
* is performing well. However, if the test dataset has only 1% positive and 99% negative samples, our model
* is worthless. We can generate a better model without using machine learning; a simple model that always
* classifies a sample as negative will have 99% accuracy. Thus, it has a better accuracy than our trained model,
* even though it incorrectly classifies all the positive samples.
*
* The two commonly used metrics for evaluating a classifier or classification model are:
* - Area under Curve(AUC)
* - F-measure.
* For regression model, we often use
* - Root Mean Squared Error (RMSE)
*
* You can get the details of these validation method in separate files Lesson5_3_Model_Validation*/
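    /* A minimal sketch (hypothetical prediction DataFrames, assuming spark-mllib is on the classpath) of computing the
     * metrics named above with Spark ML evaluators; the dedicated lesson file covers them in more detail. */
    def evaluationSketch(classifierPredictions: org.apache.spark.sql.DataFrame,
                         regressionPredictions: org.apache.spark.sql.DataFrame): Unit = {
      import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, RegressionEvaluator}
      // Area under the ROC curve for a binary classifier (expects "label" and "rawPrediction" columns by default)
      val auc = new BinaryClassificationEvaluator().setMetricName("areaUnderROC").evaluate(classifierPredictions)
      // Root Mean Squared Error for a regression model (expects "label" and "prediction" columns by default)
      val rmse = new RegressionEvaluator().setMetricName("rmse").evaluate(regressionPredictions)
      println(s"AUC = $auc, RMSE = $rmse")
    }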
/******************************************************************************************************************
* *****************************************5.4 ML High-level Steps *********************************************
* **************************************************************************************************************/
/*
* The high-level steps generally depend on the type of a machine learning task and not so much on the
* machine learning algorithms. For a given task, the same steps can be used with different machine learning
* algorithms.
*
* Supervised machine learning task generally consists of the following high-level steps.
     * -- 1. Split data into training, validation, and test sets (a small split sketch follows these step lists).
* -- 2. Select the features for training a model.
* -- 3. Fit a model on the training dataset using a supervised machine learning algorithm.
* -- 4. Tune the hyperparameters using the validation dataset.
* -- 5. Evaluate the model on a test dataset.
* -- 6. Apply the model to new data.
*
* Unsupervised machine learning task generally consists of the following high-level steps.
* -- 1. Select the feature variables.
* -- 2. Fit a model using an unsupervised machine learning algorithm.
* -- 3. Evaluate the model using the right evaluation metrics.
* -- 4. Use the model.
* */
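    /* A minimal sketch (hypothetical labeled DataFrame) of step 1 of a supervised task: an 80/10/10 split into
     * training, validation and test sets, with a fixed seed so the split is reproducible. */
    def splitSketch(labeledData: org.apache.spark.sql.DataFrame): Unit = {
      val Array(training, validation, test) = labeledData.randomSplit(Array(0.8, 0.1, 0.1), seed = 42L)
      println(s"training=${training.count()}, validation=${validation.count()}, test=${test.count()}")
    }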
/******************************************************************************************************************
* *****************************************5.5 ML in spark *****************************************************
* **************************************************************************************************************/
/*
* Spark provides two machine learning libraries, MLlib and Spark ML (also known as the Pipelines API). These
* libraries enable high-performance machine learning on large datasets. Unlike machine learning libraries that
* can be used only with datasets that fit on a single machine, both MLlib and Spark ML are scalable. They make
* it possible to utilize a multi-node cluster for machine learning.
*
* In addition, since Spark allows an application to cache a dataset in memory, machine learning applications
* built with Spark ML or MLlib are fast.
*
* MLlib is the first machine learning library that shipped with Spark. It is more mature than Spark ML. Both
* libraries provide higher-level abstractions for machine learning than the core Spark API
* */
/******************************************5.5.1 MLlib overview **********************************************/
/* MLlib extends Spark for machine learning and statistical analysis. It provides a higher-level API than the
* Spark core API for machine learning and statistical analysis. It comes prepackaged with commonly used
* machine learning algorithms used for a variety of machine learning tasks. It also includes statistical utilities
* for different statistical analysis.
*
* MLlib integrates with other Spark libraries such as Spark Streaming and Spark SQL. It can be used with both batch
* and streaming data.
*
* Data preparation steps such as data cleansing and feature engineering becomes easier with the DataSet/Frame API
* provided by Spark SQL. Generally, the raw data cannot be used directly with machine learning algorithms.
* Features need to be extracted from the raw data.
*
* Statistical Utilities
* MLlib provides classes and functions for common statistical analysis. It supports summary statistics,
* correlations, stratified sampling, hypothesis testing, random data generation, and kernel density estimation.
*
*
* Machine Learning Algorithms
* MLlib can be used for common machine learning tasks such as regression, classification, clustering,
* anomaly detection, dimensionality reduction, and recommendation. The list of algorithms that come
     * bundled with MLlib is ever growing. This section lists the algorithms shipped with MLlib as of 2016.
*
* - Regression and Classification
* -- • Linear regression
* -- • Logistic regression
* -- • Support Vector Machine
* -- • Naïve Bayes
* -- • Decision tree
* -- • Random forest
* -- • Gradient-boosted trees
* -- • Isotonic regression
*
* - Clustering
* -- • K-means
* -- • Streaming k-means
* -- • Gaussian mixture
* -- • Power iteration clustering (PIC)
* -- • Latent Dirichlet allocation (LDA)
*
* - Dimensionality Reduction
* -- • Principal component analysis (PCA)
* -- • Singular value decomposition (SVD)
*
* - Feature Extraction and Transformation
* -- • TF-IDF
* -- • Word2Vec
* -- • Standard Scaler
* -- • Normalizer
* -- • Chi-Squared feature selection
* -- • Elementwise product
*
* - Frequent pattern mining
* -- • FP-growth
* -- • Association rules
* -- • PrefixSpan
*
* - Recommendation
* -- • Collaborative filtering with Alternating Least Squares (ALS)
* */
/******************************************5.5.2 MLlib API **********************************************/
/******************************************5.5.3 ML API **********************************************/
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/test/Test.scala | <filename>WordCount/src/main/java/org/pengfei/test/Test.scala
package org.pengfei.test
object Test {
def main(args:Array[String]): Unit ={
val test="I'm <NAME>"
print(lineWordCount(test))
}
def lineWordCount(text: String): Long={
val word=text.split(" ").map(_.toLowerCase).groupBy(identity).mapValues(_.size)
val counts=word.foldLeft(0){case (a,(k,v))=>a+v}
/* print(word)
print(counts)*/
return counts
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson01_RDD/Lesson01_RDDWithKeyValuePair.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson01_RDD
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
/*
* Key-value pair RDDs are a useful building block in many programs
* */
object Lesson01_RDDWithKeyValuePair {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("Lesson1_PaireRDD").
getOrCreate()
// import sparkSession.implicits._ for all schema conversion magic.
//PaireRDDTransformationOperation(spark)
PaireRDDActionOperation(spark)
}
/*****************************************************************************************
* ********************************* Paire RDD transformations **************************
* **************************************************************************************/
/* As paire rdd add key value notion to each element, we can do some transformation on keys and values of the paire*/
def PaireRDDTransformationOperation(spark:SparkSession):Unit={
val inventory=spark.sparkContext.parallelize(List(("coffee",1),
("coffee",2),("soda",4),("soda",8),("alcool",1),("alcool",1)))
println(s"Inventory contents : ${inventory.collect.mkString(",")}")
/**********************Keys ******************************************************/
/* The keys method returns an RDD of only the keys in the source RDD. */
val keyOfInventory=inventory.keys
println(s"Inventory keys : ${keyOfInventory.collect.mkString(",")}")
/********************************* values ***********************************/
/* The values method returns an RDD of only the values in the source RDD. */
val valueOfInventory=inventory.values
println(s"Inventory values : ${valueOfInventory.collect.mkString(",")}")
/********************************mapValues ********************************/
/* The mapValues method is a higher-order method that takes a function as input and applies it to each value
* in the source RDD. It returns an RDD of key-value pairs. It is similar to the map method, except that it applies
* the input function only to each value in the source RDD, so the keys are not changed. The returned RDD has
* the same keys as the source RDD.*/
val squareInventoryValues=inventory.mapValues{x=>x*x}
println(s"Inventory contents after square value : ${squareInventoryValues.collect.mkString(",")}")
/*********************************Filter on keys and values****************************/
/* We can use ._1 to access key and ._2 to access values, if the value is an complexe data structure we can
* use match case pattern to get the value*/
val productOfSoda=inventory.filter(x=>x._1.equals("soda"))
println(s"Products which are sodas: ${productOfSoda.collect().mkString(",")}")
val productNumMoreThan4=inventory.filter(x=>x._2>4)
println(s"Products which has more than 4 items ${productNumMoreThan4.collect().mkString(",")}")
/************************************* join **************************************/
/* The join method takes an RDD of key-value pairs as input and performs an inner join on the source and input
* RDDs. It returns an RDD of pairs, where the first element in a pair is a key found in both source and input RDD
* and the second element is a tuple containing values mapped to that key in the source and input RDD.*/
val pair1=spark.sparkContext.parallelize(List(("a",1),("b",2),("c",3)))
val pair2=spark.sparkContext.parallelize(List(("b","two"),("c","three"),("d","four")))
val joinPair=pair1.join(pair2)
println(s" Join result of two pairs : ${joinPair.collect.mkString(",")}")
/**************LeftOuterJoin, rightOuterJoin and fullOuterJoin******************/
/* The rightOuterJoin method takes an RDD of key-value pairs as input and performs a right outer join on the
* source and input RDD. It returns an RDD of key-value pairs, where the first element in a pair is a key from
* input RDD and the second element is a tuple containing optional value from source RDD and value from
* input RDD. An optional value from the source RDD is represented with the Option type
*
* The leftOuterJoin is similar to the rightOuterJoin. The following example shows how they work
* A=(a,b,c) B=(b,c,d) A.leftOuterJoin(B)=(a,b,c), A.rightOuterJoin(B)=(b,c,d), A.fullOuterJoin(B)=(a,b,c,d)*/
val leftOuterJoin=pair1.leftOuterJoin(pair2)
println(s" Left outer join result of two pairs : ${leftOuterJoin.collect.mkString(",")}")
val rightOuterJoin=pair1.rightOuterJoin(pair2)
println(s" Right outer join result of two pairs : ${rightOuterJoin.collect.mkString(",")}")
val fullOuterJoin=pair1.fullOuterJoin(pair2)
println(s" full outer join result of two pairs : ${fullOuterJoin.collect.mkString(",")}")
/* Note that in the result value tuple of leftJoin, the element from source is the origin type, and the element from
* input rdd has the option type some(T) or None, this because the input rdd may not have the key, to avoid exception
* we use option type, for the rightJoin, it's the element of source rdd has the option type*/
/********************************SampleByKey *****************************************************/
/* The sampleByKey method returns a subset of the source RDD sampled by key. It takes the sampling rate for
* each key as input and returns a sample of the source RDD. This method is used for sampling values of many
* duplicate keys*/
val duplicateKeyPair=spark.sparkContext.parallelize(List(("a", 1), ("b",2), ("a", 11),("b",22),("a", 111), ("b",222),
("c", 111), ("c",222)))
//println(s" duplicate key pair value: ${duplicateKeyPair.collect().mkString(",")}")
    // The fractions map must contain an entry for every key present in the RDD (a missing key causes an error).
    // Each fraction is the per-key sampling rate, a value between 0 and 1; the fractions do not need to sum to 1.
val sampleFractions=Map("a"->0.1,"b"->0.8,"c"->0.1)
// Get an approximate sample from each stratum
val samplePair=duplicateKeyPair.sampleByKey(true,sampleFractions)
println(s"Approximate sample pair of duplicate key pair value: ${samplePair.collect().mkString(",")}")
// Get an exact sample from each stratum, this will cost more resource
val exactSamplePair=duplicateKeyPair.sampleByKeyExact(true,sampleFractions)
println(s"Exact sample pair of duplicate key pair value: ${exactSamplePair.collect().mkString(",")}")
/************************************** subtractByKey ******************************************/
/* The subtractByKey method takes an RDD of key-value pairs as input and returns an RDD of key-value pairs
* containing only those keys that exist in the source RDD, but not in the input RDD.
* A=(a,b,c) B=(b,c,d) A.subtractByKey(B)=(a)
* */
val substractPair=pair1.subtractByKey(pair2)
println(s" substractPair value is ${substractPair.collect().mkString(",")}")
/************************************GroupByKey*************************************************/
/* The groupByKey method returns an RDD of pairs, where the first element in a pair is a key from the source
* RDD and the second element is a collection of all the values that have the same key. It is similar to the
* groupBy method that we saw earlier. The difference is that groupBy is a higher-order method that takes as
* input a function that returns a key for each element in the source RDD. The groupByKey method operates on
 * an RDD of key-value pairs, so a key generator function is not required as input, whereas groupBy needs one
 * to produce a key for each element.*/
val groupDuplicateKeyPair=duplicateKeyPair.groupByKey()
println(s"Grouped duplicate key pair value: ${groupDuplicateKeyPair.collect.mkString(",")}")
/* Note that, the return rdd has the form (b,CompactBuffer(2, 22, 222)) */
/************************************ ReduceByKey ********************************************/
/* The higher-order reduceByKey method takes an associative and commutative binary operator (commutative means
 * a op b == b op a) as input and reduces the values with the same key to a single value using that operator.
*
* A binary operator takes two values as input and returns a single value as output. An associative operator
* returns the same result regardless of the grouping of the operands. e.g (a + b)+c=a+(b+c)
*
* The reduceByKey method can be used for aggregating values by key. For example, it can be used for
* calculating sum, product, minimum or maximum of all the values mapped to the same key.*/
val sumByKey=duplicateKeyPair.reduceByKey((x,y)=>x+y)
val minByKey=duplicateKeyPair.reduceByKey((x,y)=> if (x<y) x else y)
println(s"sumByKey value is ${sumByKey.collect().mkString(",")}")
println(s"minByKey value is ${minByKey.collect().mkString(",")}")
/* Note that reduceByKey is a transformation, unlike reduce, which is an action. reduce aggregates/combines all
 * the elements of the RDD into a single value, so it cannot return an RDD. reduceByKey, defined on RDDs of pairs,
 * aggregates/combines the elements per key, so its output is a set of (key, value) results that may still be
 * large, distributed, and processed by further transformations; keeping it as an RDD[(K, V)] is therefore the
 * natural choice and is optimal from a pipelining perspective.*/
/*******************************************foldByKey ************************************/
/*
 * We can use the foldByKey operation to aggregate values based on keys. Unlike fold (an action), foldByKey is a
 * transformation; the reason is the same as for reduceByKey: foldByKey folds the values of each key separately
 * and returns a new collection of key-value pairs that may need further transformations, so it is better to keep
 * it as a transformation returning an RDD.
 *
 * fold aggregates all elements of the RDD and returns a single value, so it is an action.
 *
 * In this example, employees are grouped by department name. If you want to find the maximum salary in a
 * given department, we can use the following code.*/
val deptEmployees = List(
("cs",("jack",1000.0)),
("cs",("bron",1200.0)),
("phy",("sam",2200.0)),
("phy",("ronaldo",500.0))
)
/****makeRDD is identical to parallelize, because makeRDD also calls parallelize*/
val employeeRDD = spark.sparkContext.makeRDD(deptEmployees)
/* foldByKey folds the values of each key separately, so the accumulator only carries the value type, not the
 * full (key, value) pair*/
/* ("dummy",0.0) is the start value of the accumulator*/
val maxByDept = employeeRDD.foldByKey(("dummy",0.0))((acc,element)=> if(acc._2 > element._2) acc else element)
// reduceByKey is easier to use compared to foldByKey
val reduceMaxByDept=employeeRDD.reduceByKey((x,y)=>if(x._2>y._2) x else y)
println(s"maximum salaries in each dept ${maxByDept.collect().mkString(",")}" )
println(s"maximum salaries in each dept calculated by reduceByKey : ${reduceMaxByDept.collect().mkString(",")}" )
/* Compared to foldByKey, fold does not group elements by key, so the accumulator needs to have the same structure
 * as the elements of the RDD.*/
val maxOfAllDept = employeeRDD.fold(("dummy",("dummy",0.0)))(
(acc,employee)=> if (acc._2._2>employee._2._2) acc else employee
)
println(s"maximum salaries of all dept ${maxOfAllDept}")
}
/*****************************************************************************************
 * ********************************* Pair RDD Actions **************************
* **************************************************************************************/
def PaireRDDActionOperation(spark:SparkSession):Unit={
/****************************** CountByKey **********************************/
/* The countByKey method counts the occurrences of each unique key in the source RDD. It returns a Map of
* key-count pairs.*/
val duplicateKeyPair=spark.sparkContext.parallelize(List(("a", 1), ("b",2), ("a", 11),("b",22),("a", 111), ("b",222),
("c", 111), ("c",222)))
val countOfEachKey=duplicateKeyPair.countByKey()
println(s"countOfEachKey value is : ${countOfEachKey}")
/******************************* lookup *************************************/
/* The lookup method takes a key as input and returns a sequence(WrappedArray) of all the values mapped to that key in the
* source RDD.*/
val lookUpAValues=duplicateKeyPair.lookup("a")
println(s"lookUpAValues has value: ${lookUpAValues}")
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_1_1_Regression_Algo.scala |
package org.pengfei.Lesson05_Spark_ML
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson05_2_1_1_Regression_Algo {
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson5_2_1_1_Regression_Algo").getOrCreate()
/*************************************5.2.1.1.1 LinearRegression***********************************/
}
/*****************************************************************************************************
*************************************5.2.1.1.1 LinearRegression ***********************************
* *************************************************************************************************/
def LinearRegressionExample(spark:SparkSession):Unit={
/* Linear regression algorithms fit a linear model with coefficients using training data. A linear model is a
* linear combination of a set of coefficients and explanatory variables. The algorithm estimates the unknown
* coefficients, also known as model parameters, from training data. The fitted coefficients minimize the sum
* of the squares of the difference between predicted and actual observed labels in the training dataset.
*
 * A simple example of a linear model is: y = q0 + q1*X1 + q2*X2 + q3*X1*X2
 * y is the label (dependent variable), X1, X2 are the features (independent variables). The values of y, X1, X2
 * are known for each observation (row in the dataset). A linear regression algorithm estimates the values of
 * q0, q1, q2, q3 using the training data. Once q0, q1, q2, q3 are known, we can plug the feature values into the
 * equation to estimate y.
 *
 * Note that the model will never perfectly match the real data; there will always be a gap between the estimated
 * and the observed values.
 * */
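/* A minimal sketch (illustration only, not part of the original lesson) of how spark.ml's LinearRegression
 * could be used. The toy data, column names and hyper-parameter values below are assumptions. */
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.LinearRegression

val training = spark.createDataFrame(Seq(
  (1.0, Vectors.dense(1.0, 2.0)),
  (2.0, Vectors.dense(2.0, 3.0)),
  (3.0, Vectors.dense(3.0, 5.0))
)).toDF("label", "features")

// Fit a linear model; maxIter and regParam are arbitrary example values
val lr = new LinearRegression().setMaxIter(10).setRegParam(0.1)
val model = lr.fit(training)
// The fitted coefficients are the estimated q1..qn and the intercept is q0
println(s"Coefficients: ${model.coefficients}, Intercept: ${model.intercept}")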
}
/*****************************************************************************************************
*************************************5.2.1.1.2 Isotonic Regression ***********************************
* *************************************************************************************************/
def IsotonicRegressionExample(spark:SparkSession):Unit={
/* The Isotonic Regression algorithm fits a non-decreasing function to a training dataset. It finds the best
* least squares fit to a training dataset with the constraint that the trained model must be a non-decreasing
* function. A least squares function minimizes the sum of the squares of the difference between predicted and
* actual labels in the training dataset. Unlike linear regression, the Isotonic Regression algorithm does not
* assume any form for the target function such as linearity.
*
 * Page 160, figure 8-2 (Big Data Analytics with Spark). This figure shows well the difference between linear
 * regression and isotonic regression.*/
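/* A minimal sketch (illustration only, not part of the original lesson) of spark.ml's IsotonicRegression.
 * The toy data and column names are assumptions. */
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.IsotonicRegression

val data = spark.createDataFrame(Seq(
  (1.0, Vectors.dense(1.0)),
  (2.0, Vectors.dense(2.0)),
  (1.5, Vectors.dense(3.0)),
  (4.0, Vectors.dense(4.0))
)).toDF("label", "features")

// The fitted function is constrained to be non-decreasing in the feature
val ir = new IsotonicRegression()
val model = ir.fit(data)
println(s"Boundaries: ${model.boundaries}, Predictions: ${model.predictions}")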
}
/*****************************************************************************************************
*************************************5.2.1.1.3 Decision Trees ***********************************
* *************************************************************************************************/
def DecisionTreesExample(spark:SparkSession):Unit={
/* The decision tree algorithm infers a set of decision rules from a training dataset. It creates a decision tree
* that can be used to predict the numeric label for an observation.
*
* A tree is a hierarchal collection of nodes and edges. Unlike a graph, there are no loops in a tree.
* A non-leaf node is called an internal or split node. A leaf node is called a terminal node.
*
* In a decision tree, each internal node tests the value of a feature or predictor variable. The observations
* in the training dataset are segmented into a number of regions using these tests. A leaf node represents a
* region and stores the average value of all the observations belonging to a region.
*
* Given a new unlabeled observation, a decision tree model starts at the root node and evaluates the
* observation features against the internal nodes. It traverses down a tree until it arrives at a terminal node.
* The value stored at the matching terminal node is returned as the predicted label. Thus, a decision tree
* model conceptually implements hierarchal if-else statements. It performs a series of tests on the features to
* predict a label.
*
* Decision trees can be used for both regression and classification tasks. The “Classification Algorithms”
* section describes how decision trees can be used for classification tasks.
*
* The decision tree algorithm has many advantages over other more sophisticated machine learning
* algorithms. First, models trained by a decision tree are easy to understand and explain. Second, it can easily
* handle both categorical and numerical features. Third, it requires little data preparation. For example, unlike
* other algorithms, it does not require feature scaling.
*
 * Decision trees are prone to overfitting and underfitting, which hurts the accuracy of their predictions.*/
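/* A minimal sketch (illustration only, not part of the original lesson) of spark.ml's DecisionTreeRegressor.
 * The toy data and the maxDepth value are assumptions. */
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.DecisionTreeRegressor

val data = spark.createDataFrame(Seq(
  (10.0, Vectors.dense(1.0, 0.0)),
  (20.0, Vectors.dense(2.0, 1.0)),
  (30.0, Vectors.dense(3.0, 0.0)),
  (40.0, Vectors.dense(4.0, 1.0))
)).toDF("label", "features")

val dt = new DecisionTreeRegressor().setMaxDepth(3)
val model = dt.fit(data)
// toDebugString prints the hierarchical if-else rules learned by the tree
println(model.toDebugString)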
}
/*****************************************************************************************************
*************************************5.2.1.1.4 Ensembles ***********************************
* *************************************************************************************************/
/* To solve the problems of a single decision tree, we can combine multiple tree models to generate a more powerful
 * model. These are called ensemble learning algorithms: they use several base models to improve generalizability
 * and predictive accuracy over a single model. The commonly used algorithms are:
 * - Random Forests
 * - Gradient-Boosted Trees*/
def RandomForestExample(spark:SparkSession):Unit={
/* The Random Forest algorithm trains each decision tree in an ensemble independently using a random sample of data.
* In addition, each decision tree is trained using a subset of the features. The number of trees in an ensemble is of
* the order of hundreds. Random Forest creates an ensemble model that has a better predictive performance
* than that of a single decision tree model.
*
* For a regression task, a Random Forest model takes an input feature vector and gets a prediction from
 * each decision tree in the ensemble. It averages the numeric labels returned by all the trees and returns
 * the average as its prediction.*/
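/* A minimal sketch (illustration only, not part of the original lesson) of spark.ml's RandomForestRegressor.
 * The toy data and the numTrees value are assumptions. */
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.RandomForestRegressor

val data = spark.createDataFrame(Seq(
  (10.0, Vectors.dense(1.0, 0.0)),
  (20.0, Vectors.dense(2.0, 1.0)),
  (30.0, Vectors.dense(3.0, 0.0)),
  (40.0, Vectors.dense(4.0, 1.0))
)).toDF("label", "features")

// Each of the 20 trees is trained on a random sample of the data; predictions are averaged over all trees
val rf = new RandomForestRegressor().setNumTrees(20)
val model = rf.fit(data)
model.transform(data).select("label", "prediction").show()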
}
def GradientBoostedTreesExample(spark:SparkSession):Unit={
/*
* The Gradient-Boosted Trees (GBTs) algorithm also trains an ensemble of decision trees. However, it
* sequentially trains each decision tree. It optimizes each new tree using information from previously
* trained trees. Thus, the model becomes better with each new tree. GBT can take longer to train a model
* since it trains one tree at a time. In addition, it is prone to overfitting if a large number of trees
* are used in an ensemble. However, each tree in a GBT ensemble can be shallow, which are faster to train.*/
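/* A minimal sketch (illustration only, not part of the original lesson) of spark.ml's GBTRegressor.
 * The toy data and hyper-parameter values are assumptions. */
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.GBTRegressor

val data = spark.createDataFrame(Seq(
  (10.0, Vectors.dense(1.0, 0.0)),
  (20.0, Vectors.dense(2.0, 1.0)),
  (30.0, Vectors.dense(3.0, 0.0)),
  (40.0, Vectors.dense(4.0, 1.0))
)).toDF("label", "features")

// maxIter controls how many trees are trained sequentially; shallow trees (maxDepth=3) keep training fast
val gbt = new GBTRegressor().setMaxIter(10).setMaxDepth(3)
val model = gbt.fit(data)
model.transform(data).select("label", "prediction").show()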
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/ClientSatisfait.scala |
package org.pengfei.spark.application.example
import breeze.numerics.round
//import org.apache.spark.sql.{Row, SQLContext, SparkSession}
import org.apache.spark.sql.types._
import org.apache.spark.{SparkConf, SparkContext}
object ClientSatisfait {
def main(args:Array[String]): Unit ={
/*val inputFile = "file:///tmp/satisfait.csv"
val spark = SparkSession.builder().master("local").appName("ClientSatisfait").getOrCreate()
val schema = dfSchema()
val df = spark.read.format("com.databricks.spark.csv").option("header", "true").schema(schema).load(inputFile)*/
//val managerList=df.select("manager_name").distinct().collectAsList()
//println(managerList)
//val Arjun=df.select("manager_name","satisfaction_level").groupBy("manager_name").count()
//filter(df("manager_name")=== "<NAME>").
//
//df.show()
//Arjun.show()
}
def dfSchema():StructType={
StructType(
Seq(
StructField(name = "manager_name", dataType = StringType, nullable = false),
StructField(name = "client_name", dataType = StringType, nullable = false),
StructField(name = "client_gender", dataType = StringType, nullable = false),
StructField(name = "client_age", dataType = IntegerType, nullable = false),
StructField(name = "response_time", dataType = DoubleType, nullable = false),
StructField(name = "satisfaction_level", dataType = DoubleType, nullable = false)
)
)
}
// another way to write schema
/* def simpleSchema():StructType={
StructType(Array(
StructField("manager_name",StringType,false),
StructField("client_name",StringType,false),
StructField("client_gender",StringType,false),
StructField("client_age",IntegerType,false),
StructField("response_time",DoubleType,false),
StructField("satisfaction_level",DoubleType,false),
))
}*/
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson21_Testing/TestGreeting.scala |
package org.pengfei.Lesson21_Testing
class TestGreeting {
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_Exc04_Parse_Apache_Access_Log.scala | package org.pengfei.Lesson04_Spark_SQL
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
object Lesson04_Exc04_Parse_Apache_Access_Log {
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson4_Exec04_Parse_Apache_Access_Log").getOrCreate()
import spark.implicits._
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path = sparkConfig.getString("sourceDataPath")
val filePath = s"${path}/spark_lessons/Lesson04_Spark_SQL/access.log.2"
// Read raw data
val rawDf = spark.read.text(filePath)
rawDf.cache()
rawDf.show(5, false)
rawDf.count()
// split raw data into token list
val splitDf = rawDf.select(split(col("value"), " ").as("tokenList"))
splitDf.show(5, false)
// transform token list into dataframe
val tokenizedDf = splitDf.withColumn("host", $"tokenList".getItem(0))
.withColumn("rfc931", $"tokenList".getItem(1))
.withColumn("authuser", $"tokenList".getItem(2))
.withColumn("date", concat($"tokenList".getItem(3), $"tokenList".getItem(4)))
.withColumn("request", $"tokenList".getItem(6))
.withColumn("status", $"tokenList".getItem(8))
.withColumn("bytes", $"tokenList".getItem(9))
.drop("tokenList")
tokenizedDf.show(5, false)
tokenizedDf.select("status").distinct()
/* The following query gives us the top ten most visited pages. Notice that the second most viewed item is not in the top 10 selling list */
val mostViewedPage = tokenizedDf.filter($"request".contains("product")).groupBy($"request").count().orderBy($"count".desc)
mostViewedPage.show(10, false)
/* If we want to replace the %20 by a space in the request, we can use regexp_replace*/
val betterView = mostViewedPage.select(regexp_replace($"request", "%20", " ").alias("request"), $"count")
betterView.show(10, false)
/* refine data frame, only keep product name, and rename column name*/
/*Here we use a interesting spark sql function substring_index to get the product name
* substring_index(str, delim, count) : Returns the substring from str before count occurrences of the delimiter
* delim. If count is positive, everything to the left of the final delimiter (counting from the left) is
* returned. If count is negative, everything to the right of the final delimiter (counting from the right)
* is returned.
*
* For example, if we want to keep the head of the string(www), then we do the following
* SELECT substring_index('www.apache.org', '.', 1);
* If we want to keep the tail of the string(org), then we do the following
* SELECT substring_index('www.apache.org', '.', -1);
* */
/* After analysis, we found bad data in the access log, so we want to remove all lines which have "add_to_cart"
 * as product_name. We use filter(), which takes a boolean Column expression as argument; here we use not()
 * rather than the ! operator for the negation.
* */
val productVisitNumber = betterView
.withColumn("product_name",substring_index(col("request"),"/",-1))
.withColumnRenamed("count","view_number")
.drop("request")
.filter(not($"product_name".contains("add_to_cart")))
.select("product_name","view_number")
productVisitNumber.show(10,false)
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_7_Spark_SQL_Schema.scala | package org.pengfei.Lesson04_Spark_SQL
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{ArrayType, DataType, DataTypes, IntegerType, MapType, StringType, StructField, StructType}
import org.apache.spark.sql.functions.{col, from_json, struct, when}
import scala.io.Source
/** ******************************************************************************************************
* ********************************* 07 Define schema for data frames ************************************
* ******************************************************************************************************/
/** In this lesson, we will use Spark SQL "StructType & StructField" classes to programmatically specify the
* schema of a DataFrame and creating complex columns like nested struct, array and map columns.
*
* - StructType(spark.sql.types.StructType) is a collection of StructFields
* - StructField(spark.sql.types.StructField) defines:
 * - column name(String): the name of the column
 * - column data type(DataType): the data type of the column
 * - nullable(boolean): A boolean value to indicate if the field can be null or not
 * - metadata(Metadata): optional metadata attached to the column
*
* The schema is used when we read semi-structured data such as csv.
* */
object Lesson04_7_Spark_SQL_Schema {
def main(args: Array[String]) = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
/*In this exc01, I will use a yelp data set to illustrate how to do data analytics with spark*/
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path = sparkConfig.getString("sourceDataPath")
val filePath = s"${path}/spark_lessons/Lesson04_Spark_SQL/yelp_academic_dataset_business.json"
val spark = SparkSession.builder().master("local[2]").appName("Lesson4_7_Spark_SQL_Schema").getOrCreate()
/** ***********************************************************************************************
* *************************** 7.1. Data types ***********************************************
* ************************************************************************************************/
/** 7.1.1 Spark Sql data types
* Spark SQL supports the following data types:
* StringType
* HiveStringType
* ArrayType
* MapType
* StructType
* DateType
* TimestampType
* BooleanType
* CalendarIntervalType
* ObjectType
* BinaryType, ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, DecimalType, NumericType
* NullType
*
* All the numeric type such as Integer, Long, are the subclass of NumericType.
* All above type class are the subclass of the DataType class. It provides a list of functions to show the
* properties of each type instances.
* */
/** 7.1.2
 * As you can notice in exp1 and exp2, a type can be exported in JSON format. This JSON string can then be used
 * to create a new data type instance.
* */
//dataTypeExample(spark)
/** 7.1.3 ArrayType, MapType and StructType
*
* We need to pay more attentions to ArrayType, MapType and StructType. They can be very useful in some cases. And
* they are well supported in parquet format, unlike user defined struct type which may impact performance.
* */
/** ***********************************************************************************************
* *************************** 7.2 Schema ***********************************************
* ************************************************************************************************/
/** 7.2.1 StructField
* StructField is the basic building block of a schema, it defines the name, type, nullable, metadata of each column
* For example, the StructField("firstName",StringType,true) defines a column named "firstName" of type string, and
* nullable
*
* */
/** 7.2.2 StructType
* StructType is an array of StructField. A structType can be considered as the schema of a dataset.
*
* In exp5, we use StructType and Field to define a schema. And this schema is used to convert an RDD to a
* data frame
* */
//schemaExample(spark)
/** 7.2.3 Nested StructType
* As we mentioned before in section 7.1. SparkSQL supports not only the simple primitive types, but also complex
* array, map, and nested struct types.
* In exp6. we define a schema which has array, map, and nested struct types
* */
//nestedTypeExample(spark)
/** ***********************************************************************************************
* *************************** 7.3 Export and import Schema *****************************************
* ************************************************************************************************/
/** 7.3.1 Export schema to json
* */
//exportSchemaExample(spark)
// importSchemaExample(spark)
// importSchemaFromDDLExample(spark)
/** ***********************************************************************************************
* *************************** 7.4 Update schema *****************************************
* ************************************************************************************************/
/**
 * The schema of a data frame is updated when we create or delete columns in it. In exp10, we use withColumn
 * to create a new column and drop to remove old columns.
**/
updateSchemaExample(spark)
/** ***********************************************************************************************
* *************************** 7.5 Check metadata of a schema *****************************************
* ************************************************************************************************/
/**
 * If you want to perform checks on the metadata of a DataFrame, for example whether a column or field exists,
 * or what the data type of a column is, we can easily do this using several functions on SQL StructType
 * and StructField.
*
* In exp11, we checked the name of column and the type.
**/
}
/** ******************************7.1 Data type example ***************************************/
def dataTypeExample(spark: SparkSession): Unit = {
/* Exp1. show the details of String*/
val strType = DataTypes.StringType
println("json : " + strType.json)
println("prettyJson : " + strType.prettyJson)
println("simpleString : " + strType.simpleString)
println("sql : " + strType.sql)
println("typeName : " + strType.typeName)
println("catalogString : " + strType.catalogString)
println("defaultSize : " + strType.defaultSize)
/* Exp2. show the details of Array of integer*/
val arrType = ArrayType(IntegerType, false)
println("json : " + arrType.json)
println("prettyJson : " + arrType.prettyJson)
println("simpleString : " + arrType.simpleString)
println("sql : " + arrType.sql)
println("typeName : " + arrType.typeName)
println("catalogString : " + arrType.catalogString)
println("defaultSize : " + arrType.defaultSize)
/*Exp3. Create array type from json*/
val arrayFromJson = DataType.fromJson("""{"type":"array","elementType":"string","containsNull":false}""".stripMargin)
println(arrayFromJson.getClass)
/*Exp4. Create string type from json*/
val strFromJson2 = DataType.fromJson("\"string\"")
println(strFromJson2.getClass)
}
/** *****************************7.2 schema example ******************************************/
def schemaExample(spark: SparkSession): Unit = {
/*exp5. Convert rdd to df with a given schema*/
val demoData = Seq(Row("James ", "", "Smith", "36636", "M", 3000),
Row("Michael ", "Rose", "", "40288", "M", 4000),
Row("Robert ", "", "Williams", "42114", "M", 4000),
Row("Maria ", "Anne", "Jones", "39192", "F", 4000),
Row("Jen", "Mary", "Brown", "23456", "F", -1)
)
val simpleSchema = StructType(Array(
StructField("firstName", StringType, true),
StructField("middleName", StringType, true),
StructField("lastName", StringType, true),
StructField("id", StringType, true),
StructField("gender", StringType, true),
StructField("salary", IntegerType, true)
))
val rdd = spark.sparkContext.parallelize(demoData)
val df = spark.createDataFrame(rdd, simpleSchema)
df.printSchema()
df.show()
}
def nestedTypeExample(spark: SparkSession): Unit = {
/*exp6. array, map, nested-struct*/
val demoData = Seq(
Row(Row("James ", "", "Smith"), List("Cricket", "Movies"), Map("hair" -> "black", "eye" -> "brown")),
Row(Row("Michael ", "Rose", ""), List("Tennis"), Map("hair" -> "brown", "eye" -> "black")),
Row(Row("Robert ", "", "Williams"), List("Cooking", "Football"), Map("hair" -> "red", "eye" -> "gray")),
Row(Row("Maria ", "Anne", "Jones"), null, Map("hair" -> "blond", "eye" -> "red")),
Row(Row("Jen", "Mary", "Brown"), List("Blogging"), Map("white" -> "black", "eye" -> "black"))
)
val schema = new StructType()
// the type of column fullName is a nested structType which has three columns
.add("fullName", new StructType()
.add("firstName", StringType)
.add("middleName", StringType)
.add("lastName", StringType))
// the type of column hobbies is an array of string
.add("hobbies", ArrayType(StringType))
// the type of column properties is a map of (string->string)
.add("properties", MapType(StringType, StringType))
val rdd = spark.sparkContext.parallelize(demoData)
val df = spark.createDataFrame(rdd, schema)
df.printSchema()
df.show(false)
}
/** *****************************7.3 export schema example ******************************************/
def exportSchemaExample(spark: SparkSession): Unit = {
/*exp7. get schema of a data frame, and export it in string or json*/
val demoData = Seq(Row("James ", "", "Smith", "36636", "M", 3000),
Row("Michael ", "Rose", "", "40288", "M", 4000),
Row("Robert ", "", "Williams", "42114", "M", 4000),
Row("Maria ", "Anne", "Jones", "39192", "F", 4000),
Row("Jen", "Mary", "Brown", "23456", "F", -1)
)
val simpleSchema = StructType(Array(
StructField("firstName", StringType, true),
StructField("middleName", StringType, true),
StructField("lastName", StringType, true),
StructField("id", StringType, true),
StructField("gender", StringType, true),
StructField("salary", IntegerType, true)
))
val rdd = spark.sparkContext.parallelize(demoData)
val df = spark.createDataFrame(rdd, simpleSchema)
//get the schema of a data frame. The returned schema is a StructType
val schema: StructType = df.schema
println("TreeString : " + schema.treeString)
println("json : " + schema.json)
println("prettyJson : " + schema.prettyJson)
println("simpleString : " + schema.simpleString)
println("sql : " + schema.sql)
println("typeName : " + schema.typeName)
println("catalogString : " + schema.catalogString)
println("defaultSize : " + schema.defaultSize)
}
def importSchemaExample(spark: SparkSession): Unit = {
/*exp8. build a schema from a JSON string, and apply it to a dataframe*/
val demoData = Seq(Row("James ", "", "Smith", "36636", "M", 3000),
Row("Michael ", "Rose", "", "40288", "M", 4000),
Row("Robert ", "", "Williams", "42114", "M", 4000),
Row("Maria ", "Anne", "Jones", "39192", "F", 4000),
Row("Jen", "Mary", "Brown", "23456", "F", -1)
)
val jsonString =
"""
|{
| "type" : "struct",
| "fields" : [ {
| "name" : "firstName",
| "type" : "string",
| "nullable" : true,
| "metadata" : { }
| }, {
| "name" : "middleName",
| "type" : "string",
| "nullable" : true,
| "metadata" : { }
| }, {
| "name" : "lastName",
| "type" : "string",
| "nullable" : true,
| "metadata" : { }
| }, {
| "name" : "id",
| "type" : "string",
| "nullable" : true,
| "metadata" : { }
| }, {
| "name" : "gender",
| "type" : "string",
| "nullable" : true,
| "metadata" : { }
| }, {
| "name" : "salary",
| "type" : "integer",
| "nullable" : true,
| "metadata" : { }
| } ]
|}
|""".stripMargin
val schema = DataType.fromJson(jsonString).asInstanceOf[StructType]
val dfFromJson = spark.createDataFrame(
spark.sparkContext.parallelize(demoData), schema)
dfFromJson.printSchema()
dfFromJson.show(false)
}
def importSchemaFromDDLExample(spark: SparkSession): Unit = {
/*exp9. build a schema from a DDL string, and apply it to a dataframe*/
val demoData = Seq(Row(Row("James ", "", "Smith"), "36636", "M", 3000),
Row(Row("Michael ", "Rose", ""), "40288", "M", 4000),
Row(Row("Robert ", "", "Williams"), "42114", "M", 4000),
Row(Row("Maria ", "Anne", "Jones"), "39192", "F", 4000),
Row(Row("Jen", "Mary", "Brown"), "23456", "F", -1))
/*We can also create a schema from a DDL string*/
val ddlSchemaStr = "`fullName` STRUCT<`first`: STRING, `last`: STRING,`middle`: STRING>,`id` STRING,`gender` STRING," +
"`salary` INT"
val ddlSchema = StructType.fromDDL(ddlSchemaStr)
val dfFromDDL = spark.createDataFrame(
spark.sparkContext.parallelize(demoData), ddlSchema)
dfFromDDL.printSchema()
dfFromDDL.show(false)
}
def updateSchemaExample(spark: SparkSession): Unit = {
/*exp10. build a dataframe with a DDL-defined schema, then update its schema with withColumn and drop*/
val demoData = Seq(Row(Row("James ", "", "Smith"), "36636", "M", 3000),
Row(Row("Michael ", "Rose", ""), "40288", "M", 4000),
Row(Row("Robert ", "", "Williams"), "42114", "M", 4000),
Row(Row("Maria ", "Anne", "Jones"), "39192", "F", 4000),
Row(Row("Jen", "Mary", "Brown"), "23456", "F", -1))
/*We can also create a schema from a DDL string*/
val ddlSchemaStr = "`fullName` STRUCT<`first`: STRING, `last`: STRING,`middle`: STRING>,`id` STRING,`gender` STRING," +
"`salary` INT"
val ddlSchema = StructType.fromDDL(ddlSchemaStr)
val df = spark.createDataFrame(
spark.sparkContext.parallelize(demoData), ddlSchema)
df.printSchema()
df.show(false)
val updatedDF = df.withColumn("OtherInfo", struct(
col("id").as("identifier"),
col("gender").as("gender"),
col("salary").as("salary"),
when(col("salary").cast(IntegerType) < 2000, "Low")
.when(col("salary").cast(IntegerType) < 4000, "Medium")
.otherwise("High").alias("Salary_Grade")
)).drop("id", "gender", "salary")
updatedDF.printSchema()
updatedDF.show(false)
/*exp 11*/
//It should print true, because df has a column named gender
println(df.schema.fieldNames.contains("gender"))
//It should print false, because df does not have a column named email
println(df.schema.fieldNames.contains("email"))
//It should print true, because df has a column named gender which has type string
println(df.schema.contains(StructField("gender", StringType, true)))
//It should print false, because gender column is not integer
println(df.schema.contains(StructField("gender", IntegerType, true)))
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson14_Understanding_Wiki_With_Latent_Semantic_Analysis/Lesson14_Spark_Xml_Parsing.scala |
package org.pengfei.Lesson14_Understanding_Wiki_With_Latent_Semantic_Analysis
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{RegexTokenizer, StopWordsRemover, Tokenizer}
import org.apache.spark.sql.{DataFrame, SparkSession}
object Lesson14_Spark_Xml_Parsing {
/****************************************************************************************************************
* ******************************** 14.A Spark xml parsing ****************************************************
* **********************************************************************************************/
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson14_Spark_Xml_Parsing").master("local[2]").getOrCreate()
import spark.implicits._
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/spark_lessons/Lesson14_Latent_Semantic_Analysis/mini-wikidump.xml"
/* As we had bugs when parsing the xml files, here we use another xml parsing technique to parse the wiki
 * xml dump.
 *
 * The lib we use here is from Databricks. You can find more info here (https://github.com/databricks/spark-xml)
*
* */
/*********************************14.A.1 Read xml as dataframe *****************************************/
/* There are few key options to read the xml files
* - path : file location (e.g. local fs, hdfs, s3)
* - rowTag : The row tag of your xml files to treat as a row. For example, in this xml
* <books> <book><book> ...</books>, the appropriate value would be book. Default is ROW.
* At the moment, rows containing self closing xml tags are not supported.
* - rootTag : The root tag of your xml files to treat as the root. in this xml <books> <book><book> ...</books>,
* the appropriate value would be books. Default is ROWS
* - nullValue : The value to write null value. Default is string null. when this is null, it does not write
* attributes and elements for fields.
* - valueTag: The tag used for the value when there are attributes in the element having no child. Default is _VALUE.
*
* */
val df=spark.read.format("com.databricks.spark.xml").option("rowTag","page").load(filePath)
// df.show(1,false)
// df.printSchema()
// println(s" row number : ${df.count()}")
/* The xml file has the following structure :
* <page>
* <title>...</title>
<ns>...</ns>
<id>...</id>
<revision>
<id>546483579</id>
<parentid>524214820</parentid>
<timestamp>2013-03-23T06:27:20Z</timestamp>
<contributor>
<username>Addbot</username>
<id>6569922</id>
</contributor>
<minor/>
<comment> ... </comment>
<model>wikitext</model>
<format>text/x-wiki</format>
<text xml:space="preserve" bytes="430">...</text>
<sha1>apzvvk61z15qfk90rh4y7nkhu8rzay2</sha1>
</revision>
* </page>
*
* The output ds has the following schema:
*
* root
|-- id: long (nullable = true)
|-- ns: long (nullable = true)
|-- revision: struct (nullable = true)
| |-- comment: string (nullable = true)
| |-- contributor: struct (nullable = true)
| | |-- id: long (nullable = true)
| | |-- username: string (nullable = true)
| |-- format: string (nullable = true)
| |-- id: long (nullable = true)
| |-- minor: string (nullable = true)
| |-- model: string (nullable = true)
| |-- parentid: long (nullable = true)
| |-- sha1: string (nullable = true)
| |-- text: struct (nullable = true)
| | |-- _VALUE: string (nullable = true)
| | |-- _bytes: long (nullable = true)
| | |-- _space: string (nullable = true)
| |-- timestamp: string (nullable = true)
|-- title: string (nullable = true)
* */
/* We can notice that the first-level children of page become columns; as it is a dataset, we can also access
 * the second/third/... level children */
val test=df.select("title","revision")
// test.show(1,false)
// test.printSchema()
/* Create a new column fullText from the second-level child <text> of page; note that
 * <text> has attributes (e.g. space, bytes). We access it with $"revision".getField("text")*/
val text=test.withColumn("fullText",$"revision".getField("text"))
/*text.select("fullText").show(1,false)*/
/* Another way to access the second-level child*/
val text1=test.withColumn("fullText",$"revision.text")
/*text1.select("fullText").show(1,false)*/
/* Get the value of space or bytes of the <text xml:space="",bytes="">...</text> */
val spaceAndByptes=test.withColumn("space",$"revision.text._space")
.withColumn("bytes",$"revision.text._bytes")
/*spaceAndByptes.select("space","bytes").show(1,false)*/
/* With the following code, we can get pure text value without space and bytes parameters values*/
val pureTextValue= test.withColumn("text",$"revision.text._VALUE")
pureTextValue.select("text").show(1,false)
TextToTerm(spark)
}
def TextToTerm(spark:SparkSession):DataFrame={
val text=spark.createDataFrame(Seq(
(0,"'''[[Discrete geometry]]''' or '''combinatorial geometry''' may be loosely defined as study of geometrical objects and properties that are [[discrete mathematics|discrete]] or [[combinatorial]], either by their nature or by their representation; the study that does not essentially rely on the notion of [[continuous function|continuity]].\n{{Commons cat|Discrete geometry}}\n\n[[Category:Geometry]]\n[[Category:Discrete mathematics]]")
)).toDF("id","text")
// The plain Tokenizer keeps special characters such as ''', [ and ], because it only splits on whitespace
val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("terms")
// The RegexTokenizer takes a regular expression; with the pattern "\\W" it splits on any non-word character
val regexTokenizer = new RegexTokenizer()
.setInputCol("text")
.setOutputCol("terms")
.setPattern("\\W")
/*val tokenized = tokenizer.transform(text)
tokenized.select("text","terms").show(1,false)*/
val regexTokenized=regexTokenizer.transform(text)
regexTokenized.select("text","terms").show(false)
// add a stop words remover
val remover= new StopWordsRemover()
.setInputCol("terms")
.setOutputCol("filteredTerms")
val filteredTerms=remover.transform(regexTokenized)
filteredTerms.select("terms","filteredTerms").show(1,false)
filteredTerms
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson02_Spark_Jobs_And_Shared_Var/Lesson02_Spark_Jobs.scala | package org.pengfei.Lesson02_Spark_Jobs_And_Shared_Var
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson02_Spark_Jobs {
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
def spark=SparkSession.builder().master("local[2]").appName("Lesson2_Spark_Jobs").getOrCreate()
def sc=spark.sparkContext
/********************************************************************************************************
*****************************************2.1 Spark Jobs************************************************
* *****************************************************************************************************/
/* 2.1.1 Introduction
* RDD operations, including transformation, action and caching methods form the basis of a Spark
* application. Essentially, RDDs describe the Spark programming model. Now that we have covered the
* programming model, we will discuss how it all comes together in a Spark application
*
* 2.1.2 What is a spark job?
* A job is a set of computations that Spark performs to return the results of an action to a driver program.
* An application can launch one or more jobs. It launches a job by calling an action method of an RDD. Thus,
* an action method triggers a job. If an action is called for an RDD that is not cached or a descendant of a
* cached RDD, a job starts with the reading of data from a storage system. However, if an action is called for
* an RDD that is cached or a descendent of a cached RDD, a job begins from the point at which the RDD or its
* ancestor RDD was cached. Next, Spark applies the transformations required to create the RDD whose action
* method was called. Finally, it performs the computations specified by the action. A job is completed when a
* result is returned to a driver program.
*
* 2.1.3 What is task stages in a spark job?
* When an application calls an RDD action method, Spark creates a DAG of task stages. It groups tasks into stages
* using shuffle boundaries. Tasks that do not require a shuffle are grouped into the same stage. A task that requires
* its input data to be shuffled begins a new stage. A stage can have one or more tasks. Spark submits tasks to
* the executors, which run the tasks in parallel. Tasks are scheduled on nodes based on data locality. If a node
* fails while working on a task, Spark resubmits task to another node.
*
* 2.1.4 Shuffle operation
* The shuffle operation is Spark’s mechanism for re-distributing data so that it’s grouped differently across
* partitions. This typically involves copying data across executors and machines, making the shuffle a complex
* and costly operation.
*
* The Shuffle is an expensive operation since it involves disk I/O, data serialization, and network I/O.
* To organize data for the shuffle, Spark generates sets of tasks - map tasks to organize the data, and a set of
* reduce tasks to aggregate it. This nomenclature comes from MapReduce and does not directly relate to Spark’s
* map and reduce operations.
*
* Which RDD transformations and actions can cause shuffle operations?
* This is hard to answer, because many transformations may or may not cause shuffle operations. It depends on many
* factors.
*
* Below is a list which may cause shuffle operations.
* -cogroup
* -groupwith
* -join
* -leftOuterJoin
* -rightOuterJoin
* -groupByKey
* -reduceByKey
* -combineByKey
* -sortByKey
* -distinct
* -intersection
* -repartition : Very high chance to cause shuffle
 * -coalesce : Very high chance to cause shuffle
 *
 * The best way to know is to check your Spark job DAG: each stage boundary corresponds to a shuffle.
 * You can also use the toDebugString method, but beware: toDebugString returns "A description of this RDD and its
 * recursive dependencies for debugging.", so it will include possible shuffles from prior transformations if they exist.
 * Check the output of toDebugString carefully to see which transformation causes the shuffle.
*
*
* */
val rdd=sc.parallelize(List("orange","apple","banana","kiwi"))
println(s" distinct on rdd need shuffle or not: ${rdd.distinct().toDebugString}")
/********************************************************************************************************
*****************************************2.2 Shared Variables*********************************************
* *****************************************************************************************************/
/* Spark uses a shared-nothing architecture. Data is partitioned across a cluster of nodes and each node in
* a cluster has its own CPU, memory, and storage resources. There is no global memory space that can be
* shared by the tasks. The driver program and job tasks share data through messages.
*
* For example, if a function argument to an RDD operator references a variable in the driver program, Spark
* sends a copy of that variable along with a task to the executors. Each task gets its own copy of the variable
* and uses it as a read-only variable. Any update made to that variable by a task remains local. Changes are
* not propagated back to the driver program. In addition, Spark ships that variable to a worker node at the
* beginning of every stage.
*
* This default behavior can be inefficient for some applications. In one use case, the driver program
* shares a large lookup table with the tasks in a job and the job involves several stages. By default, Spark
* automatically sends the driver variables referenced by a task to each executor; however, it does this for each
* stage. Thus, if the lookup table holds 100 MB data and the job involves ten stages, Spark will send the same
* 100 MB data to each worker node ten times.
*
* Another use case involves the ability to update a global variable in each task running on different nodes.
* By default, updates made to a variable by a task are not propagated back to the driver program.
* */
/********************************************2.2.1 Broadcast variables ******************************************/
/* Broadcast variables enable a Spark application to optimize sharing of data between the driver program
* and the tasks executing a job. Spark sends a broadcast variable to a worker node only once and caches it in
* deserialized form as a read-only variable in executor memory. In addition, it uses a more efficient algorithm
* to distribute broadcast variables.
*
* Note that a broadcast variable is useful if a job consists of multiple stages and tasks across stages
* reference the same driver variable. It is also useful if you do not want the performance hit from having to
* deserialize a variable before running each task. By default, Spark caches a shipped variable in the executor
* memory in serialized form and deserializes it before running each task.
*
* The SparkContext class provides a method named broadcast for creating a broadcast variable. It takes
* the variable to be broadcasted as an argument and returns an instance of the Broadcast class. A task must
* use the value method of a Broadcast object to access a broadcasted variable.
*
* */
case class Transaction(id:Long,cusId:Int,itemId:Int)
case class TransactionDetail(id:Long,custName:String,itemName:String)
val customerMap=Map(1->"Tom",2->"Bob")
val itemMap=Map(1->"Car",2->"Phone")
val transactions=sc.parallelize(List(Transaction(1,1,1),Transaction(2,1,2)))
val bcCustomerMap=sc.broadcast(customerMap)
val bcItemMap=sc.broadcast(itemMap)
val transactionDetails=transactions.map{t:Transaction=>
TransactionDetail(t.id,bcCustomerMap.value(t.cusId),bcItemMap.value(t.itemId))}
//Without using broadcast variables, it works too, but it takes more time, because of repeated serialization/deserialization
/* val transactionDetails=transactions.map{t:Transaction=>
TransactionDetail(t.id,customerMap.get(t.cusId).get,itemMap.get(t.itemId).get)}*/
println(s"Transaction details value: ${transactionDetails.collect.mkString("||")}")
/* The use of broadcast variables enabled us to implement an efficient join between the customer, item
* and transaction dataset. We could have used the join operator from the RDD API, but that would shuffle
* customer, item, and transaction data over the network. Using broadcast variables, we instructed Spark
* to send customer and item data to each node only once and replaced an expensive join operation with a
* simple map operation.*/
/*********************************** Understanding closures ****************************************************/
/*
* Before we talk Accumulator, we need to understand the scope and life cycle of variables and methods when
* executing spark job across a cluster. RDD operations that modify variables outside of their scope can be a frequent
* source of confusion. In the example below we’ll look at code that uses foreach() to increment a counter, but
* similar issues can occur for other operations as well.
*
*
* var counter = 0
* var rdd = sc.parallelize(data)
* // Wrong: Don't do this!!
* rdd.foreach(x => counter += x)
* println("Counter value: " + counter)
*
 * The above code may work in local mode, because the counter and the foreach closure run in the same JVM.
 * But in cluster mode, the rdd is split across the cluster, and the foreach closure is executed inside each
 * executor. As a result, only the copies of the counter inside the executors are updated, not the one in the
 * Spark driver, so the printed result will always be 0. To solve this kind of problem, we need to use Accumulators.
 *
 * For more details, visit https://spark.apache.org/docs/2.2.0/rdd-programming-guide.html#accumulators
* */
/**************************** 1.6 Accumulators ****************************************************************/
/*
* Accumulators are variables that are only “added” to through an associative and commutative operation and can
* therefore be efficiently supported in parallel. They can be used to implement counters (as in MapReduce) or sums.
* Spark natively supports accumulators of numeric types, and programmers can add support for new types.
*
* The SparkContext class provides a method named accumulator for creating an accumulator variable.
* It takes two arguments. The first argument is the initial value for the accumulator and the second argument,
* which is optional, is a name for displaying in the Spark UI. It returns an instance of the Accumulator
* class, which provides the operators for working with an accumulator variable. Tasks can only add a value
* to an accumulator variable using the add method or += operator. Only the driver program can read an
* accumulator’s value using it value method.
* */
/*
* A numeric accumulator can be created by calling SparkContext.longAccumulator() or SparkContext.doubleAccumulator()
* to accumulate values of type Long or Double, respectively. Tasks running on a cluster can then add to it using the
* add method. However, they cannot read its value. Only the driver program can read the accumulator’s value,
* using its value method.
*
* You can write your own accumulator by extending AccumulatorV2 class. For more details,
* visit https://spark.apache.org/docs/2.2.0/rdd-programming-guide.html#accumulators
* */
//named accumulator
val accum=spark.sparkContext.longAccumulator("My Accumulator")
val nums=sc.parallelize(Seq(1,2,3,4,5,6,7,8,9,34,57,45,5757,34,34,57,56,91))
//sum all elements in nums
nums.foreach(x=>accum.add(x))
println(s"Accum value is : ${accum.value}")
case class Customer(id:Long,name:String)
val customers=sc.parallelize(List(
Customer(1,"Tom"),
Customer(2,"Harry"),
Customer(-1,"Paul")
))
//println(s"customers value ${customers.collect.mkString(",")}")
val badIds = sc.longAccumulator("Bad id accumulator")
/*customers.foreach(c=>{c match {
case Customer(id,name)=>{if(id<0)badIds.add(1)}
}})*/
val validCustomers = customers.filter(c => if (c.id < 0) {
badIds.add(1)
false
} else true
)
println(s"Good id value: ${validCustomers.count}")
println(s"badIds value: ${badIds.value}")
/* Accumulators should be used with caution. Updates to an accumulator within a transformation are not
* guaranteed to be performed exactly once. If a task or stage is re-executed, each task’s update will be applied
* more than once.
*
* In addition, the update statements are not executed until an RDD action method is called. RDD
* transformations are lazy; accumulator updates within a transformation are not executed right away.
* Therefore, if a driver program uses the value of an accumulator before an action is called, it will get the
* wrong value.*/
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_6_Spark_SQL_Built_In_Functions.scala |
package org.pengfei.Lesson04_Spark_SQL
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
import org.pengfei.Lesson04_Spark_SQL.Lesson04_5_Spark_DataSet.{EmailArrayBody, EmailStringBody, SalesByCity}
object Lesson04_6_Spark_SQL_Built_In_Functions {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
    val spark = SparkSession.builder().master("local[2]").appName("Lesson4_6_Spark_SQL_Built_In_Functions").getOrCreate()
/**********************************************************************************************************
* *********************************************4.5.6 DataSet built-in functions**************************
* ************************************************************************************************/
/* Spark SQL comes with a comprehensive list of built-in functions, which are optimized for fast execution. It
* implements these functions with code generation techniques. The built-in functions can be used from both
* the DataFrame API and SQL interface.
*
* To use Spark’s built-in functions from the DataFrame API, you need to add the following import
* statement to your source code.
* import org.apache.spark.sql.functions._
*
* The built-in functions can be classified into the following categories:
* - aggregate
* - collection
* - date/time
* - math
* - string
* - window
* - miscellaneous functions.
* */
/**************************************4.5.6.1 Aggregate functions **********************************/
// DSAggOperations(spark)
/**************************************4.5.6.2 Collection functions **********************************/
// DSCollectionOperations(spark)
/**************************************4.5.6.3 Date/time functions **********************************/
// DSDateTimeOperations(spark)
/**************************************4.5.6.4 Math functions **********************************/
// DSMathOperations(spark)
/**************************************4.5.6.5 String functions **********************************/
DSStringOperations(spark)
/**************************************4.5.6.6 Window functions **********************************/
// DSWindowOperations(spark)
/**************************************4.5.6.7 Miscellaneous functions **********************************/
// DSMiscellaneousOperations(spark)
/*************************************4.5.6.8 UDFs and UDAFs ***********************************************/
// DSUDFOperations(spark)
}
/*******************************************************************************************************
**********************************************4.5.6.1 Aggregate functions ****************************
* ***************************************************************************************************/
def DSAggOperations(spark: SparkSession):Unit={
/* The aggregate functions can be used to perform aggregations on a column. The built-in aggregate
 * functions include:
* - approxCountDistinct
* - avg
* - count
* - countDistinct
* - first
* - last
* - max
* - mean
* - min
* - sum
* - sumDistinct.*/
import spark.implicits._
val salesByCity = List(SalesByCity(2014, "Boston", "MA", "USA", 2000),
SalesByCity(2015, "Boston", "MA", "USA", 3000),
SalesByCity(2014, "Cambridge", "MA", "USA", 2000),
SalesByCity(2015, "Cambridge", "MA", "USA", 3000),
SalesByCity(2014, "Palo Alto", "CA", "USA", 4000),
SalesByCity(2015, "Palo Alto", "CA", "USA", 6000),
SalesByCity(2014, "Pune", "MH", "India", 1000),
SalesByCity(2015, "Pune", "MH", "India", 1000),
SalesByCity(2015, "Mumbai", "MH", "India", 1000),
SalesByCity(2014, "Mumbai", "MH", "India", 2000)).toDF()
/* After a groupBy you can apply a single aggregate function directly; if you want to compute several aggregations
 * at once, you need to use the agg method*/
val groupByCityAndYear=salesByCity.groupBy("city").sum("revenue") as "amount"
val groupByYear=salesByCity.groupBy("year").avg("revenue")
groupByCityAndYear.show()
groupByYear.show()
/*********************************agg*******************************************/
/* The agg method performs specified aggregations on one or more columns in the source DataFrame and
* returns the result as a new DataFrame. You need to import org.apache.spark.sql.functions._*/
val aggregates = salesByCity.groupBy("year").agg(min("revenue"),max("revenue"),sum("revenue"),count("year"))
aggregates.show()
/**********************************************************************************************************
* ******************************************** 4.5.6.2 Collection ***************************************
*********************************************************************************************************/
}
def DSCollectionOperations(spark:SparkSession):Unit={
import spark.implicits._
    /* The collection functions operate on columns containing a collection of elements. The built-in collection
     * functions include:
     * - array_contains : checks whether an array column contains a given value
     * - explode : creates one output row per element of an array (or map) column
     * - size : returns the number of elements in an array or map column
     * - sort_array : returns the array column sorted in ascending (or descending) order
     * */
val emailsWithArrayBody=List(EmailArrayBody("James", "Mary", "back", Array("just", "got", "vacation")),
EmailArrayBody("John", "Jessica", "money", Array("make", "dollars")),
EmailArrayBody("Tim", "Kevin", "report", Array("send", "ASAP"))).toDF()
val emailsWithStringBody = List(EmailStringBody("James", "Mary", "back", "just got back from vacation"),
EmailStringBody("John", "Jessica", "money", "make million dollars"),
EmailStringBody("Tim", "Kevin", "report", "send me sales report ASAP")).toDF()
/***************************************Array_contains**************************************************/
    /* The array_contains method checks whether an array column contains a given element; it returns a boolean*/
println("*********************************************Array_contains**************************************")
val haveSend=emailsWithArrayBody.withColumn("haveSend",array_contains($"body","send"))
haveSend.show()
/***************************************size**************************************************/
    /* The size method returns the number of elements in the array of the given column; it returns an int*/
println("********************************************* size **************************************")
val wordsCount=emailsWithArrayBody.withColumn("wordsNB",size($"body"))
wordsCount.show()
/***************************************sort_array**************************************************/
    /* The sort_array method sorts the array in the given column and returns a new, sorted array*/
println("*********************************************sort_array**************************************")
val sortBody=emailsWithArrayBody.withColumn("sortedBody",sort_array($"body"))
sortBody.show()
/***************************************Explode****************************************************/
    /* The function explode(e:Column) creates a new row for each element in the given array (or map) column.
     *
     * There is also an older Dataset.explode(inputColumn, outputColumn)(udf) method that generates zero or more
     * rows from a column using a user-provided function; it has been deprecated since Spark 2.0. Use flatMap or
     * select/withColumn with functions.explode() instead.
     * */
println("*********************************************Explode**************************************")
// you must import org.apache.spark.sql.functions.explode for this to work
    // explode works on array or map columns; it does not work directly on a string column.
val wordDf=emailsWithArrayBody.withColumn("words",explode($"body"))
wordDf.show()
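    /* explode also works on map columns, producing one row per entry with "key" and "value" columns.
     * The propsDf data below is a small hypothetical example, not part of the original email dataset. */
    val propsDf=List(("James", Map("dept" -> "IT", "city" -> "Paris"))).toDF("name", "props")
    propsDf.select($"name", explode($"props")).show()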
// The simple way is to transform string type to Array type
def splitStringToArray(input: String):Array[String] = {
input.split(" ")
}
spark.udf.register("splitStringToArray",(arg:String)=>splitStringToArray(arg))
val emailStringBodyTrans=emailsWithStringBody.withColumn("ArrayBody", expr("splitStringToArray(body)"))
emailStringBodyTrans.show()
val wordDfFromString=emailStringBodyTrans.withColumn("words",explode($"ArrayBody"))
wordDfFromString.show()
    /* explode can also be combined with an inline udf that splits the string column: */
    val splitUdf=udf((body:String)=>body.split(" "))
    val wordDf1=emailsWithStringBody.withColumn("words",explode(splitUdf($"body")))
    wordDf1.show()
    /* The deprecated Dataset.explode API, kept here for comparison: */
    val wordDfDep=emailsWithStringBody.explode("body","words"){body:String=>body.split(" ")}
    wordDfDep.show()
}
/**********************************************************************************************************
* ******************************************** 4.5.6.3 Date/Time ***************************************
*********************************************************************************************************/
def DSDateTimeOperations(spark:SparkSession):Unit={
import spark.implicits._
/* The date/time functions make it easy to process columns containing date/time values. These functions
* can be further sub-classified into the following categories:
* - conversion
* - extraction
* - arithmetic
* - miscellaneous
* */
/***********************************************Conversion***************************************************/
    /* The conversion functions convert date/time values from one format to another. For example, you can convert a time
* string yyyy-MM-dd HH:mm:ss to a Unix epoch value using the unix_timestamp function. The from_unixtime function
* converts a Unix epoch value to a string representation. The built-in conversion function include:
* - unix_timestamp
* - from_unixtime
* - to_date
* - quarter
     * - dayofmonth
* - dayofyear
* - weekofyear
* - from_utc_timestamp
* - to_utc_timestamp
* */
    /* Scala doesn't ship its own date/time library, so the Java time classes are used when we need the current time*/
val dateDF=List("2017-09-16","2017-09-17","2017-09-18","2017-12-31","2017-03-20").toDF("date")
dateDF.show()
/**************************************unix_timestamp*****************************************/
    /* unix_timestamp takes a column and an optional second argument giving the date format. If no format is
     * provided, the default format is yyyy-MM-dd HH:mm:ss. It returns null if the conversion fails. The unix
     * timestamp is a bigint holding the number of seconds elapsed since 00:00:00 UTC on 1 January 1970.
     * */
val unix_timeDF=dateDF.withColumn("unix_timestamp",unix_timestamp($"date","yyyy-MM-dd"))
unix_timeDF.show()
/***************************************from_unixtime****************************************/
    /* from_unixtime takes a column and an optional second argument giving the date format. If no format is
     * provided, the default format is yyyy-MM-dd HH:mm:ss. It returns null if the conversion fails.*/
    val backToDateStringDF=unix_timeDF.withColumn("date_string",from_unixtime($"unix_timestamp"))
backToDateStringDF.show()
/******************************************to_date*********************************************/
/* to_date method takes a column of string date, then convert it to a date object. */
val dateObjDf= dateDF.withColumn("dateObj",to_date($"date","yyyy-MM-dd"))
dateObjDf.show()
/****************************************quarter/dayofmonth/dayofyear/weekofyear***********************/
/* quarter method takes a column of date(string,timestamp,dateObj), then returns the quarter of the year
* in the range 1 to 4.
* Example: quarter('2015-04-08') = 2.*/
val quarterDf=dateDF.withColumn("quarter",quarter($"date"))
quarterDf.show()
    /* dayofyear method takes a column of date(string,timestamp,dateObj), then returns the day of the year in the
     * range 1 to 366. Example dayofyear('2017-09-16') = 259 */
val dayDf=dateDF.withColumn("dayOfYear",dayofyear($"date"))
dayDf.show()
/* dayofmonth method takes a column of date(string,timestamp,dateObj), then returns the day of the month in the
* range 1 to 31. Example dayofmonth('2017-09-16') = 16 */
val dayMonDf=dateDF.withColumn("dayOfMonth",dayofmonth($"date"))
dayMonDf.show()
    /* weekofyear method takes a column of date(string,timestamp,dateObj), then returns the week of the year in the
     * range 1 to 53. Example weekofyear('2017-09-16') = 37 */
val weekDf=dateDF.withColumn("weekOfYear",weekofyear($"date"))
weekDf.show()
    /* current_date function returns the current date as a date object. The date_format method
     * converts the date object to a string with the given format*/
val currentDateDf=dateDF.withColumn("currentDate",date_format(current_date(), "y-MM-dd'T'hh:mm:ss.SSS'Z'"))
currentDateDf.show()
/* current_timestamp function returns the current date in type timestamp. */
val currentTimestampDf=dateDF.withColumn("currentTimestamp",current_timestamp())
currentTimestampDf.show()
    /* from_utc_timestamp interprets a timestamp as UTC and renders it in the given time zone. In our case it is
     * "CET"; it can be "EST" or any other time zone code*/
val cetZoneDf=currentTimestampDf.withColumn("CET_Date",from_utc_timestamp($"currentTimestamp","CET"))
cetZoneDf.show()
/************************************Date arithmetic functions****************************************/
/* The arithmetic functions allow you to perform arithmetic operation on columns containing dates. For
* example, you can calculate the difference between two dates, add days to a date, or subtract days from a
* date. The built-in date arithmetic functions include:
* - datediff(end:Column,start:Column) -> Returns the number of days from start to end.
* - date_add(start:Column, int days) -> Returns the date that is days days after start
* - date_sub(start:Column, int days) -> Returns the date that is days days before start
* - add_months(StartDate:Column, numMonths:Int) -> Returns the date that is numMonths after startDate.
* - last_day(date:Column) -> Given a date column, returns the last day of the month which the given date belongs to.
* - next_day(date:Column,dayOfWeek:String) -> Given a date column, returns the first date which is later than the
* value of the date column that is on the specified day of the week.
* For example, next_day('2015-07-27', "Sunday") returns 2015-08-02
* because that is the first Sunday after 2015-07-27.
* Day of the week parameter is case insensitive, and accepts: "Mon",
* "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
* - months_between(date1:Column,date2:Column) -> Returns number of months between dates date1 and date2.
*
* */
    /* datediff only gives the difference between two dates in days; if you want a difference in seconds or hours,
     * convert the dates to unix time and compute end-start. For datediff, the two columns can be of type
     * string, date object, timestamp or unix time; you don't need to convert them to the same type or format*/
val currentDateObj=currentDateDf.withColumn("dateObj",to_date($"date"))
//currentDateObj.show()
val dateDiff=currentDateObj.withColumn("dayDiff",datediff($"currentDate",$"date"))
dateDiff.show()
/* date_add , date_sub example*/
currentDateObj.select(date_add($"currentDate",180)).show()
currentDateDf.select(date_sub($"currentDate",180)).show()
/* next_day*/
currentDateDf.select(next_day($"currentDate","Fri")).show()
/* months_between*/
currentDateDf.select(months_between($"currentDate",$"date")).show()
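    /* A small sketch for two of the remaining arithmetic helpers listed above (add_months, last_day),
     * reusing the same dateDF column. */
    dateDF.select($"date", add_months($"date", 3).alias("plus3Months"), last_day($"date").alias("lastDayOfMonth")).show()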
}
/**********************************************************************************************************
* ******************************************** 4.5.6.4.1 Math functions ***************************************
*********************************************************************************************************/
def DSMathOperations(spark:SparkSession):Unit={
import spark.implicits._
/* The math functions operate on columns containing numerical values. Spark SQL comes with a long list of
     * built-in math functions. Examples include (the return values of the functions below are all Columns):
     * - abs(arg:Column) : Column -> computes the absolute value of the column
     * - acos(arg:Column/cName:String) : Column -> computes the inverse cosine of the given value; the result is in the range 0.0 through pi
* - approx_count_distinct(e:Column/cName:String,double rsd) -> Aggregate function: returns the approximate number
* of distinct items in a group.
* - ceil(e: Column) : Computes the ceiling of the given value.
* - cos(e: Column) : computes the cosine of the given value.
* - exp(e: Column) : Computes the exponential of the given value.
     * - factorial(e:Column) : Computes the factorial of the given value.
* - floor : Computes the floor of the given value
* - hex : Computes hex value of the given column. (base 16)
* - hypot(a:Column,b:Column): Computes sqrt(a^2 + b^2) without intermediate overflow or underflow hypot(3,4)=5
* - log(e:column) : Computes the natural logarithm of the given value.
* - log(base:Double, e:Column): Returns the first argument-base logarithm of the second argument.
* - log10 : Computes the logarithm of the given value in base 10.
* - pow(e1:Column,e2:Column) : Returns the value of the first argument raised to the power of the second argument. e2
* can be a double
* - round : Returns the value of the column e rounded to 0 decimal places with HALF_UP round mode.
* - shiftLeft : Shift the given value numBits left
* - sqrt : Computes the square root of the specified float value.
* and other commonly used math functions
*
     * Floor and ceiling give the nearest integer below and above a number. For example, floor(5.2)=5, ceil(5.2)=6,
     * floor(5)=5, ceil(5)=5*/
val nums=List(NumTest(234.5,1,30),
NumTest(23.45,2,60),
NumTest(2.345,3,90),
NumTest(0.2345,4,180)).toDF()
nums.show()
    /* round is a function that rounds a numeric value to the specified precision. When the given precision is a
     * positive number, the input value is rounded to the decimal position specified by the
     * precision. When the precision is zero or a negative number, the input value is
     * rounded to the position of the integral part specified by the precision.
     *
     * For example round(23.45,0)=23, round(23.45,1)=23.5, round(23.45,-1)=20 */
nums.select(round($"doubleNum",0).alias("round0"),
round($"doubleNum",1).alias("round1"),
round($"doubleNum",-1).alias("round-1")).show()
    /* sin, cos and tan take a numeric column (the value is interpreted as an angle in radians) and return the sine, cosine and tangent of that value*/
nums.select(sin($"angleNum").alias("sin"),
cos($"angleNum").alias("cos"),
tan($"angleNum").alias("tan")).show()
    /* pow takes two arguments: the first is a numeric column, the second is the exponent. It returns the value of
     * the column raised to that power*/
nums.select($"logNum",pow($"logNum",2)).show()
val genNumDf= nums.select($"logNum".alias("id"),
rand(seed=10).alias("uniform1"),
rand(seed=88).alias("uniform2"),
randn(seed=27).alias("normal"))
    // we can chain as many functions as we want in one expression. In the following example we compute
    // the square root of sin(a)^2 + cos(a)^2 (which should always be 1)
val degreeDf=genNumDf.select($"uniform1",sqrt(pow(sin($"uniform1"),2)+pow(cos($"uniform1"),2)))
degreeDf.show()
}
/**********************************************************************************************************
* ******************************************** 4.5.6.4.2 Statistic functions ******************************
*********************************************************************************************************/
def DSStatOperations(spark:SparkSession):Unit={
import spark.implicits._
val nums=List(NumTest(234.5,1,30),
NumTest(23.45,2,60),
NumTest(2.345,3,90),
NumTest(0.2345,4,180)).toDF()
/********************************************Generate random numbers**************************************/
    /* rand and randn generate values from a distribution: rand draws from the uniform distribution,
     * randn from the standard normal distribution*/
val genNumDf= nums.select($"logNum".alias("id"),
rand(seed=10).alias("uniform1"),
rand(seed=88).alias("uniform2"),
randn(seed=27).alias("normal"))
genNumDf.show()
/**************************************Basic stat functions**********************************/
/*mean,min,max,avg takes a numeric column and return the mean, min, max, avg of the column*/
genNumDf.select(mean($"uniform1"),min($"normal"),avg($"normal"),max($"uniform1")).show()
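    /* A short additional sketch: the related spread statistics stddev and variance work the same way. */
    genNumDf.select(stddev($"normal"), variance($"normal")).show()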
/***********************************Covariance ***********************************************/
/* Covariance is a measure of how two variables change with respect to each other. A positive number would mean
*that there is a tendency that as one variable increases, the other increases as well. A negative number would mean
* that as one variable increases, the other variable has a tendency to decrease. The sample covariance of two columns
* of a DataFrame can be calculated as follows:
*
     * Besides stat.cov, Spark also provides covariance aggregate functions:
     * covar_pop -> Aggregate function: returns the population covariance for two columns.
     * covar_samp -> Aggregate function: returns the sample covariance for two columns.*/
val covOfUni=genNumDf.stat.cov("uniform1","uniform2")
println(s"covariance between uniform1 and uniform2: ${covOfUni}")
val covOfId=genNumDf.stat.cov("id","id")
println(s"covariance between id and id: ${covOfId}")
    /* As you can see from the above, the covariance of the two randomly generated columns is close to zero,
     * while the covariance of the id column with itself is very high.
     *
     * A raw covariance value is hard to interpret on its own. Correlation is a normalized measure of covariance
     * that is easier to understand, as it provides a quantitative measure of the statistical dependence */
/* corr is a Aggregate function: returns the Pearson Correlation Coefficient for two columns.*/
val corrOfUni=genNumDf.stat.corr("uniform1","uniform2")
println(s"correlation between uniform1 and uniform2: ${corrOfUni}")
val corrOfId=genNumDf.stat.corr("id","id")
println(s"correlation between id and id: ${corrOfId}")
/******************************************************Cross Tabulation****************************************/
/* Cross Tabulation provides a table of the frequency distribution for a set of variables. Cross-tabulation is
* a powerful tool in statistics that is used to observe the statistical significance (or independence) of variables.
     * Since Spark 1.4, users can cross-tabulate two columns of a DataFrame in order to obtain the counts of
* the different pairs that are observed in those columns. Here is an example on how to use crosstab to
* obtain the contingency table.*/
val names = List("Alice", "Bob", "Mike")
val items = List("milk", "bread", "butter", "apples", "oranges")
val genList=generateDf(names,items,100)
// println(s"genList ${genList.mkString(",")}")
val genDf=genList.toDF()
genDf.stat.crosstab("name","item").show()
/* One important thing to keep in mind is that the cardinality of columns we run crosstab on cannot be too big.
* That is to say, the number of distinct “name” and “item” cannot be too large. Just imagine if “item” contains
* 1 billion distinct entries: how would you fit that table on your screen?!*/
/**********************************************Frequent Items**************************************/
    /* Figuring out which items are frequent in each column can be very useful for understanding a dataset.
     * The stat lib provides the freqItems method, which takes two arguments: the first is a collection of column
     * names (a List, Seq or Array), the second (optional) is the minimum frequency (support) as a fraction. */
val freqItem=genDf.stat.freqItems(List("name","item"),0.4)
val freqName=genDf.stat.freqItems(Seq("name"))
freqItem.show()
freqName.show()
    /* We can combine two columns into a single struct column, then check the frequency of the combined column*/
val genCombineListDf=genDf.withColumn("name_item",struct("name","item"))
genCombineListDf.show(5)
val freqCombine=genCombineListDf.stat.freqItems(Seq("name_item"),0.4)
freqCombine.show()
}
/**********************************************************************************************************
* ******************************************** 4.5.6.5 String functions ***************************************
*********************************************************************************************************/
def DSStringOperations(spark:SparkSession):Unit={
import spark.implicits._
/* Spark SQL provides a variety of built-in functions for processing columns that contain string values. For
* example, you can split, trim or change case of a string. The built-in string functions include:
* - ascii(e:Column): Computes the numeric value of the first character of the string column, and returns the result
* as an int column.
* - base64(e:Column): Computes the BASE64 encoding of a binary column and returns it as a string column.
* - unbase64(e:Column): Decodes a BASE64 encoded string column and returns it as a binary column.
* - concat(e*:Column): Concatenates multiple input columns together into a single column.
* - concat_ws(sep:String,e*:Column): Concatenates multiple input string columns together into a single string column,
* using the given separator. The columns could be in a form of a collection of cols.
* - decode(e:Column,charset:String): Computes the column into a string from a binary using the provided
* character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE',
* 'UTF-16').
* - encode(e:Column,charset:String): Computes the column into a binary from a string using the provided character
* set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
     * - format_number(e:Column,d:Int): Formats numeric column e to a format like '###,###,###.##' (for example
     *                                  123456.78 becomes 123,456.78), rounded to d decimal places with
     *                                  HALF_EVEN round mode, and returns the result as a string column.
     *                                  If d is 0, the result has no decimal point or fractional part. If d is
     *                                  less than 0, the result will be null.
* - format_string(format:String,e*:Column): Formats the argument columns in printf-style and returns the
* result as a string column.
* - get_json_object(e:Column,path:String): Extracts json object from a json string based on json path specified,
* and returns json string of the extracted json object.
* - initcap(e:Column): Returns a new string column by converting the first letter of each word to uppercase.
* - instr(e:Column,subStr:String): Locate the position of the first occurrence of substr column in the given string.
* Returns null if either of the arguments are null.
* - length(e:Column): Computes the character length of a given string or number of bytes of a binary string.
* The length of character strings include the trailing spaces. The length of binary strings
* includes binary zeros.
* - levenshtein(l:Column,r:Column): Computes the Levenshtein distance of the two given string columns. Informally,
* the Levenshtein distance between two words is the minimum number of
* single-character edits (insertions, deletions or substitutions) required
* to change one word into the other
* - locate(subStr:String,str:Column): Locate the position of the first occurrence of substr of a string column.
* - lower(e:Column): Converts a string column to lower case.
* - upper(e:Column): Converts a string column to upper case.
* - lpad(e:Column,len:Int,pad:String): Left-pad the string column with pad to a length of len.
* - rpad(e:Column,len:Int,pad:String): Right-pad the string column with pad to a length of len.
* - ltrim(e:Column): Trim the spaces from left end for the specified string value. " a " => "a "
* - rtrim(e:Column,trimStr:String): Trim the specified character string from right end for the specified string column.
* if the trimStr arg is empty, it will trim the space. " a " => " a",
* - trim(e:Column,trimStr:String): Trim the specified character from both ends for the specified string column.
* if the trimStr arg is empty, it will trim the space. " a " => "a"
* - singleSpace(e:Column): Can removes all extra space between words. This come from a 3rd party lib spark-daria
* https://github.com/MrPowers/spark-daria/
* - regexp_extract(e:Column,exp:String,groupIdx:Int): Extract a specific group matched by a Java regular expr, from
* the specified string column.
* - regexp_replace(e:Column,pattern:Column,replacement:Column): Replace all substrings of the specified string value
* that match regexp with replacement
* - repeat(e:Column,n:Int): Repeats a string column n times and returns it as a new string column.
* - reverse(e:Column): Reverses the string column and returns it as a new string column.
* - soundex(e:Column): Returns the soundex code for the specified expression. Soundex is a phonetic algorithm for
* indexing words(string) by sound in English. It can match words despite minor differences in
* spelling. For example both "Robert" and "Rupert" return the same string "R163", while "Rubin"
* yields "R150". So Robert and Rupert is more similar than Rubin.
* - split(e:Column,pattern:String): Splits string column around pattern (pattern is a regular expression)
* - substring(e:Column,position:Int,len:Int): Substring starts at pos and is of length len when str is String type
* or returns the slice of byte array that starts at pos in byte and is
* of length len when column is Binary type
* - substring_index(e:Column,delim:String,count:Int): Returns the substring from string str before count occurrences
* of the delimiter delim.
* - translate(src:Column,matchStr:String,replaceStr:String): Translate any character in the src by a character in
* replaceString.
* and other commonly used string functions.*/
val nasdaqDf=List(Nasdaq(1,"1347 Captial Corp.","TFSC","9.43","$56.09M",56090000,"2014","Finance","Business Service",
"http://www.nasdaq.com/symbol/tfsc"),
Nasdaq(2,"1347 Property Insurance.","PIH","7.66","$48.7M",48700000,"n/a","Finance","Property-Casualty Insurance",
"http://www.nasdaq.com/symbol/pih"),
Nasdaq(300,"1-800 FLOWERS.COM","FLWS","10.32","$667.78M",667780000,"1999","Consumer Services","Other Speciality",
"http://www.nasdaq.com/symbol/flws")
).toDF()
nasdaqDf.show()
    /*For example, if we want to fix the length of clientID (e.g. 1 -> 00000001), we can use functions such as lpad
     * and rpad. The number of padding characters is determined automatically to reach the target length of 8 */
nasdaqDf.select($"clientID",lpad($"clientID",8,"0").alias("new_clientID")).show()
nasdaqDf.select($"symbol",rpad($"symbol",6,"0").alias("new_symbol")).show()
    /* To render a big integer as a string with thousands separators, we can use format_number*/
nasdaqDf.select($"marketCapAmount",format_number($"marketCapAmount",0)).show()
    /* format_string helps to build a new string based on existing columns. In the following example, we add "Year: "
     * in front of ipoYear*/
nasdaqDf.select($"ipoYear",format_string("Year: %s",$"ipoYear")).show()
/* reverse every letter of the string*/
nasdaqDf.select($"name",reverse($"name")).show()
    /* trim only removes leading and trailing whitespace; spaces between words are preserved*/
nasdaqDf.select($"name",trim($"name")).show()
/* split example*/
nasdaqDf.select($"industry",split($"industry"," ")).show()
    /* The third argument of regexp_extract, groupIdx, is the group index of the regular expression. For example, in
     * the regular expression (A)(B)(C)(D), A has groupIdx 1, B=2, C=3, D=4. Check https://regexr.com/ for an
     * explanation of how regular expressions work*/
nasdaqDf.select($"summaryQuote",regexp_extract($"summaryQuote","(http://)(.*?com)(/symbol/)([a-z]*)",2)).show(false)
    // We can also replace a substring in a column that matches a regular expression. The 2nd and 3rd args can be a
    // column or a string. The following example replaces http by https
nasdaqDf.select(regexp_replace($"summaryQuote","http://","https://")).show()
//Here we replace url.com by pengfei.org
nasdaqDf.select(regexp_replace($"summaryQuote","[a-z]*.com","pengfei.org")).show(false)
//We can also use translate to replace string column.
nasdaqDf.select($"ipoYear",translate($"ipoYear","n/a","8888")).show()
/*get levenshtein value between industry and sector*/
nasdaqDf.select(levenshtein($"industry",$"sector")).show()
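    /* A minimal sketch of a few more of the listed helpers (concat_ws, initcap, lower, soundex), using the
     * same nasdaqDf columns. */
    nasdaqDf.select(concat_ws(" - ", $"symbol", $"name").alias("label"),
      initcap(lower($"sector")).alias("sectorInitcap"),
      soundex($"name").alias("nameSoundex")).show(false)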
/******************************************SubString************************************************/
    nasdaqDf.withColumn("Sub_ipoYear", substring($"ipoYear",0,2)).show()
}
/**********************************************************************************************************
* ******************************************** 4.5.6.6 Window functions ***************************************
*********************************************************************************************************/
def DSWindowOperations(spark:SparkSession):Unit={
import spark.implicits._
/* empDF is the normal data , empDF1 has the same deptno for all users*/
val empDF = spark.createDataFrame(Seq(
(7369, "SMITH", "CLERK", 7902, "17-Dec-80", 800, 20, 10),
(7499, "ALLEN", "SALESMAN", 7698, "20-Feb-81", 1600, 300, 30),
(7521, "WARD", "SALESMAN", 7698, "22-Feb-81", 1250, 500, 30),
(7566, "JONES", "MANAGER", 7839, "2-Apr-81", 2975, 0, 20),
(7654, "MARTIN", "SALESMAN", 7698, "28-Sep-81", 1250, 1400, 30),
(7698, "BLAKE", "MANAGER", 7839, "1-May-81", 2850, 0, 30),
(7782, "CLARK", "MANAGER", 7839, "9-Jun-81", 2450, 0, 10),
(7788, "SCOTT", "ANALYST", 7566, "19-Apr-87", 3000, 0, 20),
(7839, "KING", "PRESIDENT", 0, "17-Nov-81", 5000, 0, 10),
(7844, "TURNER", "SALESMAN", 7698, "8-Sep-81", 1500, 0, 30),
(7876, "ADAMS", "CLERK", 7788, "23-May-87", 1100, 0, 20)
)).toDF("empno", "ename", "job", "mgr", "hiredate", "sal", "comm", "deptno")
val empDF1 = spark.createDataFrame(Seq(
(7369, "SMITH", "CLERK", 7902, "17-Dec-80", 800, 20, 10),
(7499, "ALLEN", "SALESMAN", 7698, "20-Feb-81", 1600, 300, 10),
(7521, "WARD", "SALESMAN", 7698, "22-Feb-81", 1250, 500, 10),
(7566, "JONES", "MANAGER", 7839, "2-Apr-81", 2975, 0, 10),
(7654, "MARTIN", "SALESMAN", 7698, "28-Sep-81", 1250, 1400, 10),
(7698, "BLAKE", "MANAGER", 7839, "1-May-81", 2850, 0, 10),
(7782, "CLARK", "MANAGER", 7839, "9-Jun-81", 2450, 0, 10),
(7788, "SCOTT", "ANALYST", 7566, "19-Apr-87", 3000, 0, 10),
(7839, "KING", "PRESIDENT", 0, "17-Nov-81", 5000, 0, 10),
(7844, "TURNER", "SALESMAN", 7698, "8-Sep-81", 1500, 0, 10),
(7876, "ADAMS", "CLERK", 7788, "23-May-87", 1100, 0, 10)
)).toDF("empno", "ename", "job", "mgr", "hiredate", "sal", "comm", "deptno")
/* ******************************************** 4.5.6.6.1 Window functions Intro**********************************/
/* Spark SQL supports window functions for analytics. A window function performs a calculation across a set
* of rows that are related to the current row. The built-in window functions provided by Spark SQL include two
* categories:
* 1. Ranking functions:
* - rank: returns the rank of rows within a window partition
* - dense_rank: returns the rank of rows within a window partition, without any gaps. For example,
* if you were ranking a competition using dense_rank and had three people tie for second place,
* you would say that all three were in second place and that the next person came in third.
     *               Rank, by contrast, gives sequential numbers, so the person right after the ties
     *               would be ranked fifth.
*
* - percent_rank: returns the relative rank (i.e. percentile) of rows within a window partition.
* - ntile(n:Int): returns the ntile group id (from 1 to n inclusive) in an ordered window partition. For
* example, if n is 4, the first quarter of the rows will get value 1, the second quarter will
* get 2, the thirds quarter will get 3, and the last will get 4. If the rows are less than n, it
* works too.
* - row_number: returns a sequential number starting at 1 within a window partition.
* 2. Analytic functions:
* - cume_dist: returns the cumulative distribution of values within a window partition, i.e. the fraction
* of rows that are below the current row. N = total number of rows in the partition.
* cumeDist(x) = number of values before (and including) x / N. similar to percent_rank()
*
* - lag(e:Column,offset:Int,defaultValue:Object): returns the value that is offset rows before the current row, and null if there
* is less than offset rows before row. For example, an offset of one will return the previous row at
* any given point in the window partition. The defaultValue is optional
* - lead(e:Column,offset:Int): returns the value that is offset rows after the current row, and null if
* there is less than offset rows after the current row. For example, an offset of one will return
* the next row at any given point in the window partition.
* - currentRow(): Window function: returns the special frame boundary that represents the current row in
* the window partition.
* 3. Aggregation functions:
     *       - sum(e:Column): returns the sum of the selected column within each partition.
* - first(e:Column): returns the first value within each partition.
* - last(e:Column): returns the last value within each partition.
     * */
/* ******************************************** 4.5.6.6.2 Window specification Intro **********************************/
/* To use window functions, we need to create a window specification. A window specification defines which rows
* are included in the frame associated with a given input row. A window specification includes three parts:
* 1. Partitioning specification: controls which rows will be in the same partition with the given row.
* Also, the user might want to make sure all rows having the same value for the category column are
* collected to the same machine before ordering and calculating the frame. If no partitioning specification
* is given, then all data must be collected to a single machine.
*
* 2. Ordering specification: controls the way that rows in a partition are ordered, determining the position of the
* given row in its partition.
*
* 3. Frame specification: states which rows will be included in the frame for the current input row, based on their
* relative position to the current row. For example, "the three rows preceding the current
* row to current row" describes a frame including the current input row and three rows
* appearing before the current row.
*
     * In spark SQL, the partitioning specification is defined by the keyword partitionBy and the ordering specification
     * by orderBy. The following example shows a window partitioned by department number and ordered by salary.
     * We don't define a frame specification here, so the default frame is used. */
/*********************************** 4.5.6.6.3 Window functions example(Ranking) **********************************/
    /* All the ranking functions operate only on the rows inside each partition (and do so for every partition). For
     * example, the job column has 5 distinct values: analyst, salesman, clerk, manager, president, so there are 5
     * partitions and the window function is computed per partition: when we rank on salary there is one ranking for
     * analysts, one for salesmen, etc. In empDF1 I changed the deptno so that all rows belong to the same department,
     * hence a single partition (and frame); as a result there is only one salary ranking. */
/*********************************rank/denseRank/percentRank*****************************************/
    /* To be able to rank, we need to create a window specification. In this example, we create two window specifications:
     * depNoWindow is partitioned by department number, jobWindow is partitioned by job type*/
val depNoWindow=Window.partitionBy($"deptno").orderBy($"sal".desc)
val jobWindow=Window.partitionBy($"job").orderBy($"sal".desc)
/* After creation of window specification, we can do rank in depNoWindow*/
val rankTest=rank().over(depNoWindow)
val denseRankTest=dense_rank().over(depNoWindow)
val percentRankTest=percent_rank().over(depNoWindow)
empDF.select($"*", rankTest as "rank",denseRankTest as "denseRank",percentRankTest as "percentRank").show()
    /* As we partitioned the data by the deptno column, in the empDF example rank produces a ranking per department.
empDF result:
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
|empno| ename| job| mgr| hiredate| sal|comm|deptno|rank|denseRank|percentRank|
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
| 7788| SCOTT| ANALYST|7566|19-Apr-87|3000| 0| 20| 1| 1| 0.0|
| 7566| JONES| MANAGER|7839| 2-Apr-81|2975| 0| 20| 2| 2| 0.5|
| 7876| ADAMS| CLERK|7788|23-May-87|1100| 0| 20| 3| 3| 1.0|
| 7839| KING|PRESIDENT| 0|17-Nov-81|5000| 0| 10| 1| 1| 0.0|
| 7782| CLARK| MANAGER|7839| 9-Jun-81|2450| 0| 10| 2| 2| 0.5|
| 7369| SMITH| CLERK|7902|17-Dec-80| 800| 20| 10| 3| 3| 1.0|
| 7698| BLAKE| MANAGER|7839| 1-May-81|2850| 0| 30| 1| 1| 0.0|
| 7499| ALLEN| SALESMAN|7698|20-Feb-81|1600| 300| 30| 2| 2| 0.25|
| 7844|TURNER| SALESMAN|7698| 8-Sep-81|1500| 0| 30| 3| 3| 0.5|
| 7521| WARD| SALESMAN|7698|22-Feb-81|1250| 500| 30| 4| 4| 0.75|
| 7654|MARTIN| SALESMAN|7698|28-Sep-81|1250|1400| 30| 4| 4| 0.75|
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
empDF1 result: all users belong to one dept
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
|empno| ename| job| mgr| hiredate| sal|comm|deptno|rank|denseRank|percentRank|
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
| 7839| KING|PRESIDENT| 0|17-Nov-81|5000| 0| 10| 1| 1| 0.0|
| 7788| SCOTT| ANALYST|7566|19-Apr-87|3000| 0| 10| 2| 2| 0.1|
| 7566| JONES| MANAGER|7839| 2-Apr-81|2975| 0| 10| 3| 3| 0.2|
| 7698| BLAKE| MANAGER|7839| 1-May-81|2850| 0| 10| 4| 4| 0.3|
| 7782| CLARK| MANAGER|7839| 9-Jun-81|2450| 0| 10| 5| 5| 0.4|
| 7499| ALLEN| SALESMAN|7698|20-Feb-81|1600| 300| 10| 6| 6| 0.5|
| 7844|TURNER| SALESMAN|7698| 8-Sep-81|1500| 0| 10| 7| 7| 0.6|
| 7521| WARD| SALESMAN|7698|22-Feb-81|1250| 500| 10| 8| 8| 0.7|
| 7654|MARTIN| SALESMAN|7698|28-Sep-81|1250|1400| 10| 8| 8| 0.7|
| 7876| ADAMS| CLERK|7788|23-May-87|1100| 0| 10| 10| 9| 0.9|
| 7369| SMITH| CLERK|7902|17-Dec-80| 800| 20| 10| 11| 10| 1.0|
+-----+------+---------+----+---------+----+----+------+----+---------+-----------+
*/
/* repeat above process with Job window*/
val rankTestJob=rank().over(jobWindow)
val denseRankTestJob=dense_rank().over(jobWindow)
val percentRankJob=percent_rank().over(jobWindow)
empDF.select($"*", rankTestJob as "rank",denseRankTestJob as "denseRank",percentRankJob as "percentRank").show()
/*************************************** ntile ***********************************/
    /* In the following ntile example, we divide each partition into 6 tiles (empDF1 has a single deptno and hence
     * one partition, while empDF has multiple deptno values and therefore multiple partitions).
     * */
val ntileTest=ntile(6).over(depNoWindow)
    /* row_number over the department-number window: the numbering restarts at 1 for each partition*/
val row_numberTest=row_number().over(depNoWindow)
empDF.select($"*", ntileTest as "ntile", row_numberTest as "row_number").show()
    /* The ntile/row_number on the job window (5 partitions)*/
val ntilJob=ntile(6).over(jobWindow)
val rowNumberJob=row_number().over(jobWindow)
empDF1.select($"*",ntilJob as "ntileJob",rowNumberJob as "rowNumberJob").show()
/*********************************** 4.5.6.6.3 Window functions example(Analytics) *******************************/
/***********************************lag/lead*************************************************/
    /* lag/lead are window functions, so they must be applied with .over(windowSpec) */
    val lagTest=lag($"sal",1,0).over(depNoWindow)
    val leadTest=lead($"sal",2,0).over(depNoWindow)
    empDF1.select($"*", lagTest as "lagSalValue", leadTest as "leadSalValue").show()
/*********************************cume_dist*******************************************/
val cumeDistTest=cume_dist().over(depNoWindow)
empDF.select($"*",cumeDistTest as "Cume Dist").show()
//currentRow works for spark 2.3.0
//empDF.select($"*",currentRow().over(depNoWindow) as "currentRow").show
/************************************4.5.6.6.4 Use aggregate function in window***********************************/
// The aggregation function will do the aggregation inside each partition. Each row stores the aggregated return
// value
val sumTest=sum($"sal").over(depNoWindow)
empDF.select($"*", sumTest as "sum_of_sal").show()
/******************************first/last ******************************************/
val firstValue=first($"sal").over(depNoWindow)
val lastValue=last($"sal").over(depNoWindow)
empDF1.select($"*",firstValue as "firstValue", lastValue as "lastValue").show()
empDF.select($"*",firstValue as "firstValue", lastValue as "lastValue").show()
    /* The last function does not behave as you might expect here: with an orderBy and no explicit frame, the default
     * frame only runs from the start of the partition to the current row, so last returns the current row's value;
     * only the last row of each partition therefore shows the "real" last value. */
/********************************************* 4.5.6.6.5 frame specification **********************************/
/* There are two types of frame: ROWframe, RANGEframe. A row frame specification is defined by keyword rowsBetween().
* A range frame specification is defined by keyword rangeBetween().
*
* https://databricks.com/blog/2015/07/15/introducing-window-functions-in-spark-sql.html This page explains well the
* difference between row frame and range frame
*
     * Row frames: are based on physical offsets from the position of the current input row. We can use <value> PRECEDING
     *             and <value> FOLLOWING to describe the number of rows appearing before and after the current input row.
     *             You can also use CURRENT ROW to describe a boundary. The size of the frame is therefore fixed; for example
     *             the frame (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) always has three rows.
*
*
     * RANGE frame: is based on logical offsets from the position of the current input row. A logical offset is the
     *              difference between the value of the ordering expression of the current input row and the value of
     *              that same expression in the boundary row of the frame. The size of the frame therefore varies with
     *              the value of the current row.
* */
val rowFrameWindow=Window.partitionBy($"deptno").orderBy($"sal".desc).rowsBetween(Window.currentRow,Window.unboundedFollowing)
val lastOfFrame=last($"sal").over(rowFrameWindow)
empDF.select($"*",lastOfFrame as "last_val").show()
/*Now the last val is correct*/
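    /* A small sketch of the RANGE frame described above (the 500 offset is an arbitrary example value): for each row,
     * sum the salaries of the rows in the same department whose salary lies within [current sal - 500, current sal]. */
    val rangeFrameWindow=Window.partitionBy($"deptno").orderBy($"sal").rangeBetween(-500, 0)
    empDF.select($"*", sum($"sal").over(rangeFrameWindow) as "sal_range_sum").show()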
}
/**********************************************************************************************************
* ******************************************** 4.5.6.7 miscellaneous functions ***************************************
*********************************************************************************************************/
def DSMiscellaneousOperations(spark:SparkSession):Unit={
import spark.implicits._
/* Spark Sql supports also many other functions:
     * - crc32(e:Column):bigInt : Calculates the cyclic redundancy check value (CRC32) of a binary/String column and returns the
     *                            value as a bigint. It is often used for data integrity checking
* - md5(e:Column):String : Calculates the MD5 digest of a binary/String column and returns the value as a 32
* character hex string, or NULL if the argument was NULL
* - sha1(e:Column):String : Calculates the SHA-1 digest of a binary/String column and returns the value as a 40 character
* hex string.
* - sha2(e:Column,numBits:Int) : Calculates the SHA-2 family of hash functions of a binary/String value and returns
* the value as a hex string. NumBits controls the number of bits in the message
     *                                digest. numBits must be one of 224, 256, 384, or 512.
* */
val df=List("I love","eat","apple","and","orange").toDF("word")
df.show()
df.select($"word",crc32($"word")).show()
df.select($"word",md5($"word")).show()
df.select($"word",sha1($"word")).show()
df.select($"word",sha2($"word",512)).show()
}
def generateDf(names:List[String],items:List[String],size:Int):List[crossTab]={
var loopSize=size
val random=scala.util.Random
var result:List[crossTab] = List[crossTab]()
while(loopSize>0){
val name_indice=random.nextInt(names.length)
val item_indice=random.nextInt(items.length)
val element=crossTab(loopSize,names(name_indice),items(item_indice))
result=element::result
loopSize=loopSize-1
}
return result
}
/**********************************************************************************************************
* ******************************************** 4.5.6.8 User define functions ****************************
*********************************************************************************************************/
def DSUDFOperations(spark:SparkSession):Unit={
import spark.implicits._
val empDF = spark.createDataFrame(Seq(
(7369, "SMITH", "CLERK", 7902, "17-Dec-80", 800, 20, 10),
(7499, "ALLEN", "SALESMAN", 7698, "20-Feb-81", 1600, 300, 30),
(7521, "WARD", "SALESMAN", 7698, "22-Feb-81", 1250, 500, 30),
(7566, "JONES", "MANAGER", 7839, "2-Apr-81", 2975, 0, 20),
(7654, "MARTIN", "SALESMAN", 7698, "28-Sep-81", 1250, 1400, 30),
(7698, "BLAKE", "MANAGER", 7839, "1-May-81", 2850, 0, 30),
(7782, "CLARK", "MANAGER", 7839, "9-Jun-81", 2450, 0, 10),
(7788, "SCOTT", "ANALYST", 7566, "19-Apr-87", 3000, 0, 20),
(7839, "KING", "PRESIDENT", 0, "17-Nov-81", 5000, 0, 10),
(7844, "TURNER", "SALESMAN", 7698, "8-Sep-81", 1500, 0, 30),
(7876, "ADAMS", "CLERK", 7788, "23-May-87", 1100, 0, 20)
)).toDF("empno", "ename", "job", "mgr", "hiredate", "sal", "comm", "deptno")
/* Spark SQL allows user-defined functions (UDFs) and user-defined aggregation functions (UDAFs). Both
* UDFs and UDAFs perform custom computations on a dataset.
* - UDF performs custom computation one row at a time and returns a value for each row.
* - UDAF applies custom aggregation over groups of rows.
* UDFs and UDAFs can be used just like the built-in functions after they have been registered with Spark SQL.*/
/***************************************** 4.5.6.8.1 User define functions(UDFs) *****************************/
    /* To use a udf, you need to do 3 things:
     * - 1. define your function (a code block which can be called from another scala script; it can be a class, object,
     *      a method, a function, etc.)
     * - 2. Register your function with the spark session.
     * - 3. Use the registered function where it is allowed (for example inside expr, selectExpr or a SQL query)
     *
     * In the following example, we define a function which calculates the net salary*/
val taxRate:Double=0.15
    //You need to specify the argument types explicitly, otherwise it won't work
spark.udf.register("getNetSal",(sal:Int,taxRate:Double)=>getNetSal(sal,taxRate))
// lit function is for adding literal values as column
val taxDF=empDF.withColumn("taxRate",lit(taxRate))
//taxDF.show()
taxDF.withColumn("netSal",expr("getNetSal(sal,taxRate)")).show()
/* It's also possible to use udf in filter*/
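    /* A quick sketch of the filter usage mentioned above, reusing the UDF registered earlier through expr();
     * the 2000 threshold is an arbitrary example value. */
    taxDF.filter(expr("getNetSal(sal, taxRate) > 2000")).show()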
/****************************** 4.5.6.8.3 User define aggregation functions(UDAFs) *****************************/
    /* In order to write a UDAF, we need to extend the class UserDefinedAggregateFunction and override the following methods
* - initialize: On a given node, this method is called once for each group (after groupBy)
* - update: For a given group, spark will call “update” for each input record of that group.
* - merge: if the function supports partial aggregates, spark might (as an optimization) compute partial
* result and combine them together.
* - evaluate: Once all the entries for a group are exhausted, spark will call evaluate to get the final result.
*
* The execution order can vary in the following two ways:
* - 1. No partial aggregates (or combiner)
* initialize->update->update->...->evaluate No merge needed
     * - 2. Supports partial aggregates. Merges can happen between groups, and merge results can be merged again;
     *      finally, evaluate returns the final value
* initialize->update->update->...->merge-> merge01 ->merge012
* initialize1->update1->update1->...->merge1->merge01->merge012
* initialize2->update2->update2->... ->merge2->merge012
*
     * I use the classes GeometricMean and CustomArthMean to illustrate how to write UDAFs. */
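    /* A minimal, hedged sketch of what such a UDAF class could look like (the SimpleArithMean name and its
     * schema are hypothetical; the real GeometricMean and CustomArthMean used below are defined elsewhere in
     * this project). It is shown only to illustrate the four methods described above. */
    import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
    import org.apache.spark.sql.types.{DataType, DoubleType, LongType, StructField, StructType}
    import org.apache.spark.sql.Row
    class SimpleArithMean extends UserDefinedAggregateFunction {
      // Input schema: a single double column to aggregate
      def inputSchema: StructType = StructType(StructField("value", DoubleType) :: Nil)
      // Internal buffer: a running sum and a running count
      def bufferSchema: StructType = StructType(StructField("sum", DoubleType) :: StructField("count", LongType) :: Nil)
      // Type of the final result
      def dataType: DataType = DoubleType
      def deterministic: Boolean = true
      // Called once per group to reset the buffer
      def initialize(buffer: MutableAggregationBuffer): Unit = { buffer(0) = 0.0; buffer(1) = 0L }
      // Called for every input row of the group
      def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
        if (!input.isNullAt(0)) {
          buffer(0) = buffer.getDouble(0) + input.getDouble(0)
          buffer(1) = buffer.getLong(1) + 1L
        }
      }
      // Combines two partial aggregation buffers
      def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
        buffer1(0) = buffer1.getDouble(0) + buffer2.getDouble(0)
        buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
      }
      // Produces the final value once all rows of the group have been processed
      def evaluate(buffer: Row): Any = if (buffer.getLong(1) == 0L) 0.0 else buffer.getDouble(0) / buffer.getLong(1)
    }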
    //declare and register UDAFs
val geometricMean=new GeometricMean
spark.udf.register("gm",geometricMean)
val customArthMean=new CustomArthMean
spark.udf.register("am",customArthMean)
val ids=spark.sqlContext.range(1,20)
// ids.printSchema()
// println(s"ids value: ${ids.collect().mkString(",")}")
val df=ids.select($"id",$"id" % 3 as "group_id")
df.orderBy($"group_id".desc).show()
    // We usually use an aggregation function after groupBy, but it also works without
df.groupBy("group_id").agg(expr("gm(id) as GeometricMean")).show()
df.groupBy("group_id").agg(expr("am(id) as ArthmeticMean")).show()
df.agg(expr("am(id) as ArthmeticMean")).show()
}
def getNetSal(sal:Int,taxRate:Double):Double={
return sal*(1.00-taxRate)
}
case class crossTab(id:Int,name:String,item:String)
case class NumTest(doubleNum:Double,logNum:Int,angleNum:Int)
case class Nasdaq(clientID:Int,name:String,symbol:String,lastSale:String,marketCapLabel:String,marketCapAmount:Long,
ipoYear:String,sector:String,industry:String,summaryQuote:String)
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson19_NLP/Lesson19_Keyword_extraction.scala | <filename>LearningSpark/src/main/java/org/pengfei/Lesson19_NLP/Lesson19_Keyword_extraction.scala<gh_stars>0
package org.pengfei.Lesson19_NLP
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{DataFrame, SparkSession}
object Lesson19_Keyword_extraction {
def main(args:Array[String]):Unit= {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().appName("Lesson19_Keyword_extraction").master("local[2]").getOrCreate()
import spark.implicits._
/************************************************************************************************
* ******************************* Lesson19 Keyword extraction *********************************
* ******************************************************************************************/
    /* In this lesson, we will learn how to extract keywords from articles, since keywords provide a concise representation
    of an article's content. Keywords also play a crucial role in locating an article in information retrieval
    systems and bibliographic databases, and for search engine optimization. Keywords also help to categorize the article
into the relevant subject or discipline. We will use NLP techniques on a collection of articles to extract keywords
About the dataset
The dataset which we use is from Kaggle (https://www.kaggle.com/benhamner/nips-papers/home). Neural Information
Processing Systems (NIPS) is one of the top machine learning conferences in the world. This dataset includes the
title and abstracts for all NIPS papers to date (ranging from the first 1987 conference to the current 2016 conference).
The nips-papers.csv contains the following columns:
- id,
- year,
- title,
- event_type,
- pdf_name : the name of the pdf file
- abstract : abstract of the paper
- paper_text : paper main content body
    In this lesson, we focus on the concept of keyword extraction, so we only use the abstracts of these articles to extract
    keywords, because processing the full text is time consuming
*/
/************************** 19.0 key stages of keyword extraction ************************/
/*
1. Text pre-processing
a. noise removal
b. normalisation
2. Data Exploration
a. Word cloud to understand the frequently used words
b. Top 20 single words, bi-grams and tri grams
3. Convert text to a vector of word counts
4. Convert text to a vector of term frequencies
5. Sort terms in descending order based on term frequencies to identify top N keywords
*/
/************************ 19.1 Preliminary text exploration ***********************************/
    /* The original papers.csv was exported from pandas with the default delimiter ",", which causes conflicts
     * with the main text column, which itself contains many ",". Reading it directly with spark therefore causes
     * problems, while reading it with pandas works; so we read papers.csv with pandas, select the id, year, title
     * and abstract columns and export them to csv (abstract1.csv) with "|" as the delimiter.
     * */
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val filePath=s"${path}/pyspark/Lesson2_Keyword_Extraction/abstract1.csv"
val rawDf = spark.read
.option("inferSchema", true)
.option("header",true)
.option("nullValue"," ")
.option("encoding", "UTF-8")
.option("delimiter","|")
.csv(filePath)
//rawDf.show(5)
/* we select only id, year, title and abstract as columns, then we merge the title and abstract to a new column
abstract1 */
val rawCleanDf=replaceSpecValue(rawDf,Array("abstract"),"Abstract Missing","")
//rawCleanDf.show(5)
val df=rawCleanDf.withColumn("abstract1",concat($"title",lit(" "),$"abstract"))
.drop($"title")
.drop($"abstract")
df.show(5)
val totalWordCountDf=df.withColumn("word_count",size(split($"abstract1"," ")))
totalWordCountDf.show(5)
val wordsExplodeDf=df.withColumn("words",explode(split($"abstract1"," ")))
wordsExplodeDf.show(5,false)
val wordOccurrence=wordsExplodeDf.groupBy("words").count().sort(desc("count"))
wordOccurrence.show(10,false)
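    /* A small hedged sketch of the pre-processing step listed above (normalisation + noise removal):
     * lower-case the text and strip non-letter characters before splitting, so that e.g. "Learning," and
     * "learning" are counted as the same word. The column name cleanAbstract is introduced here for illustration. */
    val normalizedDf=df.withColumn("cleanAbstract", regexp_replace(lower($"abstract1"), "[^a-z\\s]", " "))
    val cleanWordOccurrence=normalizedDf
      .withColumn("words", explode(split($"cleanAbstract", "\\s+")))
      .filter($"words" =!= "")
      .groupBy("words").count().sort(desc("count"))
    cleanWordOccurrence.show(10, false)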
}
/**
* This function takes a data frame, a list of column names, a old value, and a new value, it will replace the old
* value by the new value in all given columns of the data frame.
*
* @author <NAME>
* @version 1.0
* @since 2018-12-20
* @param rawDf The source data frame.
* @param colNames A list of column names
* @param specValue A string value which needs to be replaced
* @param newValue A string value which will repalce the old value
* @return DataFrame
* */
def replaceSpecValue(rawDf:DataFrame,colNames:Array[String],specValue:String,newValue:String):DataFrame={
/*Step 0 : cast all column to string*/
val spark=rawDf.sparkSession
val df=rawDf.select(rawDf.columns.map(c=>col(c).cast(StringType)):_*)
/*Step 1 : transform spec value to null*/
var result=df
for(colName<-colNames){
val newColName=colName+"_tmp"
      result=result.withColumn(newColName, when(result(colName) === specValue, newValue).otherwise(result(colName))) //create a tmp column where specValue is replaced by newValue
.drop(colName) //drop the old column
.withColumnRenamed(newColName,colName) // rename the tmp to colName
}
result
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_Exc02_UDAF.scala | package org.pengfei.Lesson04_Spark_SQL
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson04_Exc02_UDAF {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson04_Exc02_UDAF").master("local[2]").getOrCreate()
import spark.implicits._
val data=spark.sparkContext.parallelize(Array(
("B",List(3,4)),
("C",List(3,5)),
("A",List(2,6)),
("B",List(3,11,4,9)),
("C",List(5,6)),
("A",List(2,10,7,6))
)).toDF("key","device")
data.show()
val mergeUDAF=new MergeListsUDAF()
val dataAfterMerge=data.groupBy("key").agg(mergeUDAF($"device"))
dataAfterMerge.show()
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/classification/LinearRegressionOnCalHousing.scala | package org.pengfei.spark.ml.classification
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
import org.apache.spark.ml.regression.LinearRegression
import org.pengfei.spark.ml.data.preparation.NullValueEliminator
object LinearRegressionOnCalHousing {
def main(args:Array[String]): Unit ={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("CalHousing").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val inputFile = "file:///DATA/data_set/spark/Cali_Housing"
val schema = StructType(Array(
StructField("Logitude", DoubleType, true),
StructField("Latitude", DoubleType, true),
StructField("HousingMedianAge", DoubleType, true),
StructField("TotalRooms", DoubleType, true),
StructField("TotalBedrooms", DoubleType, true),
StructField("Population", DoubleType, true),
StructField("Households", DoubleType, true),
StructField("MedianIncome", DoubleType, true),
StructField("MedianHouseValue", DoubleType, true)))
//Get the csv file as dataframe
val housingDF=spark.read.format("csv").option("delimiter",",").schema(schema).load(inputFile)
housingDF.show(5)
val features=Array("Logitude","Latitude","HousingMedianAge","TotalRooms","TotalBedrooms")
val nullValueEliminator = new NullValueEliminator()
val cleanDF=nullValueEliminator.removeNullValueOfFeatureColumns(housingDF,features)
/*for(feature <- features){
housingDF=housingDF.filter(housingDF(feature).isNotNull)
}*/
//nonNullDF.filter("Logitude is null").show
//Split dataset into trainingData and testData
val Array(trainingData, testData) = cleanDF.randomSplit(Array(0.8, 0.2))
//set label column
val labelColumn = "MedianHouseValue"
//define assembler to collect the columns into a new column with a single vector - "features"
    val assembler = new VectorAssembler().setInputCols(Array("Longitude", "Latitude","HousingMedianAge","TotalRooms","TotalBedrooms","Population","Households","MedianIncome"))
.setOutputCol("features")
//define the linear regression model
val lr=new LinearRegression().setMaxIter(10).setRegParam(0.3).setElasticNetParam(0.8)
.setLabelCol(labelColumn)
.setFeaturesCol("features")
.setPredictionCol("Predicted "+ labelColumn)
//define pipeline stages
val stages = Array(assembler,lr)
//Construct the pipeline
val pipeline = new Pipeline().setStages(stages)
//Fit training data to pipeline
val model = pipeline.fit(trainingData)
//Get prediction of testing data
val predictions = model.transform(testData)
//Evaluate the model error/deviation of the regression using the Root Mean Squared deviation
val evaluator = new RegressionEvaluator()
.setLabelCol(labelColumn)
.setPredictionCol("Predicted " + labelColumn)
.setMetricName("rmse")
//compute the error
val error= evaluator.evaluate(predictions)
println(error)
}
}
/*
* Data set Features
*
*longitude: continuous.
latitude: continuous.
housingMedianAge: continuous.
totalRooms: continuous.
totalBedrooms: continuous.
population: continuous.
households: continuous.
medianIncome: continuous.
medianHouseValue: continuous.
*
* */ |
pengfei99/Spark | common_utils/scala/data_io.scala | <filename>common_utils/scala/data_io.scala<gh_stars>0
import org.apache.spark.sql.{DataFrame, SaveMode}

/* This function writes the input dataframe as a single CSV file to the output file system. */
def WriteDataToDisk(df:DataFrame,outputPath:String,fileName:String): Unit ={
    // tab delimiter required by tranSMART (value assumed here; the original referenced an external outputCsvDelimiter)
    val outputCsvDelimiter = "\t"
    df.coalesce(1).write.mode(SaveMode.Overwrite)
      .option("header","true")
      .option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false") //Avoid creating the _SUCCESS marker file
      .option("encoding", "UTF-8")
      .option("delimiter", outputCsvDelimiter) // set tab as delimiter, required by tranSMART
.csv(outputPath+"/"+fileName)
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/formation/USAFlight.scala | <reponame>pengfei99/Spark<filename>WordCount/src/main/java/org/pengfei/spark/formation/USAFlight.scala<gh_stars>0
package org.pengfei.spark.formation
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
object USAFlight {
def main(args:Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("USAFlight").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val inputFile = "file:////home/pliu/Downloads/flight.csv"
/*
* DEST_COUNTRY_NAME,ORIGIN_COUNTRY_NAME,count
* */
val schema = StructType(Array(
StructField("DEST_COUNTRY_NAME", StringType, true),
StructField("ORIGIN_COUNTRY_NAME", StringType, true),
StructField("COUNT", IntegerType, true)))
val flightDF=spark.read.format("csv").option("delimiter", ",").schema(schema).load(inputFile)
//flightDF.show(10)
    /* sort flights originating in the USA by the flight count, descending */
val origin_usa=flightDF.filter($"ORIGIN_COUNTRY_NAME"==="United States").orderBy(flightDF("COUNT").desc)
//origin_usa.show(5)
    /* count the total number of flights that leave or arrive in the USA */
val count_leave_usa=origin_usa.agg(sum("COUNT").cast("long")).first.getLong(0)
print("Leave usa:"+count_leave_usa)
val dest_usa=flightDF.filter($"DEST_COUNTRY_NAME"==="United States").orderBy(flightDF("COUNT").desc)
val count_dest_usa=dest_usa.agg(sum("COUNT").cast("long")).first().getLong(0)
print("Come in usa"+count_dest_usa)
val total=count_leave_usa+count_dest_usa
print("Total flights of usa"+total)
}
def lineWordCount(text: String): Long={
val word=text.split(" ").map(_.toLowerCase).groupBy(identity).mapValues(_.size)
val counts=word.foldLeft(0){case (a,(k,v))=>a+v}
/* print(word)
print(counts)*/
return counts
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/SparkFileIO.scala | package org.pengfei.spark
import org.apache.spark.{SparkConf, SparkContext}
import scala.util.parsing.json.JSON
object SparkFileIO {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("SparkFileIO").setMaster("local")
val sc = new SparkContext(conf)
val inputFile= "file:///tmp/test.json"
val textFile= sc.textFile(inputFile)
val result= textFile.map(s=>JSON.parseFull(s))
    result.foreach {
      case Some(map: Map[String, Any]) => println(map)
      case None => println("Parsing failed")
      case other => println("Unknown data structure: " + other)
    }
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_3_Processing_Data_Stream.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson06_Spark_Streaming
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.streaming.FileStreamSource.Timestamp
import org.apache.spark.sql.functions.current_timestamp
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
object Lesson06_3_Processing_Data_Stream {
/******************************************************************************************************
* *****************************************6.3 Processing Data Stream *********************************
* ******************************************************************************************************/
/*
* An application processes a data stream using the methods defined in the DStream and related classes.
* DStream supports two types of operations: transformation and output operation. The transformations can
* be further classified into
* - basic
* - aggregation
* - key-value
* - special transformation.
*
* Similar to RDD transformations, DStream transformations are lazily computed. No computation takes
* places immediately when a transformation operation is called. An output operation triggers the execution of
* DStream transformation operations. In the absence of an output operation on a DStream, Spark Streaming
* will not do any processing, even if transformations are called on that DStream.*/
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val master="local[2]"
val appName="Lesson6_3_Processing_Data_Stream"
val spark=SparkSession.builder().appName(appName).master(master).getOrCreate()
/*****************************************Basic Transformation**********************************************/
//BasicTransformationExample(spark)
/******************************************Aggregation Transformation***************************************/
//AggregationTransformationExample(spark)
/*******************************************Key value transformation ***************************************/
// KeyValueTransformationExample(spark)
/*******************************************Special transformation ******************************************/
// SpecialTransformationExample(spark)
/*********************************************output operation**************************************************/
// OutputOperationsExample(spark)
/*********************************************Window operation *************************************************/
WindowOperationExample(spark)
}
/******************************************************************************************************
* *****************************************6.3.1 Basic Transformation *********************************
* ******************************************************************************************************/
def BasicTransformationExample(spark:SparkSession):Unit={
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStreams=ssc.socketTextStream(host,port)
/*
* A transformation applies a user-defined function to each element in a DStream and returns a new DStream.
* DStream transformations are similar to RDD transformations. In fact, Spark Streaming converts a DStream
* transformation method call to a transformation method call on the underlying RDDs. Spark core computes
* the RDD transformations.*/
/**********************************************Map/flatMap****************************************************/
/* The map method takes a function as argument and applies it to each element in the source DStream to create
* a new DStream. It returns a new DStream.
*
    * They work like the map/flatMap of RDDs. */
val mapToWords=dStreams.map(_.split(" "))
mapToWords.print()
val flatMapToWords=dStreams.flatMap(_.split(" "))
flatMapToWords.print()
/******************************************filter***********************************************************/
/* The filter method returns a new DStream created by selecting only those element in the source DStream
* for which the user-provided input function returns true.*/
val containsWords=dStreams.filter(_.contains("them"))
containsWords.print()
/*********************************************repartition****************************************************/
/* The repartition method returns a new DStream in which each RDD has the specified number of partitions.
* It allows you to distribute input data stream across a number of machines for processing. It is used to change
* the level of processing parallelism. More partitions increase parallelism, while fewer partitions reduce
* parallelism.*/
dStreams.repartition(6)
/**************************************** Union ****************************************************/
/* The union method returns a new DStream that contains the union of the elements in the source DStream and
* the DStream provided as input to this method*/
    /* val stream1 = ...
     * val stream2 = ...
     * val combinedStream = stream1.union(stream2)*/
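    /* A minimal runnable sketch of union: combine the raw lines with an upper-cased copy of the same
     * stream (a second socket source would work the same way, but it needs an extra receiver thread). */
    val upperCased = dStreams.map(_.toUpperCase)
    val combinedStream = dStreams.union(upperCased)
    combinedStream.print()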
ssc.start()
ssc.awaitTermination()
}
/***********************************************************************************************************
* *****************************************6.3.2 Aggregation Transformation *********************************
* ******************************************************************************************************/
def AggregationTransformationExample(spark:SparkSession)={
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStreams=ssc.socketTextStream(host,port)
/*********************************************count*******************************************************/
/* The count method returns a DStream of single-element RDDs. Each RDD in the returned DStream has the
* count of the elements in the corresponding RDD in the source DStream. */
val countsPerRdd=dStreams.count()
    //A DStream must have an output operation such as print to trigger the transformation
countsPerRdd.print()
    //The println only executes once, so we must use the print method of the DStream to see the result of each stream (micro batch)
println(s"countsPerRdd value ${countsPerRdd}")
/*********************************************reduce******************************************************/
/* The reduce method returns a DStream of single-element RDDs by reducing the elements in each RDD in the
* source DStream. It takes a user provided reduce function as an argument.*/
val words = dStreams.flatMap(_.split(" "))
val longWord=words.reduce((w1,w2)=> if(w1.length>w2.length) w1 else w2)
longWord.print()
/****************************************countByValue**********************************************************/
/* The countByValue method returns a DStream of key-value pairs, where a key in a pair is a distinct element
* within a batch interval and the value is its count. Thus, each RDD in the returned DStream contains the
* count of each distinct element in the corresponding RDD in the source DStream.*/
val wordCounts=words.countByValue()
wordCounts.print()
ssc.start()
ssc.awaitTermination()
}
/***********************************************************************************************************
* *****************************************6.3.3 Key value Transformation *********************************
* ******************************************************************************************************/
def KeyValueTransformationExample(spark:SparkSession)={
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStreams=ssc.socketTextStream(host,port)
/* DStreams of key-value pairs support a few other transformations in addition to the transformations available
* on all types of DStreams.*/
/****************************************************cogroup**************************************************/
/* The cogroup method returns a DStream of (K, Seq[V1], Seq[V2]) when called on a DStream of (K, Seq[V1]) and
* (K, Seq[V2]) pairs. It applies a cogroup operation between RDDs of the DStream passed as argument and
* those in the source DStream.
*
     * The following example uses the cogroup method to find the words with the same length from two DStreams.*/
/*val lines1 = ssc.socketTextStream("localhost", 9999)
val words1 = lines1 flatMap {line => line.split(" ")}
val wordLenPairs1 = words1 map {w => (w.length, w)}
val wordsByLen1 = wordLenPairs1.groupByKey
val lines2 = ssc.socketTextStream("localhost", 9998)
val words2 = lines2 flatMap {line => line.split(" ")}
val wordLenPairs2 = words2 map {w => (w.length, w)}
val wordsByLen2 = wordLenPairs2.groupByKey
val wordsGroupedByLen = wordsByLen1.cogroup(wordsByLen2)
*/
/***********************************************Join***********************************************************/
/* The join method takes a DStream of key-value pairs as argument and returns a DStream, which is an inner
* join of the source DStream and the DStream provided as input. It returns a DStream of (K, (V1, V2)) when
* called on DStreams of (K, V1) and (K, V2) pairs.
*
* The following example creates two DStreams of lines of text. It then splits them into DStreams of words. Next, it
* creates DStreams of key-value pairs, where a key is the length of a word and value is the word itself. Finally, it
* joins those two DStreams.*/
/*
* val lines1 = ssc.socketTextStream("localhost", 9999)
val words1 = lines1 flatMap {line => line.split(" ")}
val wordLenPairs1 = words1 map {w => (w.length, w)}
val lines2 = ssc.socketTextStream("localhost", 9998)
val words2 = lines2 flatMap {line => line.split(" ")}
val wordLenPairs2 = words2 map {w => (w.length, w)}
val wordsSameLength = wordLenPairs1.join(wordLenPairs2)
*/
/* Left outer, right outer, and full outer join operations are also available. If a DStream of key value pairs
* of type (K, V) is joined with another DStream of pairs of type (K, W), full outer join returns a DStream of
* (K, (Option[V], Option[W])), left outer join returns a DStream of (K, (V, Option[W])), and righter outer join
* returns a DStream of (K, (Option[V], W)).
* */
/*
* val leftOuterJoinDS = wordLenPairs1.leftOuterJoin(wordLenPairs2)
val rightOuterJoinDS = wordLenPairs1.rightOuterJoin(wordLenPairs2)
val fullOuterJoinDS = wordLenPairs1.fullOuterJoin(wordLenPairs2)*/
/**************************************************groupByKey**************************************************/
/* The groupByKey method groups elements within each RDD of a DStream by their keys. It returns a new
* DStream by applying groupByKey to each RDD in the source DStream.*/
val words=dStreams.flatMap(line=>line.split(" "))
val wordLenPairs= words.map(word=>(word.length,word))
val wordsByLen=wordLenPairs.groupByKey()
wordsByLen.print()
/**************************************************reduceByKey**************************************************/
/* The reduceByKey method returns a new DStream of key-value pairs, where the value for each key is obtained by
* applying a user-provided reduce function on all the values for that key within an RDD in the source DStream.
* The following example counts the number of times a word occurs within each DStream micro-batch.
* */
val wordPairs=words.map(w=>(w,1))
val wordCounts = wordPairs.reduceByKey(_ + _)
wordCounts.print()
ssc.start()
ssc.awaitTermination()
}
/***********************************************************************************************************
* *****************************************6.3.4 Special Transformation *********************************
* ******************************************************************************************************/
def SpecialTransformationExample(spark:SparkSession)={
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStream=ssc.socketTextStream(host,port)
/* The transformations discussed above allow you to specify operations on the elements in a DStream. Under
* the hood, DStream converts them to RDD operations. The next two transformations deviate from this model. */
/*******************************************************Transform***********************************************/
/* The transform method returns a DStream by applying an RDD => RDD function to each RDD in the source
* DStream. It takes as argument a function that takes an RDD as argument and returns an RDD. Thus, it gives
* you direct access to the underlying RDDs of a DStream.
*
* This method allows you to use methods provided by the RDD API, but which do not have equivalent
* operations in the DStream API. For example, sortBy is a transformation available in the RDD API, but not in
* the DStream API. If you want to sort the elements within each RDD of a DStream, you can use the transform
* method as shown in the following example.*/
val words = dStream.flatMap{line => line.split(" ")}
val sorted = words.transform{rdd => rdd.sortBy((w)=> w)}
//sorted.print()
/* The transform method is also useful for applying machine learning and graph computation algorithms to data
* streams. The machine learning and graph processing libraries provide classes and methods that operate at
* the RDD level. Within the transform method, you can use the API provided by these libraries.*/
/***********************************************UpdateStateByKey**********************************************/
/* The updateStateByKey method allows you to create and update states for each key in a DStream of key value
* pairs. You can use this method to maintain any information about each distinct key in a DStream.
*
* For example, you can use the updateStateByKey method to keep a running count of each distinct word
* in a DStream, as shown in the following example.*/
val wordPairs = words.map{word=>(word,1)}
val updateState=(xs:Seq[Int],prevState:Option[Int])=>{
prevState match {
case Some(prevCount)=>Some(prevCount+xs.sum)
case None => Some(xs.sum)
}
}
val runningCount = wordPairs.updateStateByKey(updateState)
runningCount.print()
/* The Spark Streaming library provides multiple overloaded variants of the updateStateByKey method.
* The simplest version of the updateStateByKey method takes a function of type (Seq[V], Option[S]) => Option[S]
* as an argument. This user-provided function takes two arguments. The first argument is a sequence of new
* values for a key in a DStream RDD and the second argument is previous state of the key wrapped in the
* Option data type. The user-provided function updates the state of a key using the new values and previous
* state of a key, and returns the new state wrapped in the Option data type. If the update function returns None
* for a key, Spark Streaming stops maintaining state for that key.
*
* The updateStateByKey method returns a DStream of key-value pairs, where the value in a pair is the
* current state of the key in that pair.*/
ssc.start()
ssc.awaitTermination()
}
/***********************************************************************************************************
* ***************************************** 6.3.5 Output Operations **************************************
* ******************************************************************************************************/
def OutputOperationsExample(spark:SparkSession):Unit={
val batchInterval=10
val ssc= new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStream=ssc.socketTextStream(host,port)
/* Output operations are DStream methods that can be used to send DStream data to an output destination.
* An output destination can be a file, database, or another application. Output operations are executed
* sequentially in the order in which they are called by an application.
* */
/**********************************************Saving to a File system*************************************/
/* The commonly used DStream output operations for saving a DStream to file system */
val outputDir="/tmp/spark/output"
val objOutputDir="/tmp/spark/objOutput"
val words=dStream.flatMap(_.split(" "))
val wordPairs=words.map(w=>(w,1))
val wordCounts=wordPairs.reduceByKey(_ + _)
wordCounts.saveAsTextFiles(outputDir)
/********************************************Saving as object Files **************************************/
/* The saveAsObjectFiles method saves DStream elements as serialized objects in binary SequenceFiles.
* Similar to the saveAsTextFile method, it stores the data for each DStream RDD in a separate directory
* and creates a file for each RDD partition. The directory name for each DStream RDD is generated using the
* current timestamp and a user-provided prefix and optional suffix. */
// wordCounts.saveAsObjectFiles(objOutputDir)
/********************************************save as hadoop files *******************************************/
/* The saveAsHadoopFiles method is available on DStreams of key-value pairs. It saves each RDD in the source
* DStream as a Hadoop file. */
/********************************************save as new api hadoop files **************************************/
/* Similar to the saveAsHadoopFiles method, the saveAsNewAPIHadoopFiles method saves each RDD in a
* DStream of key-value pairs as a Hadoop file.*/
/************************************************Displaying on Console****************************************/
/* The DStream class provides the print method for displaying a DStream on the console of the machine where
* the driver program is running.
*
* The print method, prints the elements in each RDD in the source DStream on the
* machine running the driver program. By default, it shows the first ten elements in each RDD. An overloaded
* version of this method allows you to specify the number of elements to print.
* */
/************************************************Saving into a DataBase ****************************************/
/* The foreachRDD method in the DStream class can be used to save the results obtained from processing a
* DStream into a database.
*
* The foreachRDD method is similar to the transform method discussed earlier. It gives you access to the
* RDDs in a DStream. The key difference between transform and foreachRDD is that transform returns a new
* DStream, whereas foreachRDD does not return anything.
*
* The foreachRDD method is a higher-order method that takes as argument a function of type RDD => Unit.
* It applies this function to each RDD in the source DStream. All RDD operations are available to this function.
* It is important to note that the foreachRDD method is executed on the driver node; however, the RDD
* transformations and actions called within foreachRDD are executed on the worker nodes.
*
* Two things have to be kept in mind when saving a DStream into a database. First, creating a database
* connection is an expensive operation. It is recommended not to open and close database connections
* frequently. Ideally, you should re-use a database connection for storing as many elements as possible to
* amortize the cost of creating a connection. Second, a database connection generally cannot be serialized
* and sent from master to worker nodes. Since DStreams are processed on worker nodes, database
* connections should be created on worker nodes.
*
* The RDD foreachPartition action can be used for storing multiple DStream elements using the same
* database connection. Since the foreachRDD DStream method gives you access to all RDD operations, you
* can call the foreachPartition RDD method within foreachRDD. Within foreachPartition, you can open a
* database connection and use that connection to store all elements in the source RDD partition. You can further
* optimize by using a connection pool library instead of opening and closing a physical connection directly.
* The following code snippet implements the approach described earlier for saving a DStream to a
* database. It assumes that the application is using a connection pool library such as HikariCP or BoneCP. The
* connection pool library is wrapped in a lazily initialized singleton object named ConnectionPool, which
* manages a pool of database connections.
*
     * If you know the structure of the received data, you can also use the RDD and DataFrame APIs to save the data into the
* database.
* */
// val structruredDStream=wordCounts.map{(word:String,count:Int)=>WordCounts(word,count)}
/*
wordCounts.foreachRDD{ rdd=>
}
}*/
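    /* A minimal sketch of the pattern described above. ConnectionPool is the hypothetical, lazily
     * initialized singleton mentioned in the text, so those calls are left as comments; only the
     * foreachRDD/foreachPartition skeleton is live code. */
    wordCounts.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        // executed on a worker node: borrow one connection per partition from the pool
        // val connection = ConnectionPool.getConnection()
        partition.foreach { case (word, count) =>
          // reuse the same connection for every element in this partition, e.g.
          // connection.executeUpdate(s"INSERT INTO word_counts VALUES ('$word', $count)")
          ()
        }
        // ConnectionPool.returnConnection(connection)
      }
    }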
/* Another optimization that you can do is batch the database writes. So instead of sending one database
* write per element, you can batch all the inserts for an RDD partition and send just one batch update to the
* database per RDD partition.
* The foreachRDD method comes handy for not only saving a DStream to a database, but it is also useful
* for displaying the elements in a DStream in a custom format on the driver node.*/
ssc.start()
ssc.awaitTermination()
}
case class WordCounts(word:String,count:Int)
/***********************************************************************************************************
* ***************************************** 6.3.6 Window Operations **************************************
* ******************************************************************************************************/
def WindowOperationExample(spark:SparkSession):Unit={
val batchInterval= 10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val host="localhost"
val port=9999
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val dStream=ssc.socketTextStream(host,port)
/* A window operation is a DStream operation that is applied over a sliding window of data in a stream.
* Successive windows have one or more overlapping RDDs (see Figure 6-5 page 94). A window operation is a stateful
* DStream operation that combines data across multiple batches.
*
* A window operation requires two parameters (see Figure 6-6 page 94):
* - window length : specifies the time duration over which a window operation is applied.
* - sliding interval : specifies the time interval at which a window operation is performed. It is the time interval
* at which new RDDs are generated by a window operation.
*
* Important note : Both the window length and sliding interval parameters must be a multiple of a DStream’s
* batch interval.
*
* */
/************************************* Window method *****************************************/
/* The window method returns a DStream of sliding RDDs. It takes two arguments, window duration and sliding
* interval. Each RDD in the returned DStream includes elements from the source DStream for the specified
* duration and a new RDD is generated at the specified time interval. Successive RDDs in the returned
* DStream have overlapping data elements.*/
// WindowMethod(dStream)
/******************************************CountByWindow*******************************************/
/* The countByWindow method returns a DStream of single-element RDDs. The single element in each
* returned DStream RDD is the count of the elements in a sliding window of a specified duration. It takes two
* arguments, window duration, and sliding interval.*/
// CountByWindowMethod(dStream)
/****************************************countByValueAndWindow*******************************/
/* The countByValueAndWindow method returns a DStream containing the counts of each distinct element
* within a sliding window that slides at the specified time interval.*/
// CountByValueAndWindow(dStream)
/**************************************** reduceByWindow ***************************************/
/* The reduceByWindow method returns a DStream of single-element RDDs. Each RDD in the returned DStream
* is generated by applying a user-provided reduce function over the DStream elements in a sliding window.
* The reduceByWindow method takes three arguments: reduce function, window duration, and sliding interval.
*
* The user-provided reduce function must be of type (T, T) => T. It takes two arguments of type T and returns
* a single value of type T. This function is applied to all the elements within a window to generate a
* single value. It can be used to aggregate elements within each sliding window.*/
// ReduceByWindowMethod(dStream)
/******************************************reduceByKeyAndWindow*********************************/
/* The reduceByKeyAndWindow operation is available only for DStreams of key-value pairs. It is similar to
* reduceByWindow, except that it does the same thing for a DStream of key-value pairs. It applies a user provided
* reduce function to key-value pairs in a sliding DStream window to generate single key-value pair
* for each distinct key within a window.*/
ReduceByKeyAndWindow(dStream)
/******************************reduceByKeyAndWindow with invFunc************************************/
/* In a windowing operation, each new window overlaps with previous window. It adds some elements
* to and removes some from the previous window. For example, if the window duration is 60 seconds and
* sliding interval is 10 seconds, each new window removes 10 seconds of data from previous window and adds
* 10 seconds of new data. Successive windows share 40 seconds of data. Performing complete aggregation
* over 60 seconds for every window is inefficient. A more efficient approach is to add the aggregate for the 10
* seconds of new data to the previous window’s result and remove the aggregate for the 10 seconds of data that
* is no longer in the new window.
*
* Spark Streaming provides an efficient variant of the reduceByKeyAndWindow operation, which
* incrementally updates a sliding window by using the reduced value of the predecessor window. It requires
     * an additional inverse reduce function as an argument. It reduces the new values that enter a window and
* uses the inverse reduce function to remove the values that left the window.
*
     * Note that the reduce function must have a corresponding "inverse reduce" function (e.g. - is the inverse of +).*/
ssc.start()
ssc.awaitTermination()
}
def WindowMethod(dStream:DStream[String]):Unit={
val words:DStream[String] = dStream.flatMap(line=>line.split(" "))
val windowLen=40
val slidingInterval = 20
val window = words.window(Seconds(windowLen),Seconds(slidingInterval))
val longestWord = window.reduce{(word1,word2)=> if (word1.length > word2.length) word1 else word2}
longestWord.print()
}
def CountByWindowMethod(dStream: DStream[String]):Unit={
val words:DStream[String] = dStream.flatMap(line=>line.split(" "))
val windowLen=40
val slidingInterval = 20
val countByWindow = words.countByWindow(Seconds(windowLen),Seconds(slidingInterval))
countByWindow.print()
}
def CountByValueAndWindow(dStream: DStream[String]):Unit={
val words:DStream[String] = dStream.flatMap(line=>line.split(" "))
val windowLen=40
val slidingInterval = 20
val countByValueAndWindow = words.countByValueAndWindow(Seconds(windowLen),Seconds(slidingInterval))
countByValueAndWindow.print()
}
def ReduceByWindowMethod(dStream: DStream[String]):Unit={
val numbers=dStream.flatMap(_.split(" ")).map(x=>x.toInt)
val windowLen=30
val slidingInterval = 10
val sumLast30Seconds = numbers.reduceByWindow({(n1,n2)=>n1+n2},Seconds(windowLen),Seconds(slidingInterval))
sumLast30Seconds.print()
}
def ReduceByKeyAndWindow(dStream: DStream[String]):Unit={
val words:DStream[String]=dStream.flatMap(line=>line.split(" "))
val wordPair=words.map{word=>(word,1)}
val windowLen=40
val slidingInterval = 10
val Last40SecondsWordCounts=wordPair.reduceByKeyAndWindow((count1:Int,count2:Int)=>count1+count2,
Seconds(windowLen),Seconds(slidingInterval))
Last40SecondsWordCounts.print()
}
def SmartReduceByKeyAndWindow(dStream: DStream[String]):Unit={
val words = dStream flatMap {line => line.split(" ")}
val wordPairs = words map {word => (word, 1)}
val windowLen = 30
val slidingInterval = 10
def add(x: Int, y: Int): Int = x + y
def sub(x: Int, y: Int): Int = x-y
    val wordCountLast30Seconds = wordPairs.reduceByKeyAndWindow(add _, sub _, Seconds(windowLen), Seconds(slidingInterval))
    wordCountLast30Seconds.print()
}
/***********************************************************************************************************
* ***************************************** 6.3.7 Caching/persistence **************************************
* ******************************************************************************************************/
/* Similar to RDDs, DStreams also allow developers to persist the stream’s data in memory. That is, using the
* persist() method on a DStream will automatically persist every RDD of that DStream in memory. This is useful
* if the data in the DStream will be computed multiple times (e.g., multiple operations on the same data).
* For window-based operations like reduceByWindow and reduceByKeyAndWindow and state-based operations
* like updateStateByKey, this is implicitly true. Hence, DStreams generated by window-based operations
* are automatically persisted in memory, without the developer calling persist().
*
* For input streams that receive data over the network (such as, Kafka, Flume, sockets, etc.), the default
* persistence level is set to replicate the data to two nodes for fault-tolerance.
*
* Note that, unlike RDDs, the default persistence level of DStreams keeps the data serialized in memory.
* This is further discussed in the Performance Tuning section. More information on different persistence levels
* can be found in the Spark Programming Guide StorageLevel.
* */
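  /* A minimal sketch of explicit DStream caching (assumes the caller passes an existing DStream[String]). */
  def PersistExample(lines: DStream[String]): Unit = {
    // keep each generated RDD in memory in serialized form, which is the default storage level for DStreams
    lines.persist(org.apache.spark.storage.StorageLevel.MEMORY_ONLY_SER)
    lines.count().print()
  }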
/***********************************************************************************************************
* ********************************** 6.3.8 Handling late data and Watermarking ****************************
* ******************************************************************************************************/
/*
* You can find complete guide in https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html
* We have seen how to use window method to group dstream. Now consider what happens if one of the events arrives
* late to the application. For example, say, a word generated at 12:04 (i.e. event time) could be received by
* the application at 12:11. The application should use the time 12:04 instead of 12:11 to update the older
* counts for the window 12:00 - 12:10. This occurs naturally in our window-based grouping – Structured Streaming
* can maintain the intermediate state for partial aggregates for a long period of time such that late data can
* update aggregates of old windows correctly, as illustrated below.
*
* However, to run this query for days, it’s necessary for the system to bound the amount of intermediate
* in-memory state it accumulates. This means the system needs to know when an old aggregate can be dropped
* from the in-memory state because the application is not going to receive late data for that aggregate any more.
* To enable this, in Spark 2.1, we have introduced watermarking, which lets the engine automatically track the
* current event time in the data and attempt to clean up old state accordingly. You can define the watermark of
* a query by specifying the event time column and the threshold on how late the data is expected to be in terms
* of event time. For a specific window starting at time T, the engine will maintain state and allow late data to
* update the state until (max event time seen by the engine - late threshold > T). In other words, late data within
* the threshold will be aggregated, but data later than the threshold will start getting dropped
* (see later in the section for the exact guarantees). Let’s understand this with an example. We can easily
* define watermarking on the previous example using withWatermark() as shown below.*/
/*def WaterMarkExample(spark:SparkSession)={
import spark.implicits._
val batchInterval=10
val ssc=new StreamingContext(spark.sparkContext,Seconds(batchInterval))
val checkPointPath="/tmp/spark/check-point"
ssc.checkpoint(checkPointPath)
val host="localhost"
val port=9999
val dStream=ssc.socketTextStream(host,port)
val words=dStream.flatMap(_.split(" "))
val wordsStructured=words.map{w:String=>Words(w)}
val wordDf=wordsStructured.foreachRDD(word=>word.toDF())
val wordWithTimeDf=wordDf.withColumn("currentTimestamp",current_timestamp())
// Group the data by window and word and compute the count of each group
val windowedCounts = words
.withWatermark("timestamp", "10 minutes")
.groupBy(
window($"timestamp", "10 minutes", "5 minutes"), $"word")
.count()
}*/
/*def getCurrentTime():Timestamp={
}*/
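  /* A minimal Structured Streaming sketch of watermarking (assumes a socket source on localhost:9999;
   * event time is approximated here with the processing timestamp, so it is illustrative only). */
  def WatermarkSketch(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions.window
    val lines = spark.readStream.format("socket")
      .option("host", "localhost").option("port", 9999).load()
    val words = lines.as[String].flatMap(_.split(" ")).toDF("word")
      .withColumn("timestamp", current_timestamp())
    // rows whose event time is more than 10 minutes behind the max event time seen so far are dropped
    val windowedCounts = words
      .withWatermark("timestamp", "10 minutes")
      .groupBy(window($"timestamp", "10 minutes", "5 minutes"), $"word")
      .count()
    val query = windowedCounts.writeStream.outputMode("update").format("console").start()
    query.awaitTermination()
  }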
case class Words(word:String)
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson03_Spark_Application/Lesson03_1_Spark_Shell.scala | <gh_stars>0
package org.pengfei.Lesson03_Spark_Application
object Lesson03_1_Spark_Shell {
/***********************************************************************************/
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson05_Spark_ML/Lesson05_2_1_2_Classification_Algo.scala | package org.pengfei.Lesson05_Spark_ML
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
object Lesson05_2_1_2_Classification_Algo {
/*****************************************************************************************************
*************************************5.2.1.2 Classification Algo ***********************************
* *************************************************************************************************/
/*
* Classification algorithms train models that predict categorical values. The dependent or response variable in
* the training dataset is a categorical variable. In other words, the label is a categorical variable.
* The model trained by a classification algorithm can be a binary, multi-class, or multi-label classifier.
*
* A binary classifier classifies observations into two categories: positive or negative. The predicted label
* has only two classes.
*
* A multi-class classifier predicts a label that can have more than two classes. For example, a multi-class
* classifier can be used to classify images of animals. The label in this example can be cat, dog, hamster, lion,
* or some other animal.
*
* A multi-label classifier can output more than one label for the same observation. For example, a
* classifier that categorizes news articles can output more than one label for an article that is related to both
* sports and business.
*
* The commonly used supervised machine learning algorithms for classification tasks include the
* following algorithms.
* - Logistic Regression
* - Support Vector Machine(SVM)
* - Naive Bayes
* - Decision Trees
* - Random Forest
* - Gradient-Boosted Trees
* - Neural Network*/
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson5_2_1_1_Regression_Algo").getOrCreate()
/*************************************5.2.1.2.1 Logistic Regression***********************************/
}
/*****************************************************************************************************
*************************************5.2.1.2.1 Logistic Regression ***********************************
* *************************************************************************************************/
def LogisticRegressionExample(spark:SparkSession):Unit={
/* The logistic regression algorithm trains a linear model that can be used for classification tasks. Specifically,
* the generated model can be used for predicting the probability of occurrence of an event.
*
* Logistic regression uses a logistic or sigmoid function to model the probabilities for the possible labels
* of an unlabeled observation. With any input, the sigmoid function will always return a value between 0 and 1.
* */
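    /* A minimal sketch with hypothetical toy data (the column names "label" and "features" are the
     * Spark ML defaults); it is illustrative only, not a tuned model. */
    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.ml.linalg.Vectors
    val training = spark.createDataFrame(Seq(
      (1.0, Vectors.dense(0.0, 1.1, 0.1)),
      (0.0, Vectors.dense(2.0, 1.0, -1.0)),
      (0.0, Vectors.dense(2.0, 1.3, 1.0)),
      (1.0, Vectors.dense(0.0, 1.2, -0.5))
    )).toDF("label", "features")
    val lr = new LogisticRegression().setMaxIter(10).setRegParam(0.01)
    val model = lr.fit(training)
    // the model outputs a probability column in addition to the predicted label
    model.transform(training).select("label", "probability", "prediction").show()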
}
/*****************************************************************************************************
*************************************5.2.1.2.2 Support Vector Machine ********************************
* *************************************************************************************************/
def SupportVectorMachineExample(spark:SparkSession):Unit={
/* The Support Vector Machine (SVM) algorithm trains an optimal classifier. Conceptually, it learns from a
* training dataset an optimal hyperplane (see Figure 8-5, page 163) for classifying a dataset. It finds the
* best hyperplane that separates training observations of one class from those of the other class. The support
* vectors are the feature vectors that are closest to the separating hyperplane.
*
* The best hyperplane is the one with the largest margin between two classes of observations. Margin in
* this context is the width of a slab that cleanly separates the observations in the training set. In other words,
* the margin between the separating hyperplane and the nearest feature vectors from both classes is maximal.
* The diagram in Figure 8-5 illustrates this point.
*
* SVM can be used as a kernel-based method. A kernel-based method implicitly maps feature vectors
* into a higher-dimensional space where it is easier to find an optimal hyperplane for classifying observations
* (see Figure 8-6). For example, it may be difficult to find a hyperplane that separates positive and negative
* examples in a two-dimensional space. However, if the same data is mapped to a three or higher dimensional
* space, it may be easier to find a hyperplane that cleanly separates the positive and negative observations.
* The Figure 8-6 illustrates this approach.
*
* A kernel based method uses a kernel function, which is a similarity function. The kernel function takes
* two observations as input and outputs their similarity.
*
* SVM is a powerful algorithm, but also more compute-intensive than some of the less sophisticated
* classification algorithms. One of the advantages of SVM is that it works well on datasets that are not linearly
* separable.*/
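    /* A minimal sketch using Spark ML's linear SVM implementation (LinearSVC) on hypothetical toy data;
     * note that LinearSVC trains a linear separator only and does not cover kernel-based SVMs. */
    import org.apache.spark.ml.classification.LinearSVC
    import org.apache.spark.ml.linalg.Vectors
    val training = spark.createDataFrame(Seq(
      (1.0, Vectors.dense(1.0, 1.5)),
      (1.0, Vectors.dense(1.2, 2.0)),
      (0.0, Vectors.dense(-1.0, -1.1)),
      (0.0, Vectors.dense(-1.5, -0.5))
    )).toDF("label", "features")
    val lsvc = new LinearSVC().setMaxIter(10).setRegParam(0.1)
    val model = lsvc.fit(training)
    model.transform(training).select("label", "prediction").show()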
}
/*****************************************************************************************************
*************************************5.2.1.2.3 Naive Bayes ***************************************
* *************************************************************************************************/
/* The Naïve Bayes algorithm uses Bayes theorem to train a classifier. The model trained by the Naïve Bayes
* algorithm is a probabilistic classifier. For a given observation, it calculates a probability distribution over a
* set of classes.
*
* Bayes theorem describes the conditional or posterior probability of an event. The mathematical
* equation for Bayes theorem is shown next. P(A|B)=(P(B|A).P(A))/P(B)
*
* In the preceding equation, A and B are events. P(A|B)is the posterior or conditional probability of A
* knowing that B has occurred. P(B|A) is the posterior probability of B given that A has occurred. P(A) and P(B)
* are the prior probabilities of A and B respectively.
*
* The Naive Bayes algorithm assumes that all the features or predictor variables are independent. That
* is the reason it is called naïve. In theory, the Naive Bayes algorithm should be used only if the predictor
* variables are statistically independent; however, in practice, it works even when the independence
* assumption is not valid.
*
* Naïve Bayes is particularly suited for high dimensional datasets. Although it is a simple algorithm, it
* often outperforms more sophisticated classification algorithms.*/
def NaiveBayesExample(spark:SparkSession):Unit={
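    /* A minimal sketch with hypothetical toy data; the default model type is multinomial, which
     * expects non-negative feature values (e.g. counts). */
    import org.apache.spark.ml.classification.NaiveBayes
    import org.apache.spark.ml.linalg.Vectors
    val training = spark.createDataFrame(Seq(
      (0.0, Vectors.dense(1.0, 0.0, 0.0)),
      (0.0, Vectors.dense(2.0, 0.0, 1.0)),
      (1.0, Vectors.dense(0.0, 1.0, 3.0)),
      (1.0, Vectors.dense(0.0, 2.0, 4.0))
    )).toDF("label", "features")
    val nb = new NaiveBayes()
    val model = nb.fit(training)
    model.transform(training).select("label", "probability", "prediction").show()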
}
/*****************************************************************************************************
*************************************5.2.1.2.4 Trees ***************************************
* *************************************************************************************************/
  /* Decision trees, random forests, and gradient-boosted trees can also be used for classification problems.
*
* For regression tasks, each terminal node stores a numeric value; whereas for classification tasks, each
* terminal node stores a class label. Multiple leaves may have the same class label. To predict a label for an
* observation, a decision tree model starts at the root node of a decision tree and tests the features against the
* internal nodes until it arrives at a leaf node. The value at the leaf node is the predicted label.
* */
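  /* A minimal sketch of a decision tree classifier on hypothetical toy data; random forests and
   * gradient-boosted trees follow the same fit/transform pattern with their respective classes. */
  def DecisionTreeClassifierExample(spark:SparkSession):Unit={
    import org.apache.spark.ml.classification.DecisionTreeClassifier
    import org.apache.spark.ml.linalg.Vectors
    val training = spark.createDataFrame(Seq(
      (0.0, Vectors.dense(0.0, 1.0)),
      (0.0, Vectors.dense(0.5, 1.5)),
      (1.0, Vectors.dense(3.0, 0.0)),
      (1.0, Vectors.dense(3.5, -0.5))
    )).toDF("label", "features")
    val dt = new DecisionTreeClassifier().setMaxDepth(3)
    val model = dt.fit(training)
    model.transform(training).select("label", "prediction").show()
  }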
/*****************************************************************************************************
*************************************5.2.1.2.5 NeuralNetwork ***************************************
* *************************************************************************************************/
def NeuralNetworkExample(spark:SparkSession):Unit={
/*
* Neural Network algorithms are inspired by biological neural networks. They try to mimic the brain. A commonly
* used neural network algorithm for classification tasks is the feedforward neural network. A classifier trained by
* the feedforward neural network algorithm is also known as a multi-layer perceptron classifier.
*
* A multi-layer perceptron classifier consists of interconnected nodes (Figure 8-7,Page 166). A node is also
* referred to as a unit. The network of interconnected nodes is divided into multiple layers.
*
* The first layer consists of the inputs to the classifier. It represents the features of an observation. Thus,
* the number of the nodes in the first layer is same as the number of input features.
*
* The input layer is followed by one or more hidden layers. A neural network with two or more hidden
* layer is known as a deep neural network. Deep learning algorithms, which have recently become popular
* again, train models with multiple hidden layers.
*
* A hidden layer can consist of any number of nodes or units. Generally, the predictive performance
* improves with more number of nodes in a hidden layer. Each node in a hidden layer accepts inputs from the
* all the nodes in the previous layer and produces an output value using an activation function.
*
* The activation function generally used is the logistic (sigmoid) function. Thus, a single-layer
* feedforward neural network with no hidden layers is identical to a logistic regression model.
*
* The last layer, also known as output units, represents label classes. The number of nodes in the output
* layer depends on the number of label classes. A binary classifier will have one node in the output layer. A
* k-class classifier will have k output nodes.
*
* In a feedforward neural network, input data flows only forward from the input layer to the output layer
* through the hidden layers. There are no cycles.
*
* The Figure 8-7 shows a feedforward neural network. The feedforward neural network algorithm uses a technique
* known as backpropagation to train a model. During the training phase, prediction errors are fed back to
* the network. The algorithm uses this information for adjusting the weights of the edges connecting the nodes
* to minimize prediction errors. This process is repeated until the prediction errors converge to value less
* than a predefined threshold.
*
* Generally, a neural network with one layer is sufficient in most cases. If more than one hidden layers are
* used, it is recommended to have the same number of nodes in each hidden layer.
* Neural networks are better suited for classifying data that is not linearly separable. An example of a
* classification tasks involving non-linear data is shown in Figure 8-8(page 167).
*
* Neural networks have a few disadvantages. They are difficult to interpret. It is hard to explain what the
* nodes in the hidden layers represent. In addition, neural network algorithms are more compute intensive
* than simpler classification algorithms such as logistic regression.*/
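    /* A minimal sketch of a multi-layer perceptron classifier on hypothetical toy data. The first
     * element of layers must match the number of input features and the last the number of classes. */
    import org.apache.spark.ml.classification.MultilayerPerceptronClassifier
    import org.apache.spark.ml.linalg.Vectors
    val training = spark.createDataFrame(Seq(
      (0.0, Vectors.dense(0.0, 0.0)),
      (1.0, Vectors.dense(0.0, 1.0)),
      (1.0, Vectors.dense(1.0, 0.0)),
      (0.0, Vectors.dense(1.0, 1.0))
    )).toDF("label", "features")
    val mlp = new MultilayerPerceptronClassifier()
      .setLayers(Array(2, 5, 2)) // 2 input features, one hidden layer of 5 units, 2 output classes
      .setMaxIter(100)
      .setSeed(1234L)
    val model = mlp.fit(training)
    model.transform(training).select("label", "prediction").show()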
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson17_Analyze_Clinical_Data/Lesson17_Get_Specific_Row_Of_Column_Group.scala | package org.pengfei.Lesson17_Analyze_Clinical_Data
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
object Lesson17_Get_Specific_Row_Of_Column_Group {
  /* In this section, we show how to get a specific row (e.g. the one with the max TotalValue) of each group
   * after a groupBy, using a small (Hour, Category, TotalValue) dataset as an example. */
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().appName("Lesson17_Appendix").master("local[2]").getOrCreate()
import spark.implicits._
val df = spark.sparkContext.parallelize(Seq(
(0,"cat26",30.9), (0,"cat13",22.1), (0,"cat95",19.6), (0,"cat105",1.3),
(1,"cat67",28.5), (1,"cat4",26.8), (1,"cat13",12.6), (1,"cat23",5.3),
(2,"cat56",39.6), (2,"cat40",29.7), (2,"cat187",27.9), (2,"cat68",9.8),
(3,"cat8",35.6))).toDF("Hour", "Category", "TotalValue")
df.orderBy($"TotalValue".desc).show(10)
    // We can't call orderBy or sortBy after groupBy, and if we call orderBy or sortBy before, groupBy will
    // not preserve the order. So the following code is wrong: sometimes it may give the right answer,
    // but there is no guarantee.
val maxValueOfEachHour=df.orderBy($"TotalValue".desc).groupBy($"Hour").agg(first("Hour").as("Hour"),
first("Category").as("Category"),
first("TotalValue").as("TotalValue"))
maxValueOfEachHour.orderBy($"Hour".desc).show(5)
/* Solution 1 : Use window function
*
* 1. Create window specification partitionBy hour and orderby TotalValue as desc, so the first row is the biggest
* 2. use function row_numb to create a new column row_number
* 3. filter the row with row_number=1
* */
/* val w=Window.partitionBy($"Hour").orderBy($"TotalValue".desc)
val windowSolution = df.withColumn("rn", row_number.over(w)).where($"rn" === 1)
windowSolution.show(5)*/
    /* Solution 2 : Use the Dataset API
     * In the following code, we use groupByKey to group rows (Record) by the Record attribute
* Hour, then we use reduceGroups to reduce all elements in the same group after groupBy.
* The reduceGroups takes a list of Record of the same group, and return one Record.
*
     * This method can leverage map-side combine and doesn't require a full shuffle, so most of the time it should
     * exhibit better performance compared to window functions and joins. It can also be used with Structured Streaming
     * in complete output mode.
* */
/* val goodSolution=df.as[Record].groupByKey(_.Hour).reduceGroups((x,y)=>if (x.TotalValue > y.TotalValue) x else y)
goodSolution.show(5)*/
/* Solution 3 : Use sql aggregation and join
*
* 1. We do a groupBy over column hour, then aggregate on column TotalValue with max to get the max value of each hour
* 2. Then we join with original on which hour==max_hour && TotalValue=max_value*/
val dfMax = df.groupBy($"hour".as("max_hour")).agg(max($"TotalValue").as("max_value"))
//dfMax.show(5)
val joinSolution=df.join(broadcast(dfMax),($"hour"===$"max_hour")&&($"TotalValue"===$"max_value"))
.drop("max_hour")
.drop("max_value")
joinSolution.show(5)
}
case class Record(Hour: Integer, Category: String, TotalValue: Double)
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/streaming/kafka/KafkaWordCount.scala | <gh_stars>0
package org.pengfei.spark.streaming.kafka
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
object KafkaWordCount {
def main(args:Array[String]){
//StreamingExamples.setStreamingLogLevels()
val master = "spark://hadoop-nn.bioaster.org:7077"
val sc = new SparkConf().setAppName("KafkaWordCount").setMaster(master)
val ssc = new StreamingContext(sc,Seconds(10))
    //Set the context to periodically checkpoint the DStream operations, required for stateful operations and driver fault-tolerance.
    //The checkpoint can be written to a local file system, as in this example.
    //It can also be written to HDFS, e.g. ssc.checkpoint("/user/hadoop/checkpoint");
    //in that case make sure your HDFS is running.
ssc.checkpoint("file:///tmp/kafka/checkpoint")
val zkQuorum = "hadoop-nn.bioaster.org:2181" //Zookeeper server url
val group = "1" //set topic group, for example val group = "test-consumer-group"
val topics = "Hello-Kafka" //topics name
    val numThreads = 3 //number of consumer threads per topic
val topicMap =topics.split(",").map((_,numThreads.toInt)).toMap
val lineMap = KafkaUtils.createStream(ssc,zkQuorum,group,topicMap)
val lines = lineMap.map(_._2)
val words = lines.flatMap(_.split(" "))
val pair = words.map(x => (x,1))
val wordCounts = pair.reduceByKeyAndWindow(_ + _,_ - _,Minutes(2),Seconds(10),2)
wordCounts.print
ssc.start
ssc.awaitTermination
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_5_6_Structure_Streaming_Kafka_Integrations.scala | <reponame>pengfei99/Spark
package org.pengfei.Lesson06_Spark_Streaming
import org.apache.spark.sql.SparkSession
object Lesson06_5_6_Structure_Streaming_Kafka_Integrations {
def main(args:Array[String])={
}
/*****************************************************************************************************************
* ***************************** 6.5.6 Spark Structure Streaming Kafka Integration *******************************
* ***************************************************************************************************************/
/* To use kafka input source in structured streaming, you need to add the following dependencies in your pom.xml(maven)
* groupId = org.apache.spark
* artifactId = spark-sql-kafka-0-10_2.11
* version = 2.3.1
*
* You can find the official doc here :
* https://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html
* */
/********************************** 6.5.6.1 Kafka source for streaming queries ******************************/
def StreamingQueryExample(spark:SparkSession):Unit={
MultipleTopicsExample(spark)
//
PatternOfTopicsExample(spark)
}
/*****************************subscribe to multiple topics **************************************/
def MultipleTopicsExample(spark:SparkSession):Unit={
// create a stream dataframe which subscribe to two topics "news" and "movies"of kafka stream on server
// host1/2 and port1/2.
val df=spark.readStream.format("kafka")
.option("kafka.bootstrap.servers","host1:port1,host2:port2")
.option("subscribe","news,movies")
.load()
/*df.selectExpr("CAST(key AS STRING)","CAST(value AS STRING)").as[(String,String)]*/
}
/***************************subscribe to pattern of topics **********************************/
def PatternOfTopicsExample(spark:SparkSession):Unit={
val df=spark.readStream.format("kafka")
.option("kafka.bootstrap.servers","host1:port1,host2:port2")
.option("subscribePattern","topic.*")
.load()
}
/********************************** 6.5.6.2 Kafka source for batch queries ******************************/
def BatchQueryExample(spark:SparkSession):Unit={
// OneTopicDefaultOffSet(spark)
MultTopicWithOffSet(spark)
PatternTopicWithOffSet(spark)
}
/*************************** subscribe to 1 topic defaults to the earliest and latest offsets ******************/
def OneTopicDefaultOffSet(spark:SparkSession):Unit={
//By default, if we don't specify the start/ending offset, spark will take the earliest to the latest
val df = spark
.read
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.option("subscribe", "topic1")
.load()
/* df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]*/
}
/*********************** subscribe to multiple topics, specifying explicit kafka offsets *************************/
def MultTopicWithOffSet(spark:SparkSession):Unit={
// Subscribe to multiple topics, specifying explicit Kafka offsets
val df = spark
.read
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.option("subscribe", "topic1,topic2")
.option("startingOffsets", """{"topic1":{"0":23,"1":-2},"topic2":{"0":-2}}""")
.option("endingOffsets", """{"topic1":{"0":50,"1":-1},"topic2":{"0":-1}}""")
.load()
/*df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]*/
}
/********************** subscribe to a pattern topics, at the earliest and latest offsets ************************/
def PatternTopicWithOffSet(spark:SparkSession):Unit={
// Subscribe to a pattern, at the earliest and latest offsets
val df = spark
.read
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.option("subscribePattern", "topic.*")
.option("startingOffsets", "earliest")
.option("endingOffsets", "latest")
.load()
/*df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]*/
}
/* Each row in the source has the following schema:
- Column Type
- key binary
- value binary
- topic string
- partition int
- offset long
- timestamp long
- timestampType int
*/
/* The following options must be set for the Kafka source for both batch and streaming queries.
*
Option | value | meaning
assign | json string {"topicA":[0,1],"topicB":[2,4]} | Specific TopicPartitions to consume. Only one of "assign", "subscribe" or "subscribePattern" options can be specified for Kafka source.
subscribe | A comma-separated list of topics | The topic list to subscribe. Only one of "assign", "subscribe" or "subscribePattern" options can be specified for Kafka source.
  subscribePattern | Java regex string | The pattern used to subscribe to topic(s). Only one of "assign", "subscribe" or "subscribePattern" options can be specified for Kafka source.
kafka.bootstrap.servers | A comma-separated list of host:port | The Kafka "bootstrap.servers" configuration.
*/
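  /* A minimal sketch (broker addresses, topic names and partition numbers are made up) of the "assign" option
   * described above: consuming explicit TopicPartitions instead of subscribing to whole topics. */
  def AssignTopicPartitionsExample(spark:SparkSession):Unit={
    val df=spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers","host1:port1,host2:port2")
      .option("assign","""{"topicA":[0,1],"topicB":[2,4]}""")
      .load()
  }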
/*****************************************************************************************************************
* ***************************** 6.5.6.3 Writing data to kafka *************************************************
* ***************************************************************************************************************/
/* We write both streaming query and batch query to Kafka. Take note that Apache Kafka only supports at least once
* write semantics. Consequently, when writing—either Streaming Queries or Batch Queries—to Kafka, some records may
* be duplicated;
*
* This can happen, for example, if Kafka needs to retry a message that was not acknowledged by a Broker, even though
* that Broker received and wrote the message record. Structured Streaming cannot prevent such duplicates from
* occurring due to these Kafka write semantics. However, if writing the query is successful, then you can assume
* that the query output was written at least once. A possible solution to remove duplicates when reading the written
* data could be to introduce a primary (unique) key that can be used to perform de-duplication when reading.
*
* The Dataframe being written to Kafka should have the following columns in schema:
* Column | Type
* key (optional) | string or binary
* value (required) | string or binary
* topic (*optional) | string
* The topic column is required if the “topic” configuration option is not specified in the writeStream.
   * The value column is the only column that is always required. If a key column is not specified then a null valued key column
* will be automatically added (see Kafka semantics on how null valued key values are handled). If a topic column
* exists then its value is used as the topic when writing the given row to Kafka, unless the “topic” configuration
* option is set i.e., the “topic” configuration option overrides the topic column.
*/
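  /* A minimal sketch (the broker addresses, the "user" topic and the idea of reusing the Kafka key as a unique id
   * are assumptions, not part of the lesson) of the de-duplication approach described above: give every record a
   * unique key when writing, then drop duplicates on that key when reading the data back, to compensate for Kafka's
   * at-least-once write semantics. */
  def DeduplicateOnReadExample(spark:SparkSession):Unit={
    val df=spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers","host1:port1,host2:port2")
      .option("subscribe","user")
      .load()
    val deduplicated=df
      .selectExpr("CAST(key AS STRING) AS uniqueId","CAST(value AS STRING) AS value")
      .dropDuplicates("uniqueId")
  }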
/**************************************6.5.6.3.1 Write Stream Query to Kafka **************************/
def WriteStreamQueryToKafkaExample(spark:SparkSession):Unit={
TopicInDFExample(spark)
}
def TopicInDFExample(spark:SparkSession):Unit={
import spark.implicits._
val df=spark.sparkContext.parallelize(List(
kafkaMessage("user","name","haha"),
kafkaMessage("user","age","32"),
kafkaMessage("user","name","foo"),
kafkaMessage("user","age","22")
)).toDF()
    // Write key-value data from a DataFrame to Kafka using a topic specified in the data.
    // Note: writeStream only works on a streaming DataFrame; in a real streaming query, df would come from
    // spark.readStream. The static df built above only illustrates the expected (topic, key, value) schema,
    // and a real query would also need a checkpoint location (the "checkpointLocation" option).
val ds = df
.selectExpr("topic", "CAST(key AS STRING)", "CAST(value AS STRING)")
.writeStream
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.start()
}
def TopicInOptionExample(spark:SparkSession):Unit={
import spark.implicits._
val df=spark.sparkContext.parallelize(List(
Message("name","haha"),
Message("age","32"),
Message("name","foo"),
Message("age","22")
)).toDF()
    // Write key-value data from a DataFrame to a specific Kafka topic specified in an option.
    // Note: as above, writeStream requires a streaming DataFrame; the static df here only illustrates the
    // expected (key, value) schema, and a real query would also need a checkpoint location.
val ds = df
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.writeStream
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.option("topic", "user")
.start()
}
/************************************** 6.5.6.3.2 Write Batch Query to Kafka *********************************/
  def WriteBatchQueryToKafkaExample(spark:SparkSession):Unit={
    BatchTopicInOption(spark)
    BatchTopicInDF(spark)
  }
def BatchTopicInOption(spark:SparkSession):Unit={
import spark.implicits._
val df=spark.sparkContext.parallelize(List(
Message("name","haha"),
Message("age","32"),
Message("name","foo"),
Message("age","22")
)).toDF()
// Write key-value data from a DataFrame to a specific Kafka topic specified in an option
df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.write
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.option("topic", "topic1")
.save()
}
def BatchTopicInDF(spark:SparkSession):Unit={
import spark.implicits._
val df=spark.sparkContext.parallelize(List(
kafkaMessage("user","name","haha"),
kafkaMessage("user","age","32"),
kafkaMessage("user","name","foo"),
kafkaMessage("user","age","22")
)).toDF()
// Write key-value data from a DataFrame to Kafka using a topic specified in the data
df.selectExpr("topic", "CAST(key AS STRING)", "CAST(value AS STRING)")
.write
.format("kafka")
.option("kafka.bootstrap.servers", "host1:port1,host2:port2")
.save()
}
case class kafkaMessage(topic:String,key:String,value:String)
case class Message(key:String,value:String)
/*****************************************************************************************************************
* ***************************** 6.5.6.4 kafka Specific configurations *************************************************
* ***************************************************************************************************************/
/*
* Kafka’s own configurations can be set via DataStreamReader.option with kafka. prefix, e.g,
* stream.option("kafka.bootstrap.servers", "host:port"). For possible kafka parameters, see Kafka consumer config
* docs for parameters related to reading data, and Kafka producer config docs for parameters related to writing data.
*
* Note that the following Kafka params cannot be set and the Kafka source or sink will throw an exception:
* - group.id: Kafka source will create a unique group id for each query automatically.
*
* - auto.offset.reset: Set the source option startingOffsets to specify where to start instead. Structured Streaming
* manages which offsets are consumed internally, rather than rely on the kafka Consumer to do it.
* This will ensure that no data is missed when new topics/partitions are dynamically subscribed.
* Note that startingOffsets only applies when a new streaming query is started, and that
* resuming will always pick up from where the query left off.
*
* - key.deserializer: Keys are always deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame
* operations to explicitly deserialize the keys.
*
* - value.deserializer: Values are always deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame
* operations to explicitly deserialize the values.
*
* - key.serializer: Keys are always serialized with ByteArraySerializer or StringSerializer. Use DataFrame
* operations to explicitly serialize the keys into either strings or byte arrays.
*
* - value.serializer: values are always serialized with ByteArraySerializer or StringSerializer. Use DataFrame
* operations to explicitly serialize the values into either strings or byte arrays.
*
* - enable.auto.commit: Kafka source doesn’t commit any offset.
*
   * - interceptor.classes: Kafka source always reads keys and values as byte arrays. It’s not safe to use
* ConsumerInterceptor as it may break the query.
*
* */
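  /* A minimal sketch (the broker addresses and topic name are made up; session.timeout.ms is just one example of a
   * real Kafka consumer setting) of the points above: Kafka's own consumer settings are passed with the "kafka."
   * prefix, and the binary key/value columns are deserialized explicitly with DataFrame operations instead of
   * Kafka deserializers. */
  def KafkaSpecificConfigExample(spark:SparkSession):Unit={
    import spark.implicits._
    val df=spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers","host1:port1,host2:port2")
      .option("kafka.session.timeout.ms","30000") // forwarded as-is to the underlying Kafka consumer
      .option("subscribe","topic1")
      .load()
    // keys and values arrive as binary; deserialize them explicitly
    val decoded=df.selectExpr("CAST(key AS STRING)","CAST(value AS STRING)").as[(String,String)]
  }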
/*****************************************************************************************************************
* ****************************************** 6.5.6.5 Deploying *************************************************
* ***************************************************************************************************************/
/* As with any Spark applications, spark-submit is used to launch your application. spark-sql-kafka-0-10_2.11 and
* its dependencies can be directly added to spark-submit using --packages, such as,
*
* ./bin/spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.1 ...
*
* For experimenting on spark-shell, you can also use --packages to add spark-sql-kafka-0-10_2.11 and its
* dependencies directly,
*
* ./bin/spark-shell --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.1 ...
*
* See Application Submission Guide for more details about submitting applications with external dependencies.*/
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson10_Spark_Application_ETL/Lesson10_2_data_cleaning.scala | package org.pengfei.Lesson10_Spark_Application_ETL
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}
object Lesson10_2_data_cleaning {
/** ***********************************************************************************************
* ***********************************10.2 Data cleaning with Spark ********************************
* **********************************************************************************************/
/*********************************10.2.1 Get basic data quality stats *********************************************/
/* If we have many null cells in a data set, and null is not expected. We can say that data quality is low. In spark
* we have isNull, isNotNull, isNaN to detect null values and not a number.
*
* Null values represents "no value" or "nothing", it's not even an empty string or zero. It can be used to represent
* that nothing useful exists.
*
   * NaN stands for "Not a Number"; it's usually the result of a mathematical operation that doesn't make sense.
   * For example, isnan(0.0/0.0) returns true, but isnan applied to a string value such as "toto" returns false.
   * As a result, we can't use isNaN to determine whether a numeric column contains non-digit values; we need to
   * write our own UDF to do that.
*
*
* */
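  /* A minimal sketch (not part of the original lesson; the column names "a" and "b" are made up) illustrating
   * the difference described above: isNull detects missing values, while isNaN only detects the special
   * Double.NaN value produced by operations such as 0.0/0.0. */
  def nullVsNaNExample(spark: SparkSession): Unit = {
    import spark.implicits._
    val df = Seq[(Option[Double], Double)]((Some(1.0), Double.NaN), (None, 2.0)).toDF("a", "b")
    df.select(col("a").isNull.as("a_isNull"), col("b").isNaN.as("b_isNaN")).show()
  }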
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().appName("Lesson10_2_data_cleaning").master("local[2]").getOrCreate()
import spark.implicits._
/* //Test of isDigit function
val val1 = "-1.1"
val val2 = "22"
val val3 = "1,1"
println(s"$val1 is digit ${isDigit(val1)}")
println(s"$val2 is digit ${isDigit(val2)}")
println(s"$val3 is digit ${isDigit(val3)}")*/
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path = sparkConfig.getString("sourceDataPath")
val filePath = s"${path}/spark_lessons/Lesson10_Spark_Application_ETL/data_cleaning"
val fileName = "/error1.csv"
val error1Schema = StructType(Array(
StructField("City", StringType, true),
StructField("Year", IntegerType, true),
StructField("Income", DoubleType, true)))
    // With inferSchema, a column containing only digits is automatically typed as integer; if the column contains
    // values such as 10.0, or a space before 10, the column type is inferred as double instead.
val error1Df = spark.read.option("header", "false").option("nullValue", "?").option("inferSchema", "true").csv(filePath + fileName).toDF("City", "Year", "Income")
    // When we give an explicit schema, everything is ok. Now try to modify the csv file: add a string value in the
    // Year column and check the resulting data frame. You will notice that the line containing the string value
    // becomes null in all columns of the data frame.
// val error1Df=spark.read.option("header","false").schema(error1Schema).csv(filePath+fileName)
error1Df.show()
// error1Df.printSchema()
/*********************************10.2.1.1 Count null cells *********************************************/
/*
     * Null values exist in almost all data sets. We need to detect and count them. Check the getNullCountForAllCols
     * function to see how we detect null values. The following code is an example:
* */
val nullCountDF=getNullCountForAllCols(error1Df)
nullCountDF.show(nullCountDF.count().toInt,false)
/*********************************10.2.1.2 Count notDigit cells *********************************************/
    /* If most of the cells in a column contain only digit values, we should convert the column to a numeric type,
     * because numeric columns have better arithmetic function support. Cells that are not digits in such a
     * column are probably caused by errors. We need to detect them, count them and, sometimes, show their values.
     *
     * Note that we consider null a valid digit value.
*/
    /* Check the getIsDigitDF function: we use a user-defined function to create a new data frame. The following code
     * is an example. */
val isDigitDf = getIsDigitDF(error1Df, List("Year", "Income"))
isDigitDf.show()
    /* With the new data frame produced, we can count how many bad values each column contains, and we can also show
     * the lines which contain the bad values.
* */
val badValues=getIsNotDigitCount(isDigitDf)
badValues.show()
    /* Based on the output of badValues, we can determine that the columns Year and Income contain bad values, and
     * we can use the showNotDigitValues function to get the distinct bad values. */
showNotDigitValues(isDigitDf,List("Year","Income"))
    /* We can also get a data frame which contains only the lines with bad values: */
val badValueFullLines=showLinesWithBadValue(isDigitDf)
badValueFullLines.show(5,false)
}
/**
    * This function counts the number of null cells in a given column
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
    * @param colName the name of the column to check for null values
    * @return Long, the number of null cells in the column
**/
def getNullCount(df: DataFrame, colName: String): Long = {
df.select(colName).filter(col(colName).isNull).count()
}
/**
* This function counts the null cell number for all columns of the source data frame
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame, It returns a data frame which contains three columns ("column_name","null_cell_count","null_cell_percentage")
**/
def getNullCountForAllCols(df: DataFrame): DataFrame = {
val spark = df.sparkSession
val totalLineNb = df.count()
import spark.implicits._
val buf = scala.collection.mutable.ListBuffer.empty[(String, Long, Double)]
for (colName <- df.columns) {
val nulLineNb = getNullCount(df, colName)
val nullPercentage: Double = (nulLineNb.toDouble / totalLineNb.toDouble) * 100
buf.append((colName, nulLineNb, nullPercentage))
}
val result = buf.toList.toDF("column_name", "null_cell_count", "null_cell_percentage")
return result
}
/**
    * This function counts the number of NaN (not a number) cells in a given column
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
    * @param colName the name of the column to check for NaN values
    * @return Long, the number of NaN cells in the column
**/
def getNaNCount(df: DataFrame, colName: String): Long = {
df.select(colName).filter(col(colName).isNaN).count()
}
/**
* This function uses regular expression to check if a string value is a digit or not.
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param value A string value
* @return Boolean
**/
def isDigit(value: String): Boolean = {
if (value ==null) return true
else if (value.equals(" ")) return false
else {
// we use regular expression,
// ^-? : It means it may starts with -
// [0-9]+ : followed by at least one digit between 0 and 9
      // (\.|,) : the integer and decimal parts can be separated by . or , ; we protect . with \. because it is a special character in regular expressions.
      // [0-9]+ : followed by at least one digit.
      // ((\.|,)[0-9]+)? : the whole decimal part is optional.
return value.matches("^-?[0-9]+((\\.|,)[0-9]+)?$")
}
}
/**
    * This function returns a new data frame with an additional column which indicates, for each row, whether the
    * value of the target column is a digit or not
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
    * @param colName the name of the column to check for non-digit values
* @return DataFrame
**/
def getIsDigitDF(df: DataFrame, colName: String): DataFrame = {
val spark = df.sparkSession
import spark.implicits._
//register a udf for isDigit method
spark.udf.register("isDigit", (arg: String) => isDigit(arg))
//create column colName_isDigit,
df.withColumn(s"${colName}_isDigit", expr(s"isDigit($colName)"))
}
/**
    * This function is the overloaded version of getIsDigitDF. It takes a list of column names and returns a new data
    * frame which contains, for each target column, a new column indicating whether each cell contains a digit
    * or not
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
    * @param colNames a list of the column names to check for non-digit values
* @return DataFrame
**/
def getIsDigitDF(df: DataFrame, colNames: List[String]): DataFrame = {
var result = df
for (colName <- colNames) {
result = getIsDigitDF(result, colName)
}
return result
}
/**
    * This function takes a data frame produced by getIsDigitDF. For each checked column it counts the cells that are
    * not digits and calculates a percentage based on the total row count, then returns this information as a data frame
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame
**/
def getIsNotDigitCount(df: DataFrame): DataFrame = {
val spark = df.sparkSession
import spark.implicits._
// get column names as an array
val colNames = df.columns.toArray
val totalCount = df.count()
// create a buffer to store result before converting to data frame
val buf = scala.collection.mutable.ListBuffer.empty[(String, Long, Double)]
for (colName <- colNames) {
val index = colName.lastIndexOf("_isDigit")
      if (index > 0) { // lastIndexOf returns -1 if the column is not one of the *_isDigit flag columns
val sourceColName = colName.substring(0, index)
val noDigitCount = df.filter(col(colName) === false).count()
val percentage: Double = (noDigitCount.toDouble / totalCount.toDouble) * 100
buf.append((sourceColName, noDigitCount, percentage))
}
}
buf.toList.toDF("column_name", "isNotDigit_cell_count", "isNotDigit_cell_percentage")
}
/**
    * This function takes a data frame produced by getIsDigitDF and shows the distinct values of the cells that are not digits
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
    * @param colNames list of column names that contain non-digit cells
* @return DataFrame
**/
def showNotDigitValues(df:DataFrame,colNames:List[String])={
for(colName<-colNames){
val badValues= df.filter(col(s"${colName}_isDigit")===false).select(colName).distinct()
badValues.show(badValues.count().toInt,false)
}
}
/**
* This function takes a data frame produced by getIsDigitDF, it returns a new data frame which contains only the
    * lines with bad values (string values in a digit column).
*
* @author <NAME>
* @version 1.0
* @since 2020-01-27
* @param df source data frame
* @return DataFrame
**/
def showLinesWithBadValue(df:DataFrame):DataFrame={
val spark=df.sparkSession
// get column names as an Array
val colNames=df.columns.toArray
// get schema of the data frame
val schema=df.schema
// to create an empty data frame with a specific schema, we need to use the sparkContext to create an empty RDD
val sc=spark.sparkContext
var result:DataFrame=spark.createDataFrame(sc.emptyRDD[Row], schema)
for(colName<-colNames){
if(colName.contains("_isDigit")){
result=result.union(df.filter(col(colName)===false))
}
}
return result
}
/** **************************************************************************************************************
* ************************************** error1.csv ***********************************************************
* ************************************************************************************************************/
/*
Beijin,2016,100.0
Warsaw,2017,200
Boston,2015,ok
,,
Benxi,love,150
Toronto,2017,50
GuangZhou,2017,50
,,
,,
,,
* */
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson06_Spark_Streaming/Lesson06_5_5_Operations_On_Streaming_DF.scala | package org.pengfei.Lesson06_Spark_Streaming
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.expressions.scalalang.typed
import org.apache.spark.sql.streaming.{OutputMode, Trigger}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
object Lesson06_5_5_Operations_On_Streaming_DF {
val host="localhost"
val port=9999
def main(args:Array[String])={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder.appName("Lesson6_5_5_Operations_On_Streaming_DF").master("local[2]").getOrCreate()
/*****************************************************************************************************************
* ***************************** 6.5.5 Operations on streaming Dataframe/DataSets *******************************
* ***************************************************************************************************************/
/***********************************6.5.5.1 Basic Operations******************************************************/
/* Most of the common dataframe/set operations are supported for streaming. The few operations that are not
* supported :
* 1. Multiple streaming aggregations (i.e. a chain of aggregations on a streaming DF) are not yet supported
* on streaming datasets.
* 2. Limit and first N rows are not supported on streaming datasets
* 3. Distinct operations on streaming are not supported
* 4. Sorting operations are supported on streaming datasets only after an aggregation and in Complete output mode
* 5. Few types of outer joins on streaming dataset are not supported. See section 6.5.5.1.1
*
* In addition, there are some Dataset methods that will not work on streaming Datasets. They are actions that
* will immediately run queries and return results, which does not make sense on a streaming Dataset. Rather, those
* functionalities can be done by explicitly starting a streaming query (see the next section regarding that).
*
* count() - Cannot return a single count from a streaming Dataset. Instead, use ds.groupBy().count() which returns
* a streaming Dataset containing a running count.
*
* foreach() - Instead use ds.writeStream.foreach(...) (see next section).
*
* show() - Instead use the console sink (see next section).
*
* If you try any of these operations, you will see an AnalysisException like “operation XYZ is not supported with
* streaming DataFrames/Datasets”. While some of them may be supported in future releases of Spark, there are others
* which are fundamentally hard to implement on streaming data efficiently. For example, sorting on the input stream
* is not supported, as it requires keeping track of all the data received in the stream. This is therefore
* fundamentally hard to execute efficiently.*/
/*********************************6.5.5.1.1 Support matrix for joins in streaming queries*************************/
/* We can do joins between static and stream dataframe, below is the matrix which describe which join is supported
*
* Left Input | Right Input | Join Type | Support or not
* Static | Static | All types | Supported, since its not on streaming data even though it can be present in a streaming query
* Stream | Static | Inner | Supported, not stateful
* | | Left Outer | Supported, not stateful
* | | Right Outer | Not supported
* | | Full Outer | Not supported
* Static | Stream | Inner | Supported, not stateful
* | | Left Outer | Not supported
* | | Right Outer | Supported, not stateful
* | | Full Outer | Not supported
* Stream | Stream | Inner | Supported, optionally specify watermark on both sides + time constraints for state cleanup
* | | Left Outer | Conditionally supported, must specify watermark on right + time constraints for correct results, optionally specify watermark on left for all state cleanup
* | | Right Outer | Conditionally supported, must specify watermark on left + time constraints for correct results, optionally specify watermark on right for all state cleanup
* | | Full Outer | Not supported
*
* Additional details on supported joins:
*
* - Joins can be cascaded, that is, you can do df1.join(df2, ...).join(df3, ...).join(df4, ....).
* - As of Spark 2.3, you can use joins only when the query is in Append output mode. Other output modes are
* not yet supported.
* - As of Spark 2.3, you cannot use other non-map-like operations before joins. Here are a few examples of
* what cannot be used.
* - Cannot use streaming aggregations before joins.
* - Cannot use mapGroupsWithState and flatMapGroupsWithState in Update mode before joins */
/******************************6.5.5.1.2 Selection/Projection/aggregation *****************************************/
// BasicOperationExample(spark)
/********************************6.5.5.1.3 Stateless Aggregations **********************************************/
    /* In Structured Streaming, all aggregations are stateful by default. As we saw in BasicOperationExample, when we
     * do groupBy and count on a dataframe, Spark remembers the state from the beginning of the stream. We also write
     * the complete output every time new data is received, because the state keeps changing.
     *
     * Most stream processing scenarios need to be stateful, which comes with the cost of state management and state
     * recovery in case of failure. But if we are doing simple ETL processing on the stream, we may not need state to
     * be kept across the whole stream. Sometimes we want to keep the state just for a small batch and then reset it.
*
* For example, let’s say we want to count the words for every 5 seconds. Here the aggregation is done on
* the data stream which is collected for last 5 seconds. The state is only kept for those 5 seconds and then
* forgotten. So in case of failure, we need to recover data only for last 5 seconds.
* */
// StatelessAggExample(spark)
/******************************** 6.5.5.2 Window operations on Event Time ****************************************/
/* Aggregations over a sliding event-time window are straightforward with Structured Streaming and are very
* similar to grouped aggregations. In a grouped aggregation, aggregate values (e.g. counts) are maintained
* for each unique value in the user-specified grouping column. In case of window-based aggregations,
* aggregate values are maintained for each window the event-time of a row falls into. Let’s understand
* this with an illustration.
*
* Imagine data stream now contains lines along with the time when the line was generated. Instead of running
* word counts, we want to count words within 10 minute windows, updating every 5 minutes. That is, word counts
* in words received between 10 minute windows 12:00 - 12:10, 12:05 - 12:15, 12:10 - 12:20, etc. Note that
* 12:00 - 12:10 means data that arrived after 12:00 but before 12:10. Now, consider a word that was received
* at 12:07. This word should increment the counts corresponding to two windows 12:00 - 12:10 and 12:05 - 12:15.
* So the counts will be indexed by both, the grouping key (i.e. the word) and the window (can be calculated from
* the event-time).*/
// WindowOperationExample(spark)
/***************************** 6.5.5.3 Handling late data and watermarking ********************************/
/* Now consider what happens if one of the events arrives late to the application. For example, say, a word generated
* at 12:04 (i.e. event time) could be received by the application at 12:11. The application should use the time 12:04
* instead of 12:11 to update the older counts for the window 12:00 - 12:10. This occurs naturally in our window-based
* grouping – Structured Streaming can maintain the intermediate state for partial aggregates for a long period of
* time such that late data can update aggregates of old windows correctly, as illustrated below.
*
* However, to run this query for days, it’s necessary for the system to bound the amount of intermediate in-memory
* state it accumulates. This means the system needs to know when an old aggregate can be dropped from the in-memory
* state because the application is not going to receive late data for that aggregate any more. To enable this,
* in Spark 2.1, we have introduced watermarking, which lets the engine automatically track the current event
* time in the data and attempt to clean up old state accordingly. You can define the watermark of a query by
* specifying the event time column and the threshold on how late the data is expected to be in terms of event time.
* For a specific window starting at time T, the engine will maintain state and allow late data to update the state
* until (max event time seen by the engine - late threshold > T). In other words, late data within the threshold
* will be aggregated, but data later than the threshold will start getting dropped (see later in the section for
* the exact guarantees). Let’s understand this with an example. We can easily define watermarking on the previous
* example using withWatermark() as shown below.
* */
// WaterMarkOperationExample(spark)
    /* Some sinks (e.g. files) may not support the fine-grained updates that Update Mode requires. To work with them,
* we have also support Append Mode, where only the final counts are written to sink.
*
* Similar to the Update Mode earlier, the engine maintains intermediate counts for each window. However,
* the partial counts are not updated to the Result Table and not written to sink. The engine waits for
     * “10 mins” for late data to be counted, then drops intermediate state of a window < watermark,
* and appends the final counts to the Result Table/sink. For example, the final counts of window 12:00 - 12:10 is
* appended to the Result Table only after the watermark is updated to 12:11.
*
* Note that using withWatermark on a non-streaming Dataset is no-op. As the watermark should not affect any batch
* query in any way, we will ignore it directly.*/
/* Conditions for watermarking to clean aggregation state
*
* It is important to note that the following conditions must be satisfied for the watermarking to clean the
* state in aggregation queries (as of Spark 2.1.1, subject to change in the future)
* - output mode must be Append or Update. Complete mode requires all aggregate data to be preserved,
* and hence cannot use watermarking to drop intermediate state. See the Output Modes section for
* detailed explanation of the semantics of each output mode.
*
* - The aggregation must have either the event-time column, or a window on the event-time column.
*
* - withWatermark must be called on the same column as the timestamp column used in the aggregate.
* For example, df.withWatermark("time", "1 min").groupBy("time2").count() is invalid in Append output mode,
* as watermark is defined on a different column from the aggregation column.
*
* - withWatermark must be called before the aggregation for the watermark details to be used. For example,
* df.groupBy("time").count().withWatermark("time", "1 min") is invalid in Append output mode.
* */
/* Semantic Guarantees of Aggregation with Watermarking
*
* - A watermark delay (set with withWatermark) of “2 hours” guarantees that the engine will never drop any data
* that is less than 2 hours delayed. In other words, any data less than 2 hours behind (in terms of event-time)
* the latest data processed till then is guaranteed to be aggregated.
*
* - However, the guarantee is strict only in one direction. Data delayed by more than 2 hours is not guaranteed
     * to be dropped; it may or may not get aggregated. The more delayed the data is, the less likely the engine is
     * to process it.*/
/***********************************6.5.5.4 Join Operations******************************************************/
/* Structured Streaming supports joining a streaming Dataset/DataFrame with a static Dataset/DataFrame as well as
* another streaming Dataset/DataFrame. The result of the streaming join is generated incrementally, similar to the
* results of streaming aggregations in the previous section. In this section we will explore what type of joins
* (i.e. inner, outer, etc.) are supported in the above cases. Note that in all the supported join types, the result
* of the join with a streaming Dataset/DataFrame will be the exactly the same as if it was with a static
* Dataset/DataFrame containing the same data in the stream.
*
* But there are limits on join operations, see section "6.5.5.1.1 Support matrix for joins" for join support.
*
     * In Spark 2.3, support for stream-stream joins was added. The challenge of generating join results between two
     * data streams is that, at any point of time, the view of the dataset is incomplete on both sides of the join,
     * so it is hard to find matches between inputs. Spark handles this by buffering past input as streaming state and
     * matching every new input against it (see StreamStreamJoinExample at the end of this file for a sketch). */
}
def BasicOperationExample(spark:SparkSession):Unit={
//process data from socket source
// BasicOperationOnSocket(spark)
//process data from file source
// BasicOperationOnFile(spark)
}
def BasicOperationOnSocket(spark:SparkSession):Unit={
import spark.implicits._
val lines= spark.readStream.format("socket").option("host",host).option("port",port).load()
lines.isStreaming // Returns True for DataFrames that have streaming sources
//lines.printSchema
val devices:Dataset[DeviceData]=lines.as[String].map{_.split(";") match {
case Array(device,deviceType,signal,time)=>DeviceData(device,deviceType,signal.toDouble,time)
}}
// devices.printSchema()
val meanTemp=devices.groupBy($"device").mean("signal")
val query=devices.writeStream.outputMode("update").format("console").start()
val query1=meanTemp.writeStream.outputMode("complete").format("console").start()
query.awaitTermination()
query1.awaitTermination()
}
def BasicOperationOnFile(spark:SparkSession):Unit={
import spark.implicits._
val deviceSchema=new StructType().add("device","string").add("deviceType","string").add("signal","double").add("time","string")
    // an equivalent schema built explicitly with StructType (defined for illustration; not used below)
    val schema=StructType(Array(
StructField("device",StringType,false),
StructField("deviceType",StringType,false),
StructField("signal",DoubleType,false),
StructField("time",StringType,false)
))
val path="/DATA/data_set/spark/basics/Lesson6_Spark_Streaming/structrued_streaming/device"
//df untyped
val df:DataFrame=spark.readStream.option("sep",";").schema(deviceSchema).csv(path)
df.printSchema()
//We can also create temp or global view for sql query
df.createOrReplaceTempView("device")
// all query on streaming data frame view also return another streaming df
val countDevice=spark.sql("select count(*) from device")
val ds:Dataset[DeviceData]=df.as[DeviceData]
// select devices which have signal more than 27
val d1=df.select("device").where("signal>27")
val d2=ds.filter(_.signal>27).map(_.device)
// average signal for each device
val d3=ds.groupByKey(_.device).agg(typed.avg(_.signal))
val query=d3.writeStream.outputMode("update").format("console").start()
query.awaitTermination()
}
case class DeviceData(device:String, deviceType:String,signal:Double,time:String)
def StatelessAggExample(spark: SparkSession):Unit ={
import spark.implicits._
val lines=spark.readStream.format("socket").option("host",host).option("port",port).load()
val words=lines.as[String].flatMap(_.split(";"))
/* Rather than using groupBy API of dataframe, we use groupByKey from the dataset. As we need to group on words,
* we just pass the same value to grouping function. If you have complex object, then you can choose which
* column you want to treat as the key.
*
* flatMapGroups is an aggregation API which applies a function to each group in the dataset. It’s
* only available on grouped dataset. This function is very similar to reduceByKey of RDD world which allows
* us to do arbitrary aggregation on groups.
*
* In our example, we apply a function for every group of words, we do the count for that group.
*
     * One thing to remember is that flatMapGroups is slower than the count API. The reason is that flatMapGroups
     * doesn’t support partial aggregation, which increases the shuffle overhead. So use this API only for small batch
* aggregations. If you are doing aggregation across the stream, use the stateful operations.
* */
val countDs = words.groupByKey(value=>value).flatMapGroups{
case (value, iter)=> Iterator((value, iter.length))
}.toDF("value", "count")
val query = countDs.writeStream.format("console").outputMode("append").trigger(Trigger.ProcessingTime("5 seconds")).start()
query.awaitTermination()
}
def WindowOperationExample(spark:SparkSession):Unit={
import spark.implicits._
//streaming DataFrame of schema { word: String, timestamp: String }
val lines=spark.readStream.format("socket").option("host",host).option("port",port).load()
/* Since this windowing is similar to grouping, in code, you can use groupBy() and window() operations to
* express windowed aggregations. */
val words=lines.as[String].map{_.split(";") match {
case Array(word,timestamp)=>WordWithTime(word,timestamp)
}}
//convert string time to timestamp, now streaming DataFrame of schema { word: String, timestamp: Timestamp }
val wordsTime=words.select($"word",unix_timestamp($"time","yyyy/MM/dd HH:mm:ss").cast(TimestampType).as("timestamp"))
val windowedCounts = wordsTime.groupBy(window($"timestamp","10 minutes","5 minutes"),$"word").count()
    // with no watermark defined, append output mode is not allowed for streaming aggregations, so we use update mode
    val query= windowedCounts.writeStream.format("console").outputMode("update").start()
query.awaitTermination()
}
def WaterMarkOperationExample(spark:SparkSession):Unit={
import spark.implicits._
val lines:DataFrame = spark.readStream.format("socket").option("host",host).option("port",port).load()
val words:Dataset[WordWithTime] = lines.as[String].map{_.split(";") match {
case Array(word,timestamp)=>WordWithTime(word,timestamp)
}}
//convert string time to timestamp, now streaming DataFrame of schema { word: String, timestamp: Timestamp }
val wordsTime=words.select($"word",unix_timestamp($"time","yyyy/MM/dd HH:mm:ss").cast(TimestampType).as("timestamp"))
    // the watermark must be defined on wordsTime, which contains the "timestamp" column used in the aggregation
    val windowedCounts=wordsTime.withWatermark("timestamp","10 minutes")
.groupBy(window($"timestamp","10 minutes","5 minutes"), $"word").count()
/* In this example, we are defining the watermark of the query on the value of the column “timestamp”,
* and also defining “10 minutes” as the threshold of how late is the data allowed to be. If this query is run in
* Update output mode (discussed later in Output Modes section), the engine will keep updating counts of a window
* in the Result Table until the window is older than the watermark, which lags behind the current event time in
* column “timestamp” by 10 minutes. */
val query=windowedCounts.writeStream.format("console").outputMode("update").start()
query.awaitTermination()
}
case class WordWithTime(word:String,time:String)
def JoinOperationExample(spark:SparkSession):Unit={
import spark.implicits._
val schema=StructType(Array(
StructField("device",StringType,false),
StructField("deviceType",StringType,false),
StructField("signal",DoubleType,false),
StructField("time",StringType,false)
))
val path="/DATA/data_set/spark/basics/Lesson6_Spark_Streaming/structrued_streaming/device"
val deviceDf:DataFrame=spark.readStream.option("sep",";").schema(schema).csv(path)
val providerDf:DataFrame=spark.sparkContext.parallelize(Seq(
DeviceProvider("t1","pengfei","CN"),
DeviceProvider("t2","HuaWei","CN"),
DeviceProvider("t3","bar","FR")
)).toDF()
//stream df join static df, only inner and left join is supported,
// The full supported join matrix is in section 6.5.5.1.1
    // inner equi-join with a static DF on the common "device" column
    val joined=deviceDf.join(providerDf,"device")
}
case class DeviceProvider(device:String,deviceProvider:String,providerContry:String)
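  /* A minimal sketch (the rate source, the impressions/clicks naming and the time bounds are made up for
   * illustration, following the pattern of the official docs) of the stream-stream inner join mentioned in
   * section 6.5.5.4: both sides get a watermark and the join condition carries a time constraint so that old
   * state can eventually be cleaned up. */
  def StreamStreamJoinExample(spark:SparkSession):Unit={
    import spark.implicits._
    val impressions=spark.readStream.format("rate").option("rowsPerSecond","5").load()
      .select($"value".as("impressionAdId"),$"timestamp".as("impressionTime"))
      .withWatermark("impressionTime","2 hours")
    val clicks=spark.readStream.format("rate").option("rowsPerSecond","5").load()
      .select($"value".as("clickAdId"),$"timestamp".as("clickTime"))
      .withWatermark("clickTime","3 hours")
    // a click must belong to an impression seen at most one hour earlier
    val joined=impressions.join(clicks,
      expr("clickAdId = impressionAdId AND clickTime >= impressionTime AND clickTime <= impressionTime + interval 1 hour"))
    val query=joined.writeStream.format("console").outputMode("append").start()
    query.awaitTermination()
  }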
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson04_Spark_SQL/Lesson04_1_Spark_SQL_Intro.scala | package org.pengfei.Lesson04_Spark_SQL
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
object Lesson04_1_Spark_SQL_Intro {
def main(args: Array[String]):Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().master("local[2]").appName("Lesson4_1_Saprk_SQL_Intro").getOrCreate()
/** ***********************************************************************************************
* ***********************************4.1 Spark sql introduction ********************************
* **********************************************************************************************/
/* Spark SQL is a Spark library that runs on top of Spark. It provides a higher-level abstraction than the Spark
* core API for processing structured data. Structured data includes data stored in a database, NoSQL data
* store, Parquet, ORC, Avro, JSON, CSV, or any other (semi-structured) format
*
* Spark SQL can be used as a library for developing data processing applications in Scala, Java, Python,
* or R. It supports multiple query languages, including SQL, HiveQL, and language integrated queries. In
* addition, it can be used for interactive analytics with just SQL/HiveQL. In both cases, it internally uses the
* Spark core API to execute queries on a Spark cluster*/
/** ******************************4.1.1 Integration With other libraries ********************************/
/* Spark SQL seamlessly integrates with other Spark libraries such as Spark Streaming, Spark ML, and GraphX
* It can be used for not only interactive and batch processing of historical data but also live data stream
* processing (not really, but micro batch processing) along with Spark Streaming. Similarly, it can be used
* in machine learning applications with MLlib and Spark ML. For example, Spark SQL can be used for feature
* engineering in a machine learning application.*/
/** ********************************4.1.2 Data Sources *************************************************/
/* Spark SQL supports a variety of data sources. It can be used to process data stored in a file, a NoSQL
* datastore, or a database. A file can be on HDFS, S3, or local file system. The file formats supported by Spark
* SQL include CSV, JSON, Parquet, ORC, and Avro.
*
* Spark SQL supports a number of relational databases and NoSQL datastores. The relational databases
* supported by Spark SQL include PostgreSQL, MySQL, H2, Oracle, DB2, MS SQL Server, and other databases
* that provide JDBC connectivity. The NoSQL data stores that can be used with Spark SQL include HBase,
* Cassandra, Elasticsearch, Druid, and other NoSQL data stores. The list of data sources that can be used with
* Spark SQL keeps growing.*/
/** ********************************4.1.3 Data Processing interface *************************************************/
/* Spark SQL exposes three data processing interfaces: SQL, HiveQL and language integrated queries. It translates
* queries written using any of these interfaces into Spark core API calls.
*
* As previously mentioned, both SQL and HiveQL are higher-level declarative languages. In a declarative
* language, you just specify what you want. They are much easier to learn and use. Therefore, they are popular
* as the language of choice for data processing and analytics.
*
* However, not all programmers know SQL or HiveQL. Instead of forcing these programmers to learn
* another language, Spark SQL supports language integrated queries in Scala, Java, Python and R. With
* language integrated queries, Spark SQL adds data processing capabilities to the host language; programmers
     * can process and analyze data using the native host language syntax.
*
* In addition, language integrated queries eliminate the impedance mismatch between SQL and the
* other programming languages supported by Spark. It allows a programmer to query data using SQL and
* process the results in Scala, Java, Python or R.
*
* Another benefit of language integrated queries is that it reduces errors. When SQL is used for querying
* data, a query is specified as a string. A compiler cannot detect errors within a string. Therefore, errors within
* a query string are not found until an exception is thrown at runtime. Some of these errors can be eliminated
* by using equivalent language integrated queries.
* */
/** ********************************4.1.4 Hive Interoperability *************************************************/
/* Spark SQL is compatible with Hive. It not only supports HiveQL, but can also access Hive metastore,
* SerDes, and UDFs. Therefore, if you have an existing Hive deployment, you can use Spark SQL alongside Hive.
* You do not need to move data or make any changes to your existing Hive metastore
*
* You can also replace Hive with Spark SQL to get better performance. Since Spark SQL supports HiveQL
* and Hive metastore, existing Hive workloads can be easily migrated to Spark SQL. HiveQL queries run much
* faster on Spark SQL than on Hive.
*
* Starting with version 1.4.0, Spark SQL supports multiple versions of Hive. It can be configured to read
* Hive metastores created with different versions of Hive.
*
* Note that Hive is not required to use Spark SQL. You can use Spark SQL with or without Hive. It has a
* built-in HiveQL parser. In addition, if you do not have an existing Hive metastore, Spark SQL creates one.
*
* */
/** ***********************************************************************************************
* ***********************************4.2 Spark Performance ********************************
* **********************************************************************************************/
/* Spark sql makes data processing applications run faster using a combinaiton of techniques, including
* - Reduced disk I/O
* - In memory columnar caching
* - Query optimization
* - Code generation
* */
/** **********************************4.2.1 Reduced Disk I/O ***************************************/
/* Disk I/O is slow. It can be a significant contributor to query execution time. Therefore, Spark SQL reduces
* disk I/O wherever possible. For example, depending on the data source, it can skip non-required partitions,
* rows, or columns while reading data.
*
* Reading all data to analyze a part of it is inefficient. For example, a query may have filtering clause
* that eliminates a significant amount of data before further processing. Thus, a lot of I/O is wasted on
     * data that is never used. It can be avoided by partitioning a dataset.
*
* Partitioning is a proven technique for improving read performance. A partitioned dataset is split into horizontal
* slices. Data may be partitioned by one or more columns. With partitioned datasets, spark sql skips the partitions
* that an application never uses.
*
* Predicate Pushdown
* Spark SQL also reduces disk I/O by using predicate pushdowns if a data source supports it. For example,
* if you read data from a relational database using Spark SQL and then apply some filtering operation to it,
* Spark SQL will push the filtering operation to the database. Instead of reading an entire table and then
* executing a filtering operation, Spark SQL will ask the database to natively execute the filtering operation.
* Since databases generally index data, native filtering is much faster than filtering at the application layer.*/
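    /* A minimal sketch (the JDBC URL, table name and credentials are made up; it is not part of the original lesson
     * and is never called) showing how a filter on a JDBC-backed DataFrame is pushed down to the database. */
    def predicatePushdownSketch(spark: SparkSession): Unit = {
      val ordersDF = spark.read
        .format("jdbc")
        .option("url", "jdbc:postgresql://db-host:5432/sales") // hypothetical database
        .option("dbtable", "orders")
        .option("user", "user")
        .option("password", "password")
        .load()
      // the filter below is translated into a WHERE clause executed natively by the database,
      // so only matching rows are transferred to Spark
      val bigOrders = ordersDF.filter("amount > 1000")
      bigOrders.explain() // the physical plan typically lists the pushed filter
    }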
/** ***********************************4.2.2 In Memory Columnar Caching *********************************/
/* What is a columnar storage?
* A structured dataset has a tabular format. It is organized into rows and columns.
* A dataset may have a large number of columns. However, an analytics application generally processes
* only a small percentage of the columns in a dataset. Nevertheless, if data is stored in a row-oriented
* storage format, all columns have to be read from disk. Reading all columns is wasteful and slows down an
* application. Spark SQL supports columnar storage formats such as Parquet, which allow reading of only the
* columns that are used in a query.
*
* Spark SQL allows an application to cache data in an in-memory columnar format from any data source.
* For example, you can use Spark SQL to cache a CSV or Avro file in memory in a columnar format.
* When an application caches data in memory using Spark SQL, it caches only the required columns.
* In addition, Spark SQL compresses the cached columns to minimize memory usage and JVM garbage
* collection pressure. Use of columnar format for caching allows Spark SQL to apply efficient compression
* techniques such as run length encoding, delta encoding, and dictionary encoding.
*
* Skip Rows
* If a data source maintains statistical information about a dataset, Spark SQL takes advantage of it. For example,
* serialization formats such as Parquet and ORC store min and max values for each column in a row group or
* chunk of rows. Using this information, Spark SQL can skip reading chunk of rows.
* */
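    /* A minimal sketch (the "events" view name is made up; not part of the original lesson and never called) of the
     * in-memory columnar caching described above: any DataFrame can be cached in compressed columnar form. */
    def inMemoryColumnarCachingSketch(df: DataFrame): Unit = {
      df.createOrReplaceTempView("events")
      df.sparkSession.catalog.cacheTable("events") // cached in a compressed, in-memory columnar format
      df.sparkSession.sql("SELECT count(*) FROM events").show() // served from the columnar cache
    }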
/** ********************************* 4.2.3 Query Optimization *******************************************/
/* Similar to database systems, Spark SQL optimizes a query before executing it. It generates an optimized
* physical query plan when a query is given to it for execution. It comes with a query optimizer called Catalyst,
* which supports both rule and cost based optimizations. It can even optimize across functions.
*
* Spark SQL optimizes both SQL/HiveQL and language integrated queries submitted through its
* DataFrame(DataSet and dataframe are unified in java and scala since spark2.0+) API. They share the same
* query optimizer and execution pipeline. Thus, from a performance perspective it does not matter
* whether you use SQL, HiveQL or DataFrame API; they go through the same optimization steps. The DataFrame
* API is covered later in this chapter.
*
* Catalyst splits query execution into four phases: analysis, logical optimization, physical planning, and
* code generation:
*
* - The analysis phase starts with an unresolved logical plan and outputs a logical plan.
* An unresolved logical plan contains unresolved attributes. An unresolved attribute, for
* example, could be a column whose data type or source table is not yet known. Spark
* SQL uses rules and a catalog to resolve unbound attributes in a query expression.
* The Spark SQL catalog object tracks the columns and tables in all data sources.
*
* - In the logical optimization phase, Spark SQL applies rule-based optimizations to
* the logical plan generated by the analysis phase. Rule-based optimizations include
* constant folding, predicate pushdown, projection pruning, null propagation,
* Boolean expression simplification, and other optimizations.
*
* - The next phase is the physical planning phase. In this phase, Spark SQL selects an
* optimal physical plan using a cost-model. It takes as input the optimized logical
* plan generated by the logical optimization phase. Using rules, it then generates
* one or more physical plans that can be executed by the Spark execution engine.
* Next, it computes their costs and selects an optimal plan for execution. In addition,
* it performs rule-based physical optimizations, such as pipelining projections or
* filters into one Spark operation. It also pushes operations from the logical plan into
* data sources that support predicate or projection pushdown. Spark SQL generates
* optimized physical plans even for inefficient logical plans
*
* - The last phase is the code generation phase, where Spark SQL compiles parts of a
* query directly to Java bytecode. It uses a special Scala language feature to transform a
* tree representing a SQL expression to a Scala AST (Abstract Syntax Tree), which is fed
* to the Scala compiler at runtime to generate bytecode. Thus, it avoids using the Scala
* parser at runtime. This speeds up query execution. The generated code generally
* performs as fast as or faster than hand-tuned Scala or Java program.
* */
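    /* A minimal sketch (never called; any DataFrame with "age" and "name" columns works, e.g. the personsDF defined
     * later in this lesson) showing the Catalyst phases described above: explain(true) prints the parsed, analyzed
     * and optimized logical plans as well as the selected physical plan. */
    def catalystPlansSketch(df: DataFrame): Unit = {
      df.filter("age > 25").select("name").explain(true)
    }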
/** ***********************************************************************************************
* ***********************************4.3 Spark Applications ********************************
* **********************************************************************************************/
/** ****************************************4.3.1 ETL (Extract Transform Load) ****************************/
/* ETL is the process of reading data from one or more sources, applying some transformation on the data, and
* writing it to another data source. Conceptually, it consists of three steps: extract, transform and load. These
* steps need not be sequential; an application does not have to extract all the data before moving it to the
* transform step. Once it has extracted a portion of the data, it may run the three steps in parallel.
*
* - Extract : involves reading data from one or more operational systems. The data source could be a database,
* API or a file. The source database can be a relational database or a NoSQL data source.
* A file can be CSV, JSON, XML, Parquet, ORC, Avro, Protocol Buffers, or any other format.
*
* - Transform : involves cleaning and modifying the source data using some rules. For example, rows with invalid
* data may be dropped or columns with null values may be populated with some value. It may also
* include, concatenating two columns, splitting a column into multiple columns, encoding a column,
* translating a column from one encoding to a different encoding, or any other operations which can
* make data ready for the destination system
*
* - Load : involves writes data to a destination system. The destination can be a database, a file, etc.
*
* Generally, ETL is used for data warehousing. Data is collected from a number of different operational systems,
* cleaned, transformed and stored into a data warehouse. However, ETL is not unique to data warehousing.
* For example, it can be used to enable sharing of data between two disparate systems. It can be used to convert
* data from one format to another. Similarly, migrating data from a legacy system to a new system is an
* ETL process.
*
* Spark Sql is great for ETL
* - support of many datasource (input and output)
* - parallel data transformation for high performance.
* */
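    /* A minimal sketch (the file paths and column names are made up; never called) of the three ETL steps described
     * above: extract from a CSV source, transform with DataFrame operations, load the result as Parquet. */
    def etlSketch(spark: SparkSession): Unit = {
      import org.apache.spark.sql.functions.{col, upper}
      val raw = spark.read.option("header", "true").csv("/tmp/input/users.csv")      // extract
      val cleaned = raw.na.drop("any").withColumn("name", upper(col("name")))        // transform
      cleaned.write.mode(SaveMode.Overwrite).parquet("/tmp/output/users.parquet")    // load
    }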
/** ******************************* 4.3.2 Distributed JDBC/ODBC SQL Query Engine ******************************/
/* Spark SQL can be used in two ways:
* - First, it can be used as a library. In this mode, data processing tasks can be expressed as SQL, HiveQL or
* language integrated queries within a Scala, Java, Python, or R application.
* - Second, Spark SQL can be used as a distributed SQL query engine. It comes prepackaged with a
* Thrift/JDBC/ODBC server. A client application can connect to this server and submit SQL/HiveQL queries
* using Thrift, JDBC, or ODBC interface.
*
* Spark SQL provides a command-line client called Beeline, which can be used to submit HiveQL queries, however, the
* spark JDBC/ODBC server can be queried from any application that supports JDBC/ODBC. For example, you can use it
     * with a SQL client such as SQuirreL. Similarly, it can be queried from BI and data visualization tools such as
* Tableau, Zoomdata and Qlik.
*
* The thrift JDBC/ODBC server provides two benefits.
* - First, it allows query to be written in SQL/HiveQL.
* - Second, it makes it easy for multiple users to share a single spark cluster.
*
* The Spark SQL JDBC/ODBC server looks like a database; however, unlike a database, it does not have a
* built-in storage engine. It is just a distributed SQL query engine, which uses Spark under the hood and can
* be used with a variety of a data sources*/
/** ********************************4.3.3 Data Warehousing ********************************************/
/* A conventional data warehouse is essentially a database that is used to store and analyze large amounts of
* data. It consists of three tightly integrated components: data tables, system tables, and a SQL query engine.
*
     * The data tables store user data (sourced from the originating operational systems).
     * The system tables store metadata about the data in the data tables.
     * The SQL query engine provides a SQL interface to store and analyze the data in the data tables.
*
* In general, all the three components are generally packaged together as a proprietary software or appliance
*
* Spark SQL can be used to build an open source data warehousing solution. It provide a distributed SQL
* query engine, which can be paired with a variety of open source storage systems such as HDFS. Thus,
* it supports a modular architecture that allows users to mix and match different components in the data
* warehouse stack.
*
* For example, user data can be stored in HDFS or S3 using a columnar format such as Parquet or ORC file format.
* SQL engine can be spark SQL, Hive, Impala, Presto, or Apache Drill.
*
* A spark sql based data warehousing solution is more
* - scalable, (Storage and processing capacity can be easily increased by adding more nodes to hdfs/spark cluster)
* - economical, (hadoop/spark eco-system are all open sources and run on commodity hardware)
* - flexible (it supports both schema-on-read and schema-on-write)
*
 * Schema-on-write systems require a schema to be defined before data can be stored (e.g. traditional databases).
 * These systems require users to create a data model before data can be stored. The benefit of schema-on-write
 * systems is that they provide efficient storage and allow fast interactive queries. Spark SQL supports
 * schema-on-write through columnar formats such as Parquet and ORC. A combination of HDFS, a columnar file format
 * and Spark SQL can be used to build a high-performance data warehousing solution.
*
 * Although schema-on-write systems (SOW) enable fast queries, they have a few disadvantages:
 * - First, a SOW system requires data to be modeled before it can be stored.
 * - Second, data ingestion in a SOW system is slow.
 * - Third, schemas are hard to change once a large amount of data has been stored. For example, adding a new
 *   column or changing a column type in a database with terabytes of data can be challenging.
 * - Fourth, a SOW system cannot easily store unstructured, semi-structured or multi-structured data.
*
*
 * We know that data modeling is not a trivial task. It requires upfront planning and a good understanding of the
 * data. The model may also change with time: we may need to modify it as the requirements change, which a SOW
 * system cannot do well.
*
 * Schema-on-read (SOR) addresses the preceding issues. It allows data to be stored in its raw format. A schema is
 * applied to the data when it is read. Therefore, data can start flowing into a SOR system at any time. A user
 * can store data in its native format without worrying about how it will be queried. At query time, users
 * can apply different schemas to the stored data depending on the requirements. SOR not only enables agility
 * but also allows for complex, evolving data.
*
 * The disadvantage of a SOR system is that query times are slower than in a SOW system. Generally, SOR is used
 * for exploratory analytics or ETL.
*
 * Spark SQL can build a data warehouse that supports both SOW and SOR, so it can be used for exploratory
 * analytics, ETL and high-performance analytics.
*
*
* */
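/* Hedged schema-on-read sketch (my own illustration; the path and the schema are hypothetical): the same raw json
* file can be read with whatever schema the query needs, applied only at read time. Uncomment to try it.
*
* import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
* val readSchema = StructType(Seq(StructField("name", StringType), StructField("age", IntegerType)))
* val sorDF = spark.read.schema(readSchema).json("/tmp/raw_events.json")
* sorDF.where("age > 30").show()
* */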
/** ***********************************************************************************************
* *********************4.4 Spark Application Programming Interface (API) *********************
* **********************************************************************************************/
/* Spark SQL provides an application programming interface (API) in multiple languages (e.g. Scala, Java, Python
 * and R as of 08-2018). You can mix SQL/HiveQL and the native language API in one Spark application.
*
* Key abstractions:
* - SQLContext
* - HiveContext
* - DataFrame/DataSet*/
// Create Dataframe for the spark sql api test
val personsRDD = spark.sparkContext.parallelize(List(
Person(0, "Alice", 30),
Person(1, "Bob", 30),
Person(2, "Charles", 21),
Person(3, "Defence", 20),
Person(4, "Eletro", 58),
Person(5, "Bob", 40)))
import spark.implicits._
// the above import is needed for the implicit RDD-to-DataFrame conversion (.toDF)
// The best way is to use the createDataFrame() method; we will discuss it in detail in the next section.
/* If the compiler complains that .toDF is not a member of the person RDD, it is because the Person case class is
 * defined inside the main method; move it outside main and it will compile. */
val personsDF = personsRDD.toDF()
personsDF.show()
/****************************************4.4.1 SQLContext ************************************************/
/* SQLContext is the main entry point into the Spark SQL library. It is a class defined in the Spark SQL library.
* A Spark SQL application must create an instance of the SQLContext or HiveContext class.
*
* SQLContext is required to create instances of the other classes provided by the Spark SQL library. It is
* also required to execute SQL queries.
*
* In the method sqlContextOperation, we can see how it works*/
// sqlContextOperation(spark,personsDF)
/************************************4.4.2 HiveContext(Read/Write hive tables) *******************************/
/* Since Spark 2.0+, SparkSession is the new entry point of Spark, replacing the old SQLContext and
 * HiveContext. At the implementation level, SparkSession merged SQLContext and HiveContext into one object
 * in Spark 2.0. Note that the old SQLContext and HiveContext are kept for backward compatibility. A new
 * catalog interface is accessible from SparkSession - the existing APIs for database and table access such as
 * listTables, createExternalTable, dropTempView and cacheTable were moved there. The description below covers
 * the old HiveContext for reference only; we will use SparkSession to read and write Hive tables.*/
/* HiveContext is an alternative entry point into the Spark SQL library. It extends the SQLContext class for
* processing data stored in Hive. It also provides a HiveQL parser. A Spark SQL application must create an
* instance of either this class or the SQLContext class.
*
* HiveContext provides a superset of the functionality provided by SQLContext. The parser that comes
* with HiveContext is more powerful than the SQLContext parser. It can execute both HiveQL and SQL queries.
* It can read data from Hive tables. It also allows applications to access Hive UDFs (user-defined functions).
*
* Note that Hive is not a requirement for using HiveContext. You can use HiveContext even if you do not
* have Hive installed. In fact, it is recommended to always use HiveContext since it provides a more complete
* parser than the SQLContext class.
*
* If you want to process Hive tables or execute HiveQL queries on any data source, you must create an
* instance of the HiveContext class. It is required for working with tables defined in the Hive metastore.
* In addition, if you want to process existing Hive tables, add your hive-site.xml file to Spark’s classpath, since
* HiveContext reads Hive configuration from the hive-site.xml file.
*
* If HiveContext does not find a hive-site.xml file in the classpath, it creates the metastore_db and
* warehouse directories in the current directory. Therefore, you could end up with multiple copies of these
* directories if you launch a Spark SQL application from different directories. To avoid this, it is recommended
* to add hive-site.xml file to Spark’s conf directory if you use HiveContext*/
// Can't make it work, need to revisit
// hiveContextOperation(personsDF)
}
def sqlContextOperation(spark:SparkSession,sourceData:DataFrame):Unit={
/*
 * Before Spark 2.0, we needed to first build a SparkContext (for Spark core), then create a SQLContext based on
 * that SparkContext:
*
* val config = new SparkConf().setAppName("App_Name")
* val sc=new SparkContext(config)
* val sqlContext= new SQLContext(sc)
* */
/* The SQLContext class provides a method named sql, which executes a SQL query using Spark. It takes a SQL
 * statement as an argument and returns the result as an instance of the DataFrame class. In Spark 2.0+, we don't
 * need a SQLContext to call sql; we can call sql directly on the SparkSession.
 *
 * To run SQL against a DataFrame/Dataset, we must register it as a SQL temporary view.
 * Temporary views in Spark SQL are session-scoped and will disappear if the session that created them terminates.
 * If you want a temporary view that is shared among all sessions and kept alive until the Spark
 * application terminates, you can create a global temporary view. A global temporary view is tied to the system
 * preserved database global_temp, and we must use the qualified name to refer to it, e.g.
 * SELECT * FROM global_temp.view1.*/
// for spark 2.0+, we can get sqlContext directly from the sparkSession
val sqlContext = spark.sqlContext
sourceData.createOrReplaceTempView("persons")
//Global temp view
sourceData.createOrReplaceGlobalTempView("globalPersons")
//use spark session to call sql
spark.sql("select * from global_temp.globalPersons where name=='Bob'").show
//use sqlContext to call sql
val sqlRes1=sqlContext.sql("select * from persons")
//sqlRes1.show()
val ageOf30=sqlContext.sql("select * from persons where age==30")
ageOf30.show()
}
def hiveContextOperation(sourceData:DataFrame):Unit={
/*******************************4.4.2.1 Create sparkSession with hive connectivity********************************/
//if you want to use SparkSession to access Hive tables, you need to give the location of the Hive warehouse
/* You can find the warehouse location at hive-site.xml in hive/conf dir. It looks like this
* <name>hive.metastore.warehouse.dir</name>
<value>hdfs://127.0.0.1:9000/user/hive/warehouse</value>
*/
//val hiveWarehouseLocation="hdfs://127.0.0.1:9000/user/hive/warehouse"
val localWarehouse="file:///tmp/spark-warehouse"
val spark=SparkSession
.builder()
.appName("spark_access_hive_table")
//.config("hive.metastore.uris","thrift://127.0.0.1:9083")
//.config("spark.driver.allowMultipleContexts", "true")
// hive.metastore.warehouse.dir is deprecated since Spark 2.0, use spark.sql.warehouse.dir instead
.config("spark.sql.warehouse.dir",localWarehouse)
//.config("spark.sql.catalogImplementation","hive")
.enableHiveSupport()
.getOrCreate()
/* .enableHiveSupport() provides the HiveContext functionality. So you are able to use catalog functions, since
 * Spark gets connectivity to the Hive metastore once .enableHiveSupport() is called.*/
/* I was unable to connect to an existing Hive data warehouse; the above will create a local spark-warehouse on
 the local disk. Need to revisit this!!!*/
/**************************** 4.4.2.1 Write data to hive table *******************************/
/* Get metadata from the warehouse catalog*/
//spark.sql("show tables").show()
spark.catalog.listDatabases().show(false)
spark.catalog.listTables().show(false)
println(s"Spark conf ${spark.conf.getAll.mkString("\n")}")
val hiveInputDF=sourceData.coalesce(1)
//spark.sql("DROP TABLE IF EXISTS persons")
spark.sql("CREATE TABLE persons (Id Long, Name String, Age Int)")
hiveInputDF.write.mode(SaveMode.Overwrite).saveAsTable("persons")
//read tables in hive
spark.sql("show tables").show()
spark.sql("select * from persons").show()
}
case class Person(Id: Long, Name: String, Age: Int)
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson13_Anomaly_Detection/Lesson13_Anomaly_Detection_With_Streaming.scala | package org.pengfei.Lesson13_Anomaly_Detection
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.{Dataset, SparkSession}
object Lesson13_Anomaly_Detection_With_Streaming {
def main(args:Array[String]):Unit={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder.appName("Lesson13_Anomaly_Detection").master("local[2]").getOrCreate()
import spark.implicits._
val host="localhost"
val port=9999
val lines=spark.readStream.format("socket").option("host",host).option("port",port).load()
val data=lines.as[String].flatMap(_.split(","))
data.isStreaming // Returns True for DataFrames that have streaming sources
data.printSchema
val countDs = data.groupByKey(value=>value).flatMapGroups{
case (value, iter)=> Iterator((value, iter.length))
}.toDF("value", "count")
val query=data.writeStream.outputMode("append").format("console").trigger(Trigger.ProcessingTime("5 seconds")).start()
query.awaitTermination()
}
/* 0,tcp,http,SF,215,45076,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,1,0.00,0.00,0.00,0.00,1.00,0.00,0.00,0,0,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,normal.
*/
case class Connection(duration:Int, protocol_type:String, service:String, flag:String,
src_bytes:Int, dst_bytes:Int, land:Int, wrong_fragment:Int, urgent:Int,
hot:Int, num_failed_logins:Int, logged_in:Int, num_compromised:Int,
root_shell:Int, su_attempted:Int, num_root:Int, num_file_creations:Int,
num_shells:Int, num_access_files:Int, num_outbound_cmds:Int,
is_host_login:Int, is_guest_login:Int, count:Int, srv_count:Int,
serror_rate:Double, srv_serror_rate:Double, rerror_rate:Double, srv_rerror_rate:Double,
same_srv_rate:Double, diff_srv_rate:Double, srv_diff_host_rate:Double,
dst_host_count:Int, dst_host_srv_count:Int,
dst_host_same_srv_rate:Double, dst_host_diff_srv_rate:Double,
dst_host_same_src_port_rate:Double, dst_host_srv_diff_host_rate:Double,
dst_host_serror_rate:Double, dst_host_srv_serror_rate:Double,
dst_host_rerror_rate:Double, dst_host_srv_rerror_rate:Double,
label:String)
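/* Hedged parsing sketch (a helper I added, not part of the original lesson): it turns one comma-separated record,
* like the sample line shown above, into a Connection. The field order follows the case class; the toInt/toDouble
* conversions are assumptions about the KDD-Cup-style input format. It could be used in the streaming job as
* lines.as[String].map(parseConnection). */
def parseConnection(line: String): Connection = {
val f = line.split(",")
Connection(f(0).toInt, f(1), f(2), f(3),
f(4).toInt, f(5).toInt, f(6).toInt, f(7).toInt, f(8).toInt,
f(9).toInt, f(10).toInt, f(11).toInt, f(12).toInt,
f(13).toInt, f(14).toInt, f(15).toInt, f(16).toInt,
f(17).toInt, f(18).toInt, f(19).toInt,
f(20).toInt, f(21).toInt, f(22).toInt, f(23).toInt,
f(24).toDouble, f(25).toDouble, f(26).toDouble, f(27).toDouble,
f(28).toDouble, f(29).toDouble, f(30).toDouble,
f(31).toInt, f(32).toInt,
f(33).toDouble, f(34).toDouble,
f(35).toDouble, f(36).toDouble,
f(37).toDouble, f(38).toDouble,
f(39).toDouble, f(40).toDouble,
f(41))
}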
/*
*
* case Array(duration, protocol_type, service, flag,
src_bytes, dst_bytes, land, wrong_fragment, urgent,
hot, num_failed_logins, logged_in, num_compromised,
root_shell, su_attempted, num_root, num_file_creations,
num_shells, num_access_files, num_outbound_cmds,
is_host_login, is_guest_login, count, srv_count,
serror_rate, srv_serror_rate, rerror_rate, srv_rerror_rate,
same_srv_rate, diff_srv_rate, srv_diff_host_rate,
dst_host_count, dst_host_srv_count,
dst_host_same_srv_rate, dst_host_diff_srv_rate,
dst_host_same_src_port_rate, dst_host_srv_diff_host_rate,
dst_host_serror_rate, dst_host_srv_serror_rate,
dst_host_rerror_rate, dst_host_srv_rerror_rate,
label)=Connection(duration.toInt, protocol_type, service, flag,
src_bytes, dst_bytes, land, wrong_fragment, urgent,
hot, num_failed_logins, logged_in, num_compromised,
root_shell, su_attempted, num_root, num_file_creations,
num_shells, num_access_files, num_outbound_cmds,
is_host_login, is_guest_login, count, srv_count,
serror_rate, srv_serror_rate, rerror_rate, srv_rerror_rate,
same_srv_rate, diff_srv_rate, srv_diff_host_rate,
dst_host_count, dst_host_srv_count,
dst_host_same_srv_rate, dst_host_diff_srv_rate,
dst_host_same_src_port_rate, dst_host_srv_diff_host_rate,
dst_host_serror_rate, dst_host_srv_serror_rate,
dst_host_rerror_rate, dst_host_srv_rerror_rate,
label)*/
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/GPFSStats.scala | package org.pengfei.spark.application.example
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
import java.time.{Instant,ZoneId,ZonedDateTime}
object GPFSStats {
def main(args:Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark = SparkSession.builder().
master("local[2]"). //spark://10.70.3.48:7077 remote
appName("GPFSStats").
getOrCreate()
//spark.conf.set("")
import spark.implicits._
val inputFile = "file:///DATA/data_set/spark/gpfs_stats_min"
//val inputFile="hdfs://hadoop-nn.bioaster.org:9000/test_data/bioaster-2018-03-06.fist"
val schema = StructType(Array(
StructField("Blocks", LongType, true),
StructField("Perms", LongType, true),
StructField("nlinks", IntegerType, true),
StructField("Uid", LongType, true),
StructField("Gid", LongType, true),
StructField("Size", LongType, true),
StructField("Mtime", LongType, true),
StructField("Atime", LongType, true),
StructField("Ctime", LongType, true),
StructField("Name", StringType, true)))
val df = spark.read.format("com.databricks.spark.csv").option("delimiter", ":").schema(schema).load(inputFile)
//df.show(5)
df.count()
//df.filter($"Name".contains(".delta")).select($"Name",$"Blocks").groupBy($"Name".contains(".delta")).sum("Blocks").show(5)
//val dir= df.select($"Perms",$"Name").distinct().filter($"Perms"===200040775)
//dir.collect()
/*1. transfrom Perms column into DataType and acl*/
val result = df.withColumn("DataType", expr("substring(Perms, 1, length(Perms)-4)")).withColumn("ACL", expr("substring(Perms, length(Perms)-3,length(Perms))")).drop("Perms")
/*2. transform Name into FileName and ParentDir*/
/*3. transform Atime to ADate*/
/*4. Uid to username*/
/*Q1. List the number of uncompressed fastq files per dir; for each target dir, give the names of all uncompressed files*/
/*Q2. Do Q1 only for data in UserData */
/*Q3. find all duplicate data (same name, same size) - a sketch is given further below*/
/*Q4. Get data not accessed since 1 year - a sketch is given further below*/
val out= result.limit(5)
//out.write.format("csv").option("header","true").save("file:///tmp/test.csv")
//result.select($"DataType",$"Name").distinct().show(5)
//get user 42968 file number
//result.filter($"Uid"===42968).select($"Uid",$"Size").groupBy("Uid").count().show(5)
//get user 42968 .fastq file size
// result.filter($"Uid"===42968).filter($"Name".contains(".fastq")).select($"Uid",$"Size").groupBy("Uid").sum("Size").show()
// val userList= result.select($"Uid").distinct().rdd.map(r=>r(0)).collect()
/*//get all user space usage size
val spaceSize = result.select($"Uid", $"Size").groupBy("Uid").sum("Size")
//create column
val newdf = spaceSize.withColumn("SizeH", col("sum(Size)")).drop("sum(Size)")
newdf.show(5)
spark.udf.register("getSize", (arg1: Long) => getSize(arg1))
val finaldf = newdf.withColumn("Size", expr("getSize(SizeH)"))
finaldf.show(5)*/
val convertDate=result.select($"Atime")
spark.udf.register("getDate", (arg: Long)=>getDate(arg))
spark.udf.register("getFileName", (arg:String)=>getFileName(arg))
spark.udf.register("getParentDir",(arg:String)=>getParentDir(arg))
val dateDF=convertDate.withColumn("ADate",expr("getDate(Atime)"))
val tmpDF=out.withColumn("FileName",expr("getFileName(Name)"))
.withColumn("ParentDir",expr("getParentDir(Name)"))
dateDF.show(5)
tmpDF.show(5)
tmpDF.groupBy($"FileName").count().sort($"count".desc).show()
println(getDateInMillis("03/08/2017"))
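/* Hedged sketch answering Q3/Q4 above (my own illustration, not the original author's solution):
* Q3 - rows sharing the same file name and size are duplicate candidates;
* Q4 - files whose access time (Atime, in Unix seconds) is older than one year. */
val fileNameDF = result.withColumn("FileName", expr("getFileName(Name)"))
val duplicateCandidates = fileNameDF.groupBy($"FileName", $"Size").count().filter($"count" > 1)
duplicateCandidates.show(5)
val oneYearAgoInSec = System.currentTimeMillis() / 1000 - 365L * 24 * 3600
val notAccessedSinceOneYear = result.filter($"Atime" < oneYearAgoInSec)
notAccessedSinceOneYear.show(5)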
//tmpDF.write.format("csv").option("header","true").save("file:///tmp/fileName.csv")
//newdf.withColumn("Size",getSize(("SizeH))
/*for(user<-userList){
println(user)
result.filter($"Uid"===user).select($"Uid",$"Size").groupBy("Uid").sum("Size").show()
}
}
}
}*/
}
/*def main(args:Array[String]): Unit ={
//val rawSize:Long= 6403617598L
//getSize(rawSize)
val rawDate=1494438629
getDate(rawDate)
}*/
def getSize(rawSize:Long): String ={
val unit:Array[String]=Array("B","KB","MB","GB","TB")
var index=0
var tmpSize:Long=rawSize
while(tmpSize>=1024){
tmpSize=tmpSize/1024
index+=1
}
return tmpSize+unit(index)
}
def getFileName(fullPath:String):String={
val fileName=fullPath.substring(fullPath.lastIndexOf("/")+1)
return fileName
}
def getParentDir(fullPath:String):String={
val parentDir=fullPath.substring(0,fullPath.lastIndexOf("/"))
return parentDir
}
def getDate(rawDate:Long):String={
val timeInMillis = System.currentTimeMillis()
val instant = Instant.ofEpochSecond(rawDate)
//instant: java.time.Instant = 2017-02-13T12:14:20.666Z
val zonedDateTimeUtc= ZonedDateTime.ofInstant(instant,ZoneId.of("UTC"))
//zonedDateTimeUtc: java.time.ZonedDateTime = 2017-02-13T12:14:20.666Z[UTC]
val zonedDateTimeCet=ZonedDateTime.ofInstant(instant,ZoneId.of("CET"))
/* println("Current time in milisecond"+ timeInMillis)
println("Current time: "+Instant.ofEpochMilli(timeInMillis))
println("Instant time :"+instant)
println("UTC time :"+zonedDateTimeUtc)
println("CET time :"+zonedDateTimeCet)*/
zonedDateTimeUtc.toString
}
def getDateInMillis(date:String):Long={
val format=new java.text.SimpleDateFormat("MM/dd/yyyy") // "MM" is month; the original pattern "m" parsed minutes
val time=format.parse(date).getTime()/1000
return time
}
}
/*
*
* blocks perms nlinks uid gid size mtime atime ctime name ("name" is "name -> lname" when the object is a link)
 blocks: 1 block for 1 MB in GPFS; if a file is smaller than 50 bytes it is written directly in the metadata, otherwise in a sub-block. For example, a file of 1.5 MB will use 2 blocks and we lose 0.5 MB of space.
 perms: mode_t
 atime, ctime, mtime: dates in Unix epoch seconds
blocks->0:perms->200100664:nlinks->1:uid->42968:gid->8000:size->140:mtime->1494438629:atime->1494438629:ctime->1494438629:name->pt2/ama/
*
* */
/*
*1515366180000
*1494438629
* val myUDf = udf((s:String) => Array(s.toUpperCase(),s.toLowerCase()))
val df = sc.parallelize(Seq("Peter","John")).toDF("name")
val newDf = df
.withColumn("udfResult",myUDf(col("name")))
.withColumn("uppercaseColumn", col("udfResult")(0))
.withColumn("lowercaseColumn", col("udfResult")(1))
.drop("udfResult")
newDf.show()
* */
/*
* +------+------+-----+----+----+----------+----------+----------+--------------------+--------+----+
|Blocks|nlinks| Uid| Gid|Size| Mtime| Atime| Ctime| Name|DataType| ACL|
+------+------+-----+----+----+----------+----------+----------+--------------------+--------+----+
| 0| 1|42968|8000| 140|1494438629|1494438629|1494438629|pt2/ama/nanostrin...| 20010|0664|
| 0| 1|42968|8000| 140|1494438629|1494438629|1494438629|pt2/ama/nanostrin...| 20010|0664|
| 0| 1|42968|8000| 140|1494438629|1494438629|1494438629|pt2/ama/nanostrin...| 20010|0664|
| 0| 1|42968|8000| 140|1494438629|1494438629|1494438629|pt2/ama/nanostrin...| 20010|0664|
| 0| 1|42968|8000| 140|1494438629|1494438629|1494438629|pt2/ama/nanostrin...| 20010|0664|
+------+------+-----+----+----+----------+----------+----------+--------------------+--------+----+
*
* atime -> File access time
* Access time shows the last time the data from a file was accessed – read by one of
* the Unix processes directly or through commands and scripts.
*
* ctime -> File change time
* ctime also changes when you change file's ownership or access permissions. It will
* also naturally highlight the last time file had its contents updated.
*
* mtime -> File modify time
* Last modification time shows time of the last change to file's contents. It does
* not change with owner or permission changes, and is therefore used for tracking
* the actual changes to data of the file itself.
* */ |
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson03_Spark_Application/Lesson03_2_Spark_Application.scala | package org.pengfei.Lesson03_Spark_Application
import org.apache.spark.sql.SparkSession
object Lesson03_2_Spark_Application {
def main(args: Array[String]) = {
/** *****************************Spark API Entry Point: SparkSession ****************************/
/* Since Spark 2.0+, SparkSession is the entry point into all Spark functionality. It replaces all the other
 * entry points: Spark core (SparkContext) and Spark SQL (SQLContext/HiveContext). To create a basic
 * SparkSession, just use SparkSession.builder().*/
val spark=SparkSession
.builder()
.master("local[2]")
.appName("Lesson3_2_Spark_Application")
.getOrCreate()
//for implicit conversions like converting RDDs to dataframes
import spark.implicits._
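/* Hedged usage sketch (added for illustration): with the implicits imported above, a local Seq can be converted
* to a DataFrame directly through the SparkSession. */
val demoDF = Seq(("Alice", 30), ("Bob", 25)).toDF("name", "age")
demoDF.show()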
}
}
|
pengfei99/Spark | LearningSpark/src/main/java/org/pengfei/Lesson11_Spark_Application_Product_Recommendation/Lesson11_Recommendation.scala | package org.pengfei.Lesson11_Spark_Application_Product_Recommendation
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.ml.recommendation._
import scala.util.Random
object Lesson11_Recommendation {
/**********************************************11.1 Introduction ***********************************************/
/*In this lesson, we will build an application which recommend music for users*/
/**********************************************11.2 Data set ***********************************************/
/* In this lesson, we will use a dataset which was published by Audioscrobbler. Audioscrobbler was the first music
 * recommendation system for last.fm.
*
* Audioscrobbler provided an open API for “scrobbling,” or recording listeners’ song plays. last.fm used
* this information to build a powerful music recommender engine. The system reached millions of users
* because third party apps and sites could provide listening data back to the recommender engine.
*
* At that time, research on recommender engines was mostly confined to learning from rating-like data.
* That is, recommenders were usually viewed as tools that operated on input like “Bob rates Prince 3.5 stars.”
*
* The Audioscrobbler data set is interesting because it merely records plays: “Bob played a Prince track.” A play
* carries less information than a rating. Just because Bob played the track doesn’t mean he actually liked it.
* You or I may occasionally play a song by an artist we don’t care for, or even play an album and walk out of
* the room.
*
* However, listeners rate music far less frequently than they play music. A data set like this is therefore
* much larger, covers more users and artists, and contains more total information than a rating data set,
* even if each individual data point carries less information. This type of data is often called implicit
* feedback data because the userartist connections are implied as a side effect of other actions, and not given as
* explicit ratings or thumbs-up.
*
* You can download the sample data from wget http://www.iro.umontreal.ca/~lisa/datasets/profiledata_06-May-2005.tar.gz
* It contains 3 files :
* - user_artist_data.txt has 3 columns: userid artistid playcount
* - artist_data.txt has 2 columns: artistid artist_name
* - artist_alias.txt has 2 columns: badid, goodid (known incorrectly spelt artists and the correct artist id.
you can correct errors in user_artist_data as you read it in using this file
(we're not yet finished merging this data)). For example, “<NAME>,” “<NAME>,” and “the smiths” may appear
as distinct artist IDs in the data set even though they are plainly the same.
* The main data set is in the user_artist_data.txt file. It contains about 141,000 unique users, and 1.6 million
* unique artists. About 24.2 million users’ plays of artists are recorded, along with their counts.
* */
/************************************** 11.2 Recommender Algorithm ***********************************************/
/* Unlike other recommendation data sets, which contain ratings of artists given by users, our dataset has no
 * information about the users, and nothing about the artists other than their names. We need an algorithm that
 * learns without access to user or artist attributes.
*
 * These are typically called "collaborative filtering" algorithms. For example, deciding that two users might share
 * similar tastes because they are the same age is not an example of collaborative filtering. Deciding that two
 * users might both like the same song because they play many of the same other songs is an example.
*
 * The sample data set contains millions of play counts. But for some users, the information is still skimpy, because on
 * average, each user has played songs from about 171 artists - out of 1.6 million. Some users have listened to only
 * one artist. We need an algorithm that can provide decent recommendations to even these users.
*
* Finally, we need an algorithm that scales, both in its ability to build large models and to create
* recommendations quickly. Recommendations are typically required in near real time (within a second).
*
* In this Lesson, we will use a member of a broad class of algorithms called latent-factor models. They try to
* explain observed interactions between large numbers of users and items through a relatively small number of
* unobserved, underlying reasons. It is analogous to explaining why millions of people buy a particular few of
* thousands of possible albums by describing users and albums in terms of tastes for perhaps tens of genres,
* and tastes are not directly observable or given as data.
*
* For example, consider a user who has bought albums by metal bands, but also classical. It may be difficult to
* explain why exactly these albums were bought and nothing else. However, it's probably a small window on a much
* larger set of tastes. "liking metal, progressive rock, and classical" are three latent factors that could explain
* tens of thousands of individual album preferences.
*
* More specifically, this example will use a type of "matrix factorization" model. Mathematically, these
* algorithms represent the relation of user and product data as a large matrix A, where the entry at row i and column j
* exists if user i has played artist j. A is sparse: most entries of A are 0, because only a few of all possible
* user-artist combinations actually appear in the data. They factor A as the matrix product of two smaller matrices,
* X and Y. One row in X represents a user, each column of this row represents this user's latent features, one row in
* Y represents a item (in our case it's an artist), each column of the item's row represents this item's latent
* features. The product of XY (XY= A) , is the product of user's latent features and item's laten features, which
* represent user's taste of the item.
*
 * There are algorithms that can calculate the factorization of the matrix. In this lesson, we use an algorithm called
 * "Alternating Least Squares" (ALS) to compute X and Y. Spark MLlib's ALS implementation draws on ideas from
* papers "Collaborative Filtering for Implicit Feedback Datasets" (http://yifanhu.net/PUB/cf.pdf) and
* "Large-Scale Parallel Collaborative Filtering for the Netflix Prize".
* */
def main(args:Array[String]):Unit ={
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val spark=SparkSession.builder().appName("Lesson11_Recommendation").master("local[2]").getOrCreate()
/************************************** 11.3 Preparing the data ***********************************************/
import spark.implicits._
val sparkConfig = ConfigFactory.load("application.conf").getConfig("spark")
val path= sparkConfig.getString("sourceDataPath")
val userArtistFilePath=s"${path}/spark_lessons/Lesson11_Recommendation/profiledata_06-May-2005/user_artist_data_small.txt"
val artistFilePath=s"${path}/spark_lessons/Lesson11_Recommendation/profiledata_06-May-2005/artist_data.txt"
val artistAliasPath=s"${path}/spark_lessons/Lesson11_Recommendation/profiledata_06-May-2005/artist_alias.txt"
val rawUserArtist:Dataset[String]=spark.read.textFile(userArtistFilePath)
// rawData.show(5)
/* Each line of the file contains a user ID, an artist ID, and a play count, separated by spaces*/
val userArtistDF=rawUserArtist.map{line=>
val Array(user,artist,_*)=line.split(" ")
(user.toInt, artist.toInt)
}.toDF("user","artist")
// userArtistDF.show(5)
// userArtistDF.agg(min("user"),max("user"),min("artist"),max("artist")).show()
/* +---------+---------+-----------+-----------+
|min(user)|max(user)|min(artist)|max(artist)|
+---------+---------+-----------+-----------+
| 90| 2443548| 1| 10794401|
+---------+---------+-----------+-----------+
* The above stats tells us the max and min id of user and artist. The artist name and id corresponding
* table is in artist_data.txt */
val rawArtist=spark.read.textFile(artistFilePath)
// rawArtist.show(5)
/*val nullLine=rawArtist.filter(col("value").isNull).count()
println(s"empty line number: ${nullLine}")*/
/* If we use the previous code to parse the file, it will fail, because the two columns are separated by one tab in
 * some rows and by more than one tab in others, so the split sometimes returns 2 items and sometimes more.
 * So we need to use span: span() splits the line at its first tab by consuming characters that aren’t tabs. It
 * then parses the first portion as the numeric artist ID, and retains the rest as the artist name. name.trim
 * removes the surrounding whitespace and tabs.
*
* val artistByID=rawArtist.map{line=>
val (id,name)=line.span(x=>x!='\t')
(id.toInt, name.trim)
}.toDF("id","name")
*
 * But this code still throws java.lang.NumberFormatException: For input string: "Aya Hisakawa",
 * which means some rows do not have a tab and can't be split correctly. In that case id contains a string, which
 * makes toInt throw a NumberFormatException.
*
* the map() function must return exactly one value for every input, so it can’t be used. It’s possible to
* remove the lines that don’t parse with filter(), but this would duplicate the parsing logic. The flatMap()
* function is appropriate when each element maps to zero, one, or more results because it simply “flattens”
* these collections of zero or more results from each input into one big data set.*/
val artistByID:DataFrame=rawArtist.flatMap{line=>
val (id,name)=line.span(_ != '\t')
//if name is empty, it means the split is wrong, so abandon this row
if(name.isEmpty){
None
}else{
// Even a row can be split into two parts, the id column may still contain string
try{
Some((id.toInt, name.trim))
} catch {
// abandon all rows which we cannot cast Id to int
case _:NumberFormatException =>None
}
}
}.toDF("id","name")
//artistByID.show(5)
/* The artist alias file maps artist IDs that may be misspelled or nonstandard to the ID of the artist’s canonical
* name. It contains two IDs per line, separated by a tab. This file is relatively small, containing about 200,000
* entries. It will be useful to collect it as a Map, mapping “bad” artist IDs to “good” ones, instead of just
* using it as a data set of pairs of artist IDs. Again, some lines are missing the first artist ID for some reason,
* and are skipped: */
val artistAliasRaw=spark.read.textFile(artistAliasPath)
artistAliasRaw.show(5)
val artistAlias=artistAliasRaw.flatMap{line=>
val Array(artist,alias)=line.split("\t")
if(artist.isEmpty){
None
}else{
Some((artist.toInt,alias.toInt))
}
}.collect().toMap
// head returns the first element of the map
val head=artistAlias.head
println(s"${head.toString()}")
//we want to see the artist names for the id and its alias (1208690,1003926)
artistByID.filter(col("id").isin(1208690,1003926)).show()
/*After all the transformations, artistAlias is a Map[Int,Int] where the key is the bad id and the value is the good
* id of the artist. So we need to use this information to replace all bad ids in user_artist_data.txt with good ids.
*
* The getOrElse(key,defaultValue) method tries to find the value for the given key and, if it is not found, returns
* the default value. The following code is an example.
*
* val finalArtistID=artistAlias.getOrElse(10794401,1)
* println(s"finalArtistID has value : ${finalArtistID}")
* */
/* The buildCounts method replaces all bad ids in the user_artist dataset. */
/********************** 11.4 Broadcast variables (immutable data) ***************************************/
/* We can notice that artistAlias is a Map held locally on the Spark driver. When we run the Spark job,
 * this map will be copied automatically to every task. However, it is not tiny, consuming about 15 megabytes
 * in memory and at least several megabytes in serialized form. Because many tasks execute in one JVM, it is
 * wasteful to send and store so many copies of the data.
*
* Instead, we create a broadcast variable called bArtistAlias for artistAlias. This makes Spark send and hold
* in memory just one copy for each executor in the cluster. When there are thousands of tasks and many execute
* in parallel on each executor, this can save significant network traffic and memory.
*
 * When Spark runs a stage, it creates a binary representation of all the information needed to run tasks in that
 * stage; this is called the closure of the function that needs to be executed. This closure includes all the data
* structures on the driver referenced in this function. Spark distributes it with every task that is sent to an
* executor on the cluster
*
* Broadcast variables are useful when many tasks need access to the same (immutable) data structure. They extend
* normal handling of task closures to enable:
* - Caching data as raw Java Objects on each executor, so they need not be deserialized for each task
* - Caching data across multiple jobs, stages, and tasks.
*
* DataFrame operations can at times also automatically take advantage of broadcasts when performing joins between
 * a large and a small table. Just broadcasting the small table is sometimes advantageous; this is called a
 * broadcast hash join.*/
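/* Hedged broadcast-hash-join sketch (my own illustration): the broadcast() hint from
* org.apache.spark.sql.functions (already imported above) asks Spark to broadcast the small artist-name table
* when joining it with the larger user-artist data. */
// userArtistDF.join(broadcast(artistByID), userArtistDF("artist") === artistByID("id")).show(5)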
//broadcast
val bArtistAlias=spark.sparkContext.broadcast(artistAlias)
//replace all bad id
val trainData:DataFrame=buildCounts(rawUserArtist,bArtistAlias)
trainData.cache()
/***************************************** 11.5 Build model ***************************************/
/*Now, our training data is ready, we can build our model*/
val alsModel=new ALS()
.setSeed(Random.nextLong())
.setImplicitPrefs(true)
.setRank(10)
.setAlpha(1.0)
.setRegParam(0.01)
.setMaxIter(5)
.setUserCol("user")
.setItemCol("artist")
.setRatingCol("count")
.setPredictionCol("prediction")
.fit(trainData)
/* Depending on your Spark cluster, the operation will likely take minutes or more, because the ALS model has a huge
 * number of parameters and coefficients. For our dataset, it contains a feature vector of 10 values for each user
 * and product. The final model contains these large user-feature and product-feature matrices as DataFrames of
 * their own.
*
* The results values of each run may be different. Because the model depends on a randomly chosen initial
* set of feature vectors
*
* To see some feature vectors, try the following, which displays just one row and does not truncate the wide
* display of the feature vector*/
alsModel.userFactors.show(1,truncate=false)
/* Hyperparameters such as alpha, maxIter, etc. can affect the quality of the model. To set these
 * hyperparameters correctly, we need a deep understanding of the ALS algorithm and the training data. This will
 * be explained later. We also have ways to check whether the model is accurate or not.
 * */
/***************************************** 11.6 Spot checking recommendations *******************************/
/*
* We should first see if the artist recommendations make any intuitive sense, by examining a user, plays, and
* recommendations for that user. Take, for example, user 2093760. First, let’s look at his or her plays to get
* a sense of the person’s tastes. Extract the IDs of artists that this user has listened to and print their names.
* This means searching the input for artist IDs played by this user, and then filtering the set of artists by
* these IDs in order to print the names in order:
*/
val userID = 2093760
// getUserPreferArtist(userID,trainData,artistByID)
/*
* | id| name|
+-------+---------------+
| 1180| <NAME>|
| 378| Blackalicious|
| 813| Jurassic 5|
|1255340|The Saw Doctors|
| 942| Xzibit|
+-------+---------------+
*
 * User 2093760 looks like someone who loves a mix of mainstream pop and hip-hop. The following two methods return
 * the model's recommendations for this user.*/
println("###################################old way#############################################")
//val oldRecom=oldMakeRecommendations(alsModel,userID,5)
// oldRecom.show(truncate = false)
println("####################################new way############################################")
val newRecom=newMakeRecommendations(alsModel,userID,artistByID)
newRecom.show(truncate = false)
/*************************************** 11.7 Evaluating Recommendation Quality ******************************/
/* We can compute the recommender’s score by comparing all held-out artists’ ranks to the rest.
* (In practice, we compute this by examining only a sample of all such pairs, because a potentially
* huge number of such pairs may exist.) The fraction of pairs where the held-out artist is ranked higher
* is its score. A score of 1.0 is perfect, 0.0 is the worst possible score, and 0.5 is the expected value
* achieved from randomly ranking artists.
*
* This metric is directly related to an information retrieval concept called the receiver operating characteristic
* (ROC) curve. The metric in the preceding paragraph equals the area under this ROC curve, and is indeed known as
* AUC, or Area Under the Curve. AUC may be viewed as the probability that a randomly chosen good recommendation
* ranks above a randomly chosen bad recommendation.
*
* The AUC metric is also used in the evaluation of classifiers. It is implemented, along with related methods,
* in the MLlib class BinaryClassificationMetrics. For recommenders, we will compute AUC per user and average
* the result. The resulting metric is slightly different, and might be called “mean AUC.” We will implement this,
* because it is not (quite) implemented in Spark.
*
* Other evaluation metrics that are relevant to systems that rank things are implemented in RankingMetrics.
* These include metrics like precision, recall, and mean average precision (MAP). MAP is also frequently used
* and focuses more narrowly on the quality of the top recommendations. However, AUC will be used here as a common
* and broad measure of the quality of the entire model output.*/
/*************************************** 11.7.1 Computing AUC ******************************/
/* The code for calculating areaUnderCurve is not complete. We know from the book that the result is about 0.879.
* It is certainly higher than the 0.5 that is expected from making recommendations randomly, and it’s close
* to 1.0, which is the maximum possible score. Generally, an AUC over 0.9 would be considered high
*
* But is it an accurate evaluation? This evaluation could be repeated with a different 90% as the training set.
* The resulting AUC values’ average might be a better estimate of the algorithm’s performance on the data set.
* In fact, one common practice is to divide the data into k subsets of similar size, use k – 1 subsets together
* for training, and evaluate on the remaining subset. We can repeat this k times, using a different set of subsets
* each time. This is called k-fold cross-validation.*/
// getAreaUnderCurveValue(rawUserArtist,bArtistAlias)
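/* Hedged k-fold sketch (k = 5 is an assumption, my own illustration): randomSplit can produce k similar-sized
* subsets; each subset can then serve once as the held-out evaluation set while the other four are unioned for
* training. Uncomment to try it.
*
* val folds = buildCounts(rawUserArtist, bArtistAlias).randomSplit(Array.fill(5)(0.2))
* val heldOut = folds(0)
* val kFoldTrain = folds.drop(1).reduce(_ union _)
* */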
/**************************************11.8 Hyperparameter selection ****************************/
/* p60
* - setRank(10): The number of latent factors in the model, or equivalently, the number of columns k in the
* user-feature and product-feature matrices. In nontrivial cases, this is also their rank.
*
* - setMaxIter(5): The number of iterations that the factorization runs. More iterations take more time but
* may produce a better factorization.
*
* - setRegParam(0.01): A standard overfitting parameter, also usually called lambda. Higher values resist
* overfitting, but values that are too high hurt the factorization's accuracy.
*
* - setAlpha(1.0): Controls the relative weight of observed versus unobserved user-product interactions in the
* factorization.
*
* rank, regParam, and alpha can be considered hyperparameters to the model. (maxIter is more of a constraint
* on resources used in the factorization.) These are not values that end up in the matrices inside the ALSModel.
* Those are simply its parameters and are chosen by the algorithm. These hyperparameters are instead parameters
* to the process of building itself.
*
* The values showed above are not necessarily optimal. Choosing good hyperparameter values is a
* common problem in machine learning. The most basic way to choose values is to simply try combinations of values
* and evaluate a metric for each of them, and choose the combination that produces the best value of the metric.
*
* So far, (AUC=0.8928367485129145,(rank=30,regParam=4.0,alpha=40.0)) are the best I can do in this Lesson */
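/* Hedged grid-search sketch (my own illustration; the grid values are assumptions): train one model per
* hyperparameter combination and evaluate it with the areaUnderCurve helper defined at the end of this object
* (note that the helper currently returns a placeholder value, so this only shows the shape of the search).
* Uncomment to run; it is slow.
*
* val Array(gsTrain, gsCv) = trainData.randomSplit(Array(0.9, 0.1))
* val bGsAllArtistIDs = spark.sparkContext.broadcast(gsTrain.select("artist").as[Int].distinct().collect())
* val evaluations =
*   for (rank <- Seq(10, 30); regParam <- Seq(0.01, 4.0); alpha <- Seq(1.0, 40.0)) yield {
*     val model = new ALS().setSeed(Random.nextLong()).setImplicitPrefs(true)
*       .setRank(rank).setRegParam(regParam).setAlpha(alpha).setMaxIter(5)
*       .setUserCol("user").setItemCol("artist").setRatingCol("count").setPredictionCol("prediction")
*       .fit(gsTrain)
*     (areaUnderCurve(gsCv, bGsAllArtistIDs, model.transform), (rank, regParam, alpha))
*   }
* evaluations.sorted.reverse.foreach(println)
* */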
}
def buildCounts(rawUserArtistData:Dataset[String],bArtistAlias:Broadcast[Map[Int,Int]]):DataFrame={
import rawUserArtistData.sparkSession.implicits._
rawUserArtistData.map{line=>
val Array(userID, artistID, count) = line.split(" ").map(_.toInt)
val finalArtistID = bArtistAlias.value.getOrElse(artistID,artistID)
(userID,finalArtistID,count)
}.toDF("user","artist","count")
}
def getUserPreferArtist(userID:Int,userArtist:DataFrame,artistID:DataFrame):Unit={
import userArtist.sparkSession.implicits._
val existingArtistIDs:Array[Int]=userArtist.filter(col("user")===userID)
.select("artist").as[Int].collect()
artistID.filter(col("id") isin (existingArtistIDs:_*)).show()
}
/*old way to get recommendation*/
def oldMakeRecommendations(model:ALSModel,userID:Int,howMany:Int):DataFrame={
//select all artist ids and pair with target user ID.
val toRecommend = model.itemFactors.select(col("id").as("artist")).withColumn("user",lit(userID))
//score all artists, return top by score.
model.transform(toRecommend).select("artist","prediction").orderBy(col("prediction").desc).limit(howMany)
}
/*new way to get recommendation*/
def newMakeRecommendations(model:ALSModel,userID:Int,artistID:DataFrame):DataFrame={
import artistID.sparkSession.implicits._
// Note: recommendForAllItems takes the number of users to recommend per item, not a user id, so it cannot be
// used to get the recommendations of one user. recommendForUserSubset (Spark 2.3+) is used here instead to get
// the top 5 artists for the given user.
val userSubset = Seq(userID).toDF("user")
val recommendation = model.recommendForUserSubset(userSubset, 5)
val recomArtistID = recommendation
.select(explode(col("recommendations")).as("rec"))
.select("rec.artist").as[Int].collect()
val recomArtistDF = artistID.filter(col("id") isin (recomArtistID:_*))
recomArtistDF
}
def getAreaUnderCurveValue(rawUserArtist:Dataset[String],bArtistAlias:Broadcast[Map[Int,Int]]):Unit={
val spark=rawUserArtist.sparkSession
import spark.implicits._
val allData=buildCounts(rawUserArtist,bArtistAlias)
val Array(trainData,cvData)=allData.randomSplit(Array(0.9,0.1))
trainData.cache()
cvData.cache()
val allArtistIDs = allData.select("artist").as[Int].distinct().collect()
val bAllArtistIDs = spark.sparkContext.broadcast(allArtistIDs)
val model = new ALS().
setSeed(Random.nextLong()).
setImplicitPrefs(true).
setRank(10).setRegParam(0.01).setAlpha(1.0).setMaxIter(5).
setUserCol("user").setItemCol("artist").
setRatingCol("count").setPredictionCol("prediction").fit(trainData)
val aucValue=areaUnderCurve(cvData, bAllArtistIDs, model.transform)
println(s" areaUnderCurve value is ${aucValue}")
}
def areaUnderCurve(
positiveData: DataFrame,
bAllArtistIDs: Broadcast[Array[Int]],
predictFunction: (DataFrame => DataFrame)): Double = {
// The full mean-AUC implementation is not included here; as a placeholder we return the value reported in the book.
return 0.879
}
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/ml/feature/select/KeyWordSelection.scala | package org.pengfei.spark.ml.feature.select
object KeyWordSelection {
}
|
pengfei99/Spark | WordCount/src/main/java/org/pengfei/spark/application/example/InvertedIndexShakespeare.scala | package org.pengfei.spark.application.example
import java.io.File
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql._
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
/*
*
* Inverted Index is mapping of content like text to the document in
* which it can be found. Mainly used in search engines, it provides
* faster lookup on text searches i.e to find the documents where the
* search text occurs.
*
*
* Problem Statement:
* 1. Dataset contains Shakespeare's works split among many files
* 2. The output must contain a list of all words with the file in which
* it occurs and the number of times it occurs
*
* */
object InvertedIndexShakespeare {
case class wordFile(wordName:String,wordCount:Int,fileName:String)
def main(args: Array[String]): Unit = {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
val fileDir="/home/pliu/Downloads/data_set/inverted-index-master/dataset/shakespeare"
//get the file list
val fileList=getFileList(fileDir)
//fileList.foreach(file=>println("file://"+file.getAbsolutePath))
val spark = SparkSession.builder().master("local").appName("InvertedIndexShakespeare").getOrCreate()
import spark.implicits._
/*******************************************
*build wordCound df for all files in the directory and write df to parquet file
* *********************************************/
/*val wordCountDF=buildFullWordCountDataFrame(fileList,spark)
val parquetFilePath="file:///home/pliu/Downloads/data_set/inverted-index-master/dataset/parquet"
wordCountDF.write.parquet(parquetFilePath)*/
/*
* WE can use dataframe build in function to transform data
*
* */
//wordCountDF.filter($"word_name"=!="/s").orderBy($"word_count".desc).show(10)
/******************************************
* Read parquet file and create a view for sql query
* *****************************/
/* val wordCountDF=spark.read.parquet(parquetFilePath)
wordCountDF.createOrReplaceTempView("wordCount")
val popularWord=spark.sql("select * from wordCount where word_name <> ' ' ORDER BY word_count DESC")
popularWord.show(20)*/
//wordCountDF.orderBy($"word_count".desc).show(10)
// wordCountDF.show(10)
/*val testfilePath="file:///home/pliu/Downloads/data_set/inverted-index-master/dataset/shakespeare/0ws0110.txt"
val testDF=wordCount(testfilePath,spark,"0ws0110.txt")
testDF.orderBy($"word_count".desc).show(10)*/
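/* Hedged inverted-index sketch (my own illustration): assuming wordCountDF (word_name, word_count, file_name)
* has been built, or re-read from parquet as in the commented code above, group by word and collect the
* (file, count) pairs for each word. Uncomment (together with the wordCountDF code above) to try it.
*
* import org.apache.spark.sql.functions.{collect_list, struct}
* val invertedIndex = wordCountDF
*   .groupBy($"word_name")
*   .agg(collect_list(struct($"file_name", $"word_count")).as("postings"))
* invertedIndex.show(10, false)
* */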
}
/*
* This function takes a list of files and a spark session, count all words in all files
* return a dataframe
* */
def buildFullWordCountDataFrame(fileList:List[File],spark:SparkSession):DataFrame ={
import spark.implicits._
val sc = spark.sparkContext
val sqlC=spark.sqlContext
val schema = StructType(Array(
StructField("word_name",StringType,false),
StructField("word_count",IntegerType,false),
StructField("file_name",StringType,false)
))
var fullDf : DataFrame = sqlC.createDataFrame(sc.emptyRDD[Row],schema)
var totalColumn:Long = 0
for(file<-fileList){
val fileName=file.getName
val filePath="file://"+file.getAbsolutePath
val wordDF=wordCount(filePath,spark,fileName)
fullDf=fullDf.union(wordDF)
totalColumn=totalColumn+wordDF.count()
}
/*println("Total row :"+totalColumn)
println("Data frame row :"+ fullDf.count())*/
return fullDf
}
/*
* This function take a file path, spark session, a file name.
* it counts all words in this file and return a data frame
* */
def wordCount(filePath:String,spark:SparkSession,fileName:String):DataFrame= {
import spark.implicits._
val sc = spark.sparkContext
val textFile = sc.textFile(filePath)
//filtering out empty words eliminates whitespace-only tokens
val wordCount = textFile.flatMap(line=>line.split(" ")).filter(word => !word.isEmpty).map(word=>(word,1)).reduceByKey((a,b)=>a+b)
val wordDF=wordCount.map(atts=>wordFile(atts._1,atts._2.toInt,fileName)).toDF("word_name","word_count","file_name")
return wordDF
}
/*
* This function take a dir path, return a list of files in this dir*/
def getFileList(fileDir:String):List[File]={
val dir = new File(fileDir)
if(dir.exists() && dir.isDirectory){
val fileList=dir.listFiles().filter(file=>file.getName.startsWith("0ws")).toList
return fileList
}
else null
}
}
|