package com.lmq

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions.{col, split, udf}
import org.apache.spark.sql.types.{DoubleType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.joda.time.DateTime



// Typed view of one IoT sensor reading parsed from a "device,deviceType,signal,time" line.
// NOTE(review): `time` is presumably an epoch timestamp — units (seconds vs millis) not shown here; confirm against the data source.
case class DeviceData(device: String, deviceType: String, signal: Double, time: Long)

/**
 * Demonstrates basic Structured Streaming operations on an IoT stream:
 * Selection, Projection, Aggregation.
 *
 * Reads comma-separated "device,deviceType,signal,time" lines from a local
 * socket, parses them into typed columns, and runs example untyped/typed
 * queries, writing results to the console.
 */
object BasicOp {

  // Silence Spark's verbose INFO logging so console sink output is readable.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName(this.getClass.getName)
    .getOrCreate()
  import spark.implicits._

  // Expected record layout: device,deviceType,signal,time
  // (used by the commented-out file source below; the socket source ignores it).
  val schema = StructType(
    Array(
      StructField("device", StringType, nullable = true),
      StructField("deviceType", StringType, nullable = true),
      StructField("signal", DoubleType, nullable = true),
      StructField("time", LongType, nullable = true)
    )
  )

  // Alternative source: CSV files dropped into a watched directory.
  //  val df = spark
  //    .readStream
  //    .option("sep", ",")
  //    .schema(schema)
  //    .csv("D:\\javaproject\\SparkStructuredStreaming\\src\\data\\sensors")

  // Socket source: each incoming line arrives as a single string column "value".
  val df1: DataFrame = spark
    .readStream
    .format("socket")
    .option("host", "localhost")
    .option("port", 9999)
    .load()

  // Legacy per-column UDFs, kept only for backward compatibility with any
  // external callers. Each one re-splits the entire line, so the original
  // select() split every row four times; prefer the built-in split() below.
  val mysplit1 = udf((x: String) => x.split(",")(0))
  val mysplit2 = udf((x: String) => x.split(",")(1))
  val mysplit3 = udf((x: String) => x.split(",")(2).toDouble)
  val mysplit4 = udf((x: String) => x.split(",")(3).toLong)

  // Parse each line exactly once with the built-in split() function.
  // Unlike opaque UDFs, built-in functions are visible to the Catalyst
  // optimizer. cast() yields null for malformed numeric fields instead of
  // failing the whole task the way the UDFs' .toDouble/.toLong would.
  private val parts = split(col("value"), ",")
  val df: DataFrame = df1.select(
    parts.getItem(0).as("device"),
    parts.getItem(1).as("deviceType"),
    parts.getItem(2).cast(DoubleType).as("signal"),
    parts.getItem(3).cast(LongType).as("time")
  )
  df.printSchema()

  def main(args: Array[String]): Unit = {
    // Streaming Dataset with typed IoT device data.
    val ds: Dataset[DeviceData] = df.as[DeviceData]

    // Select the devices which have signal more than 10.
    val sgB10 = ds.select("device").where("signal > 10") // using untyped API
    ds.filter(_.signal > 10).map(_.device) // using typed APIs (demo only; result unused)

    // Running count of number of updates for each device type (demo only; result unused).
    df.groupBy("deviceType").count()
    // Running average signal for each device type (demo only; result unused).
    import org.apache.spark.sql.expressions.scalalang.typed
    ds.groupByKey(_.deviceType).agg(typed.avg(_.signal))

    // A streaming DataFrame can also be registered as a temporary view and
    // queried with SQL; the result is another streaming DataFrame.
    df.createOrReplaceTempView("updates")
    val frame = spark.sql(
      """
        |select count(*) from updates
        |""".stripMargin)

    // Start both queries; they run concurrently on the same stream.
    val query = frame.writeStream
      .outputMode("update")
      .format("console")
      .start()

    val query1 = sgB10.writeStream
      .outputMode("update")
      .format("console")
      .start()

    // Block the driver until the queries terminate.
    query.awaitTermination()
    query1.awaitTermination()
  }

}
