package com.taoqi.ss

import java.sql.Timestamp

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
  * Created by TQ on 2017/9/21.
  */
object SSTest2 {


  /**
    * Streaming word-count over the Kafka topic "gps-data": reads each record's
    * value as text, splits it into words, and counts words per sliding
    * 1-minute window (evaluated every 30 seconds), printing results to the console.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().master("local[2]").appName("kafka").getOrCreate()

    // Streaming source: Kafka. Each row exposes the standard Kafka columns
    // (key, value, topic, partition, offset, timestamp, timestampType).
    val dsReader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "10.0.25.57:9092,10.0.25.48:9092,10.0.25.39:9092")
      .option("subscribe", "gps-data")
      .option("includeTimestamp", true)
      .load()
    import spark.implicits._

    // Keep the payload as a String and the record time as a real TIMESTAMP.
    // The previous "CAST(timestamp as STRING)" yielded a StringType column that
    // the (String, Timestamp) encoder cannot up-cast to java.sql.Timestamp,
    // failing the query at analysis time.
    val df = dsReader
      .selectExpr("CAST(value AS STRING)", "CAST(timestamp AS TIMESTAMP)")
      .as[(String, Timestamp)]

    println(dsReader.isStreaming)
    dsReader.printSchema()

    // Split each message into words, pairing every word with its record timestamp.
    val words = df.flatMap { case (line, ts) =>
      println(line)
      line.split(" ").map(word => (word, ts))
    }.toDF("word", "timestamp")
    words.printSchema()

    import org.apache.spark.sql.functions._
    // Sliding-window aggregation: 1-minute windows, sliding every 30 seconds —
    // e.g. for per-interval counts such as PV/UV-style statistics.
    // withWatermark allows data up to 1 minute late to still join its window.
    // NOTE(review): with "complete" output mode below, the watermark never drops
    // state, so it is effectively a no-op here; "append"/"update" would honor it,
    // but orderBy on a streaming query is only allowed in complete mode.
    val windowCounts = words
      .withWatermark("timestamp", "1 minutes")
      .groupBy(window($"timestamp", "1 minutes", "30 seconds"), $"word")
      .count()
      .orderBy("window")
    windowCounts.printSchema()

    // Emit the full result table to the console after every trigger and block
    // this thread until the streaming query terminates.
    windowCounts.writeStream
      .outputMode("complete")
      .format("console")
      .option("truncate", "false")
      .start()
      .awaitTermination()
  }


  /**
    * Builds (or reuses) a [[SparkSession]] from the given configuration,
    * optionally with Hive support.
    *
    * @param conf       Spark configuration applied to the session builder
    * @param isNeedHive when true (default), the session is created with Hive support
    * @return the created (or pre-existing) SparkSession
    */
  def getSparkSession(conf: SparkConf, isNeedHive: Boolean = true): SparkSession = SparkSession.synchronized {
    // Clear any previously registered default session so getOrCreate builds a
    // fresh session from `conf` instead of returning a stale default.
    SparkSession.clearDefaultSession()
    val builder = SparkSession.builder().config(conf)
    // Expression-style val instead of a null-initialized var assigned per branch.
    val session =
      if (isNeedHive) builder.enableHiveSupport().getOrCreate()
      else builder.getOrCreate()
    // Clear the default again so this session does not leak as the process-wide
    // default for unrelated callers.
    SparkSession.clearDefaultSession()
    session
  }
}