package main.scala.demo

import java.util.Properties

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * SQLForDStreamDemo
  * Reads words from a Spark Streaming socket stream and saves the per-batch
  * word counts to a MySQL database.
  * @author zhangyimin
  * @since 2018-10-16 16:57
  * @version 1.0
  */
object SQLForDStreamDemo {

  // JDBC connection URL for the target MySQL database.
  val jdbcUrl = "jdbc:mysql://localhost:3306/hive_etl?characterEncoding=utf-8&useSSL=false"

  /**
    * Entry point: reads whitespace-separated words from a socket text stream,
    * counts occurrences per 3-second micro-batch with Spark SQL, and appends
    * the counts to the MySQL table `rdd_demo_words` via JDBC.
    *
    * @param args optional overrides: args(0) = socket host, args(1) = socket port;
    *             defaults preserve the original hard-coded values.
    */
  def main(args: Array[String]): Unit = {
    // Silence noisy Spark/Jetty logging so the per-batch output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Host/port may now be supplied on the command line; falling back to the
    // original hard-coded defaults keeps existing invocations working.
    val host = if (args.length > 0) args(0) else "10.16.7.36"
    val port = if (args.length > 1) args(1).toInt else 5678

    // local[2]: streaming needs at least two cores — one for the socket
    // receiver, one to process batches.
    val sparkConf = new SparkConf().setAppName("MyNetworkWordCount").setMaster("local[2]")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))
    val lines = streamingContext.socketTextStream(host, port, StorageLevel.MEMORY_ONLY_SER)
    val words = lines.flatMap(_.split(" "))

    // Query each micro-batch of the stream with Spark SQL.
    words.foreachRDD { rdd =>
      // Cheap emptiness guard. The original used result.count() != 0, which
      // launched a full extra aggregation job per batch; rdd.isEmpty() only
      // inspects the first partition(s).
      if (!rdd.isEmpty()) {
        // getOrCreate reuses one SparkSession across batches, configured from
        // the streaming context's conf.
        val spark = SparkSession
          .builder()
          .config(rdd.sparkContext.getConf)
          .getOrCreate()

        import spark.implicits._

        // Map the RDD of words to a single-column DataFrame and register it
        // as a temp view for SQL access.
        val rddDF = rdd.toDF("word")
        rddDF.createOrReplaceTempView("words_tab")

        val result = spark.sql("select word,count(*) total from words_tab group by word")

        val props = new Properties()
        // NOTE(review): credentials are hard-coded; move them to configuration
        // (spark-submit --conf / properties file) before any real deployment.
        props.setProperty("user", "root")
        props.setProperty("password", "123456")

        // BUG FIX: the original probed table existence with
        // spark.read.jdbc(...).count(), which (a) throws when the table does
        // not exist — so the "overwrite" create-branch was unreachable and the
        // first run crashed — and (b) scanned the entire MySQL table on every
        // batch. SaveMode "append" already creates the table when it is
        // missing, so a plain append is both correct and cheap.
        result.write.mode("append").jdbc(jdbcUrl, "rdd_demo_words", props)

        result.show()
      }
    }

    streamingContext.start()
    // Block the driver until the streaming job is stopped or fails.
    streamingContext.awaitTermination()
  }

  /** Bean shape used by the createDataFrame examples (kept for compatibility). */
  case class wordCount(words: String, total: Int)

}
