package streaming

import com.alibaba.fastjson.JSON
import lombok.extern.slf4j.Slf4j
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkException}

import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.sql.{AnalysisException, SaveMode, SparkSession}
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}


object ReceiverTest {
  private val logger = Logger.getLogger("org.apache.spark")

  /**
   * Row schema for records written to `default.records_new1`.
   * Field order must match the target Hive table: `insertInto` binds by position.
   */
  case class order(order_id: String, user_id: String, eval_set: String, order_number: String,
                   order_dow: String, order_hour_of_day: String, days_since_prior_order: String)

  /**
   * Receiver-based Kafka → Hive streaming job.
   *
   * Consumes JSON order messages from the `badou` topic every 10 seconds,
   * parses each into an [[order]] row and appends the batch to
   * `default.records_new1`.
   */
  def main(args: Array[String]): Unit = {
    // Hive-enabled session; dynamic-partition settings are required for
    // writes into partitioned Hive tables via insertInto.
    val spark = SparkSession.builder().appName("rdd2DF")
      .config("spark.sql.warehouse.dir", "hdfs://master:9000/user/hive/warehouse")
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport().getOrCreate()
    // 10-second micro-batches on the session's SparkContext.
    val ssc = new StreamingContext(spark.sparkContext, Seconds(10))

    // Topic to consume -> number of receiver threads for it.
    val topicMap = Map("badou" -> 2)
    val zkQuorum = "192.168.183.10:2181"
    // Receiver-based stream: records are (key, message) pairs; keep the message only.
    val messages = KafkaUtils.createStream(ssc, zkQuorum, "group1", topicMap).map(_._2)

    import spark.implicits._
    messages.map { line =>
      val mess = JSON.parseObject(line, classOf[Orders])
      // NOTE(review): `mess.hour` / `mess.day` are mapped onto
      // order_hour_of_day / days_since_prior_order — confirm those Orders
      // fields actually carry these values (names do not match).
      order(mess.order_id, mess.user_id, mess.eval_set, mess.order_number,
        mess.order_dow, mess.hour, mess.day)
    }.foreachRDD { rdd =>
      val df = rdd.toDF("order_id", "user_id", "eval_set", "order_number",
        "order_dow", "order_hour_of_day", "days_since_prior_order")
      df.show()
      // insertInto resolves columns by POSITION against the existing table schema,
      // so the toDF column order above must match the table definition.
      df.write.mode(SaveMode.Append).insertInto("default.records_new1")
      logger.info("====")
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
