package kafka_model

import java.sql.ResultSet
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.StreamingContext._
import scala.util.parsing.json.JSON

object kafka {

  /**
   * Entry point: consumes JSON messages from the Kafka topic `lagou` with a
   * 5-second micro-batch interval and upserts each parsed record into the
   * MySQL `position` table.
   *
   * Expected message shape (per the original cast): a JSON object whose
   * values are arrays of strings, e.g. {"col": ["value"], ...} —
   * NOTE(review): this shape is assumed from the cast below, confirm against
   * the producer.
   */
  def main(args: Array[String]): Unit = {
    println("环境搭建成功") // startup marker (kept verbatim from the original)
    Logger.getLogger("org").setLevel(Level.WARN)

    // Spark config: local master with 3 threads; app name shown in the Spark web UI.
    val conf = new SparkConf().setMaster("local[3]").setAppName("kafka")
    // Streaming context with a 5-second batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))
    ssc.checkpoint("streaming") // checkpoint directory for recovery metadata

    // Kafka consumer parameters.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop1:9092", // any broker in the cluster works
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "kafka01" // consumers in one group share the topic (no duplicate consumption)
    )

    // Topic(s) to subscribe to.
    val topics = Array("lagou")

    // Direct stream from Kafka.
    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Parse each record's value as JSON. parseFull already returns an Option;
    // the original wrapped it in Some(...), which let parse failures through
    // as Some(None) and crashed later on .get. Here unparseable records are
    // simply skipped in the match below.
    kafkaStream
      .map(record => JSON.parseFull(record.value()))
      .foreachRDD { (rdd, _) =>
        rdd.foreachPartition { records =>
          // One JDBC connection per partition instead of one per record.
          val conn = DBUtil.getConnect()
          try {
            records.foreach {
              case Some(parsed: Map[_, _]) =>
                // Cast mirrors the original; with erasure only the Map itself
                // is checked here, element types are trusted.
                upsertPosition(conn, parsed.asInstanceOf[Map[String, List[String]]])
              case _ => // unparseable message or unexpected shape: skip
            }
          } finally {
            // NOTE(review): assumes DBUtil.getConnect() hands out a fresh
            // connection per call — confirm; if it returns a shared singleton,
            // closing here would break other users.
            conn.close()
          }
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Upserts one parsed record into the `position` table.
   *
   * Uses a parameterized INSERT ... ON DUPLICATE KEY UPDATE. The original
   * built the SQL text by string formatting of message content, which was
   * vulnerable to SQL injection; values are now bound as parameters. Column
   * names cannot be bound, so keys are validated as plain identifiers before
   * being spliced into the statement.
   *
   * @param conn     open JDBC connection (not closed here; caller owns it)
   * @param position parsed record: column name -> list of values (first value is used)
   */
  private def upsertPosition(conn: java.sql.Connection, position: Map[String, List[String]]): Unit = {
    val keys = position.keys.toList
    // Reject empty records and any key that is not a simple identifier,
    // keeping untrusted input out of the SQL text entirely.
    if (keys.nonEmpty && keys.forall(_.matches("[A-Za-z0-9_]+"))) {
      // First element of each value list, or "" if the list is empty
      // (the original called .head, which crashed on empty lists).
      val values = keys.map(k => position(k).headOption.getOrElse(""))

      val columnList   = keys.mkString(",")
      val placeholders = List.fill(keys.size)("?").mkString(",")
      val updateClause = keys.map(k => s"$k = ?").mkString(",")
      val sql =
        s"INSERT INTO position ($columnList) VALUES ($placeholders) ON DUPLICATE KEY UPDATE $updateClause"

      val stmt = conn.prepareStatement(sql)
      try {
        // Each value is bound twice: once for VALUES(...) and once for the
        // ON DUPLICATE KEY UPDATE clause. JDBC parameters are 1-indexed.
        values.zipWithIndex.foreach { case (v, i) =>
          stmt.setString(i + 1, v)
          stmt.setString(i + 1 + keys.size, v)
        }
        stmt.executeUpdate()
      } finally {
        stmt.close() // original leaked the Statement
      }
    }
  }

}