//package com.xl.bigdata.spark
//
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.UserGroupInformation
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
import org.apache.spark.{SparkConf, SparkContext}
import java.io.File
import scala.collection.JavaConversions.iterableAsScalaIterable

 import scala.collection.JavaConversions._
//object SparkKafka2Hive {
//  val cluster = "qdcdh"
//  val hadoopXml: String = cluster + "-" + "core-site.xml"
//  val hdfsXml: String = cluster + "-" + "hdfs-site.xml"
//  val hiveXml: String = cluster + "-" + "hive-site.xml"
//  val keytabString = "./pub_stream_dev.keytab"
//  val principal = "pub_stream_dev"
//
//  def main(args: Array[String]): Unit = {
//    val sparkConf = new SparkConf()
//    val hiveConfiguration = new Configuration()
//    hiveConfiguration.addResource(hiveXml)
//    hiveConfiguration.foreach(elem => {
//      sparkConf.set(elem.getKey, elem.getValue)
//    })
//    sparkConf.set("hive.exec.dynamic.partition", "true")
//    sparkConf.set("hive.exec.dynamic.partition.mode", "nonstrict")
//    val sc = new SparkContext(sparkConf)
//    val ssc = new StreamingContext(sc, Seconds(10))
//    val ss = SparkSession.builder.config(sparkConf)
//      .enableHiveSupport()
//      .getOrCreate
//    val jaasConfig = "com.sun.security.auth.module.Krb5LoginModule " +
//      "required serviceName=\"kafka\" useKeyTab=true storeKey=true " +
//      "keyTab=\"" + keytabString + "\" principal=\"" + principal + "\" renewTicket=true;"
//    // NOTE(review): JAAS option values must be double-quoted (sasl.jaas.config syntax);
//    // also confirm `renewTicket` — the documented Krb5LoginModule option is `renewTGT`.
//
//    val kafkaParams = Map[String, Object](
//      "bootstrap.servers" -> "kafkapro01.haier.com:9092,kafkapro02.haier.com:9092,kafkapro03.haier.com:9092",
//      "key.deserializer" -> classOf[StringDeserializer],
//      "value.deserializer" -> classOf[StringDeserializer],
//      "group.id" -> "canal113_mes_19007653",
//      "auto.offset.reset" -> "latest",
//      "enable.auto.commit" -> (false: java.lang.Boolean),
//      "security.protocol" -> "SASL_PLAINTEXT",
//      "sasl.jaas.config" -> jaasConfig
//    )
//    val topics = Array("canal113_mes")
//    val stream = KafkaUtils.createDirectStream[String, String](
//      ssc,
//      PreferConsistent,
//      Subscribe[String, String](topics, kafkaParams)
//    )
//    stream.foreachRDD((rdd, time) => {
//      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
//      yourCalculation(rdd, time, ss)
//      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
//    })
//    ssc.start() // Start the computation
//    ssc.awaitTermination() // Wait for the computation to terminate
//  }
//
//  def yourCalculation(rdd: RDD[ConsumerRecord[String, String]], time: Time, ss: SparkSession): Unit = {
//    // step 1
//    if (rdd.isEmpty()) {
//      println("rdd is empty")
//      return
//    }
//    // step 2
//    //    rdd.foreachPartition(partition => {
//    //      partition.foreach(row => {
//    //        println(row)
//    //      })
//    //    })
//    //step 3
//    import ss.implicits._ // the SparkSession parameter is named `ss`; `spark` is undefined here
//    val kafkaData = rdd.map(element => {
//      KafkaRecord(element.topic(),
//        element.partition(),
//        element.offset(),
//        element.timestamp(),
//        element.key(),
//        element.value())
//    }).toDF()
//    // step 4
//    val dataStruct: StructType = new StructType()
//      .add("database", StringType)
//      .add("table", StringType)
//      .add("optType", StringType)
//      .add("ts", LongType)
//      .add("es", LongType)
//      //      .add("pkNames", ArrayType(StringType))
//      .add("pkNames", StringType)
//      //      .add("data", MapType(StringType, StringType))
//      .add("data", StringType)
//      //      .add("old", MapType(StringType, StringType))
//      .add("old", StringType)
//    val parseDataFrame = kafkaData.select(from_json(col("value"), dataStruct).as("message"))
//      .select(col("message").getField("database").as("database")
//        , col("message").getField("table").as("table")
//        , col("message").getField("optType").as("optType")
//        , col("message").getField("ts").as("ts")
//        , col("message").getField("es").as("es")
//        , col("message").getField("pkNames").as("pkNames")
//        , col("message").getField("data").as("data")
//        , col("message").getField("old").as("old"))
//      .repartition(3)
//    parseDataFrame.createOrReplaceTempView("table_1")
//    // save
//    val hadoopConfiguration = new Configuration()
//    hadoopConfiguration.addResource(hadoopXml)
//    hadoopConfiguration.addResource(hdfsXml)
//    // set
//    hadoopConfiguration.foreach(elem => {
//      ss.sparkContext.hadoopConfiguration.set("spark.hadoop." + elem.getKey, elem.getValue)
//      ss.sparkContext.hadoopConfiguration.set(elem.getKey, elem.getValue)
//    })
//    // kerberos
//    println("setting UGI conf")
//    UserGroupInformation.setConfiguration(hadoopConfiguration)
//    val keytab = new File(keytabString)
//    println("Ability to read keytab '" + keytabString + "' : " + keytab.canRead)
//    println("logging in via UGI and keytab.")
//    // loginUserFromKeytab sets the static (process-wide) login user, which the Hive/HDFS
//    // write below relies on; the ...AndReturnUGI variant only returns a UGI, and its
//    // result was being discarded here.
//    UserGroupInformation.loginUserFromKeytab(principal, keytabString)
//    println("finished login attempt via UGI and keytab. security init " + UserGroupInformation.isSecurityEnabled)
//    // insert
//    ss.sql("insert into table bdp_jt.chengdianhu_spark_kafka_hfds_test_1 partition(pt) select `data`, `old`, `database` as database1, `table` as table1, `opttype`, `ts`, `es`, `pknames`, CURRENT_DATE() as pt from table_1")
//  }
//}
