package main.scala.exec

import java.io.File
import java.util
import java.util.Map

import com.alibaba.fastjson.{JSON, JSONObject}
import com.typesafe.config.{Config, ConfigFactory, ConfigValue}
import main.scala.utils.ZkUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.log4j.Logger
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, DataFrameWriter, Dataset, Row, SparkSession, hive}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.HasOffsetRanges
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, sql, streaming}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.StreamingContext

import scala.collection.mutable



object ItemcenterStart {

  // Logger name kept as "scanStart" for continuity with existing log4j configs.
  val Log = Logger.getLogger("scanStart")
  var config: Config = null
  var sparkSession: SparkSession = null
  var zkUtil: ZkUtil = null
  // Epoch millis of the last schema refresh; throttles updateSchema.
  var schemaTime: Long = 0

  /**
   * Builds the process-wide SparkSession from the `spark.conf.*` section of
   * the loaded config and applies the configured log level.
   */
  private def setSparkSession(): Unit = {
    val sparkConf = new SparkConf()
    val sparkEntries: util.Iterator[Map.Entry[String, ConfigValue]] =
      config.getConfig("spark.conf").entrySet().iterator()
    while (sparkEntries.hasNext) {
      val key: String = sparkEntries.next().getKey
      sparkConf.set(key, config.getString(s"spark.conf.${key}"))
    }
    sparkSession = SparkSession.builder()
      .appName(config.getString("spark.appName"))
      .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
      .config("tez.lib.uris", "/hdp/apps/3.1.4.0-315/tez/tez.tar.gz")
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()
    sparkSession.sparkContext.setLogLevel(config.getString("spark.logLevel"))
  }

  /**
   * Loads the global config: from `filePath` when given, otherwise from the
   * bundled `itemcenter.conf` classpath resource.
   */
  def setGlobalConfig(filePath: String): Unit = {
    if (filePath == null) {
      config = ConfigFactory.load("itemcenter.conf")
    } else {
      config = ConfigFactory.parseFile(new File(filePath))
    }
    if (null == config) {
      Log.error("配置文件加载失败，请检查配置文件是否存在")
    }
  }

  /**
   * Initializes config, SparkSession and the ZooKeeper helper.
   * args(0), when present, is an external config file path.
   */
  def init(args: Array[String]): Unit = {
    val filePath: String = if (args.length > 0) args(0) else null
    setGlobalConfig(filePath)
    setSparkSession()
    // NOTE(review): key prefix is "dependence.*" here but "dependencies.*" in
    // getKafkaDstream/updateSchema — confirm both paths exist in the config file.
    zkUtil = new ZkUtil(config.getString("dependence.zookeeper.servers"))
  }

  /**
   * Collects every entry under `source.kafka.parameter` into a java.util.Map
   * suitable for constructing the Kafka consumer.
   */
  def getKafkaParams(config: Config): Map[String, Object] = {
    val paramEntries: util.Iterator[Map.Entry[String, ConfigValue]] =
      config.getConfig("source.kafka.parameter").entrySet().iterator()
    val params = new mutable.HashMap[String, Object]()
    while (paramEntries.hasNext) {
      val key: String = paramEntries.next().getKey
      // BUGFIX: getObject throws ConfigException.WrongType for scalar values
      // (e.g. bootstrap.servers = "host:9092"); getAnyRef returns the unwrapped
      // Java value Kafka expects.
      params += (key -> config.getAnyRef(s"source.kafka.parameter.$key"))
    }
    import scala.collection.JavaConverters._
    params.asJava
  }

  /**
   * Creates the direct Kafka stream: resumes from offsets previously stored in
   * ZooKeeper when any exist, otherwise starts at the consumer-group default.
   */
  def getKafkaDstream(ssc: StreamingContext): InputDStream[ConsumerRecord[String, String]] = {
    val offsetPath: String = config.getString("dependencies.zookeeper.kafka_offset")
    val group: String = config.getString("source.kafka.group.id")
    val topics: String = config.getString("source.kafka.topics")
    val fromOffset: Predef.Map[TopicPartition, Long] =
      zkUtil.getFromOffsets(offsetPath, group, topics.split(","))
    val kafkaParams: util.Map[String, Object] = getKafkaParams(config)
    if (fromOffset.isEmpty) {
      // No saved offsets: subscribe via the Java-collection overload.
      import scala.collection.JavaConverters._
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe(topics.split(",").toSet.asJava, kafkaParams))
    } else {
      // Resume from stored offsets via the Scala-collection overload.
      import scala.collection.JavaConversions._
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe(topics.split(","), kafkaParams, fromOffset))
    }
  }

  /**
   * When enabled, infers a Spark schema for each configured dataType from the
   * batch's JSON payloads and stores the (column, type) pairs in ZooKeeper.
   * Throttled to run at most once every `System.SchemaTime` seconds.
   * Returns the input RDD unchanged (used inside DStream.transform).
   */
  def updateSchema(rdd: RDD[JSONObject]): RDD[JSONObject] = {
    // BUGFIX: multiply with 1000L — `getInt * 1000` could overflow Int.
    if (config.getBoolean("System.updateSchema") &&
        System.currentTimeMillis() - schemaTime > config.getInt("System.SchemaTime") * 1000L) {
      import scala.collection.JavaConverters._
      // NOTE(review): this key is "source.dataType" but toTableInfo reads
      // "source.dataTypes" — confirm both exist in the config file.
      val dataTypes: mutable.Buffer[String] = config.getStringList("source.dataType").asScala.toBuffer

      // One RDD of raw JSON record strings per declared dataType.
      val typedRdds: mutable.Buffer[(String, RDD[String])] = for (dataType <- dataTypes) yield {
        val recordsRdd: RDD[String] = rdd.filter(_.getString("dataType") == dataType).flatMap { obj =>
          val records: util.List[String] = obj.getJSONArray("data").toJavaList(classOf[String])
          records.asScala
        }
        (dataType, recordsRdd)
      }
      for ((dataType, typedRdd) <- typedRdds; if !typedRdd.isEmpty()) {
        // https://blog.csdn.net/dz77dz/article/details/88802577
        implicit val stringEncoder = org.apache.spark.sql.Encoders.STRING
        // Let Spark infer the schema from the JSON payload, then persist it.
        val df: DataFrame = sparkSession.read.json(sparkSession.createDataset(typedRdd))
        val dtypes: Array[(String, String)] = df.dtypes
        val columnTypes: Array[String] = for (dt <- dtypes) yield dt.toString()
        if (dtypes.length > 0) {
          zkUtil.storeSchema(config.getString("dependencies.zookeeper.data_structs") + "/" + dataType, columnTypes)
        }
      }
      schemaTime = System.currentTimeMillis()
    }
    rdd
  }

  /**
   * Explodes one envelope JSONObject into its "data" records, tagging each
   * record with the envelope's dataType.
   */
  def toStringArray(v: JSONObject): mutable.Seq[JSONObject] = {
    val dataType: String = v.getString("dataType")
    import scala.collection.JavaConverters._
    val rawRecords: mutable.Buffer[String] = v.getJSONArray("data").toJavaList(classOf[String]).asScala
    for (raw <- rawRecords) yield JSON.parseObject(raw).fluentPut("dataType", dataType)
  }

  /**
   * For each configured dataType present in the batch, registers a temp view,
   * runs the configured (or default) SQL and yields
   * (hive table, partition column or null, result DataFrame).
   */
  def toTableInfo(rdd: RDD[JSONObject]): List[(String, String, DataFrame)] = {
    Log.info("transform DataFrame start...")
    implicit val stringEncoder = org.apache.spark.sql.Encoders.STRING
    import scala.collection.JavaConverters._
    val dataInfo: mutable.Buffer[(String, String, DataFrame)] =
      for (elem <- config.getStringList("source.dataTypes").asScala
           if config.hasPath(s"sink.hive.dataTypes.${elem}")
           // BUGFIX: was filtering on the misspelled key "dateType", which made
           // every record look absent and skipped all types.
           if !rdd.filter(_.getString("dataType") == elem).isEmpty()) yield {
        val jsonStrRdd: RDD[String] = rdd.filter(_.getString("dataType") == elem).map(_.toJSONString)
        val jsondata: Dataset[String] = sparkSession.createDataset(jsonStrRdd)
        sparkSession.read.json(jsondata).createOrReplaceTempView("tmp_" + elem)
        val tableInfo: Config = config.getConfig(s"sink.hive.dataTypes.${elem}")
        val table: String = tableInfo.getString("table")
        val partitionby: String = if (tableInfo.hasPath("partitionby")) tableInfo.getString("partitionby") else null
        val sql: String = if (tableInfo.hasPath("sql")) tableInfo.getString("sql") else "select * from tmp_" + elem
        // BUGFIX: was sparkSession.sql("sql") — executing the literal string
        // "sql" instead of the configured statement.
        val dfs: DataFrame = sparkSession.sql(sql)
        // BUGFIX: the log line never interpolated the table/partition values.
        Log.info("datatype: " + elem + "\ttable: " + table + "\tpartition: " + partitionby)
        Log.info("sql: " + sql)
        (table, partitionby, dfs)
      }
    Log.info("transform DataFrame finish...")
    dataInfo.toList
  }

  /**
   * Graceful-stop hook, called once per batch: when zookeeper-based stopping is
   * enabled and the stop mark node can be deleted, closes ZK and stops the
   * streaming context.
   */
  def stop(ssc: StreamingContext): Unit = {
    if (config.getBoolean("System.canStop") && config.getString("System.canStopType") == "zookeeper") {
      val stopFlag: String = config.getString("System.stopMark")
      println(
        """
          |>>>>>>>>
          |stopping...
          |""".stripMargin
      )
      // deleteRecursive returns true only when the stop mark existed and was
      // removed — i.e. an operator requested shutdown.
      if (zkUtil.getZkClient().deleteRecursive(stopFlag)) {
        zkUtil.getZkClient().close()
        ssc.stop()
      }
    }
  }

  /**
   * Wires the streaming pipeline: Kafka -> JSON parse/filter -> schema update
   * -> record explosion -> Hive write, committing Kafka offsets to ZooKeeper
   * only after a fully successful batch (at-least-once semantics).
   */
  def start(ssc: StreamingContext): Unit = {
    val initDstream: InputDStream[ConsumerRecord[String, String]] = getKafkaDstream(ssc)
    // BUGFIX: was an immutable empty val shadowed inside transform, so the
    // commit below always stored an empty offset array. transform's closure
    // runs on the driver each batch, so assigning this var here is safe.
    var offsetRanges: Array[OffsetRange] = Array.empty
    val conf: Broadcast[Config] = sparkSession.sparkContext.broadcast(config)

    val kafkaDataDStream: DStream[JSONObject] = initDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      for (elem <- offsetRanges) {
        println(s"topic is ${elem.topic} partition  ${elem.partition} from_offset ${elem.fromOffset} until_offset ${elem.untilOffset}")
      }
      rdd
    }.map(_.value())
      .filter(!_.isEmpty)
      .map(JSON.parseObject)
      .filter { s =>
        // Keep only records whose dataType is declared in the config.
        val dataTypes: util.List[String] = conf.value.getStringList("source.dataType")
        dataTypes.contains(s.getString("dataType"))
      }
      .transform(updateSchema(_))
      .flatMap(toStringArray(_))

    kafkaDataDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        import scala.util.control.NonFatal
        try {
          val tableInfos: List[(String, String, DataFrame)] = toTableInfo(rdd)
          tableInfos.filter(t => t._1 != null && t._1.length > 0).foreach { f =>
            // BUGFIX: connector class name was misspelled ("HiveWarsehouseConnector").
            val writer: DataFrameWriter[Row] = f._3.write
              .format("com.hortonworks.spark.sql.hive.llap.HiveWarehouseConnector")
              .mode("append")
              .option("table", f._1)
              .option("metastoreUri", config.getString("sink.hive.metastore_uri"))
            // BUGFIX: condition was inverted (null == f._2), so it partitioned by
            // null and never by the configured column. DataFrameWriter is a
            // mutable builder, so the call mutates `writer` in place.
            if (null != f._2) {
              writer.partitionBy(f._2)
            }
            Log.info("写入hive...")
            writer.save()
          }
          // Commit offsets only after every table wrote successfully.
          zkUtil.storeOffsets(config.getString("dependencies.zookeeper.kafka_offset"),
            config.getString("source.kafka.group.id"), offsetRanges)
        } catch {
          // NonFatal (was Throwable): let OutOfMemoryError etc. propagate; any
          // recoverable failure stops the context so offsets past the failed
          // batch are never committed.
          case NonFatal(t) =>
            t.printStackTrace()
            sparkSession.sparkContext.stop()
        }
      }
      stop(ssc)
    }
  }

  def main(args: Array[String]): Unit = {
    init(args)
    val ssc = new StreamingContext(sparkSession.sparkContext, streaming.Seconds(config.getInt("source.freq")))
    start(ssc)
    // BUGFIX: without start()/awaitTermination() the DStream graph was built
    // but no batch ever executed and the driver exited immediately.
    ssc.start()
    ssc.awaitTermination()
  }
}
