package main.scala.exec

import java.io.File
import java.util

import com.alibaba.fastjson.{JSON, JSONArray}
import com.typesafe.config.{Config, ConfigFactory}
import main.scala.utils.ZkUtil
import org.apache.spark.sql.{DataFrame, DataFrameWriter, Dataset, Row, SparkSession, hive}



/**
 * Spark batch job: reads JSON-lines from an HDFS path, flattens each record's
 * "data" array into individual JSON rows, publishes the inferred schema to
 * ZooKeeper, then writes the transformed result into a Hive table through the
 * Hortonworks Hive Warehouse Connector.
 *
 * Usage: optionally pass a config-file path as args(0) to override the
 * bundled promotion.conf.
 */
object promotioncenterAllWrite {
  // Default configuration; replaced wholesale when a config file is supplied on the CLI.
  var conf: Config = ConfigFactory.load("promotion.conf")

  def main(args: Array[String]): Unit = {
    if (args.length == 1) {
      conf = ConfigFactory.parseFile(new File(args(0)))
    }
    val spark: SparkSession = SparkSession.builder().appName("promotioncenterAllWrite")
      .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
      .config("tez.lib.uris", "/hdp/apps/3.1.4.0-315/tez/tez.tar.gz")
      .enableHiveSupport()
      .getOrCreate()

    val zkUtil = new ZkUtil(conf.getString("dependencies.zookeeper.servers"))

    // FIX: the original passed the literal string "source.hdfs.path" as the file
    // path; the path must be looked up in the configuration.
    val initDataSet: Dataset[String] = spark.read.textFile(conf.getString("source.hdfs.path"))

    implicit val stringEncoder = org.apache.spark.sql.Encoders.STRING
    import scala.collection.JavaConverters._

    // Each input line is a JSON object whose "data" field is an array of JSON
    // strings; flatten so each array element becomes its own dataset row.
    val jsonDataSet: Dataset[String] = initDataSet.flatMap { line =>
      JSON.parseObject(line).getJSONArray("data").toJavaList(classOf[String]).asScala
    }

    val frame: DataFrame = spark.read.json(jsonDataSet)
    frame.dtypes.foreach(println)
    // (columnName, typeName) pairs rendered as strings for storage in ZooKeeper.
    val dTypeStr: Array[String] = frame.dtypes.map(_.toString())

    // FIX 1: String.split takes a regex — split(".") matches every character and
    //         yields an empty array, so indexing (1) threw at runtime; escape the dot.
    // FIX 2: the "/<table>" suffix belongs on the config *value* (the ZK node path),
    //         not concatenated into the config *key*, which does not exist in the
    //         config and would throw ConfigException.Missing.
    val tableName: String = conf.getString("sink.hive.table").split("\\.")(1)
    zkUtil.storeSchema(
      conf.getString("dependencies.zookeeper.data_structs") + "/" + tableName,
      dTypeStr)

    frame.createOrReplaceTempView(conf.getString("sink.hive.tmp_table"))

    val dataFrame: DataFrame = spark.sql(conf.getString("sink.hive.sql"))

    // Optional partition column; a missing key simply means an unpartitioned write
    // (Option instead of the original null sentinel).
    val partition: Option[String] =
      if (conf.hasPath("sink.hive.partitionby")) Some(conf.getString("sink.hive.partitionby"))
      else None

    // Write the transformed data into Hive via the Hive Warehouse Connector.
    val writer: DataFrameWriter[Row] = dataFrame.write
      .format("com.hortonworks.spark.sql.hive.llap.HiveWarehouseConnector")
      .mode("overwrite")
      .option("table", conf.getString("sink.hive.table"))
      .option("metastoreUri", conf.getString("sink.hive.metastore_uri"))

    // partitionBy mutates the writer's internal state, so configuring it before
    // save() is sufficient.
    partition.foreach(col => writer.partitionBy(col))
    writer.save()
  }
}
