package jupitermouse.site.spark.sync.v2

import java.util.Properties
import java.util.function.BiConsumer

import com.alibaba.fastjson.JSON
import com.typesafe.scalalogging.Logger
import jupitermouse.site.spark.sync.v2.model.{Partition, Setting, Target}
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ArrayBuffer
import scala.io.Source

/**
 * Entry point: reads a JDBC source in time-partitioned slices and writes it to a Hive table.
 *
 * @author renqiqiang
 * @since 1.0
 */
object PartionApp {

  private val log = Logger(LoggerFactory.getLogger(PartionApp.getClass))

  /**
   * Application entry point.
   *
   * Expects exactly one argument: the path of the JSON settings file.
   * Pipeline: parse settings -> build SparkSession -> read JDBC source -> save to Hive -> stop.
   *
   * @param args command-line arguments; args(0) is the settings file path
   * @throws IllegalArgumentException if no argument is supplied
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 1) {
      // Original message was garbled ("Need more then one args,peean").
      throw new IllegalArgumentException("Need at least one argument: the settings file path")
    }
    // An Array logs as its object identity ([Ljava.lang.String;@...); join the elements instead.
    log.info("Args: {}", args.mkString(", "))
    val settingFilePath = args(0)
    log.info("Setting file path: {}", settingFilePath)
    // Parse configuration
    val setting = parseSetting(settingFilePath)
    // Get or create the Spark session
    val spark = getOrCreate(setting)
    // Read the source data
    val df = read(spark, setting)
    // Write to the target table
    save(df, setting.target)
    // Shut down
    spark.stop()
  }

  /**
   * Loads the JSON settings file and deserializes it into a [[Setting]].
   *
   * @param settingFilePath path to the UTF-8 encoded JSON settings file
   * @return the parsed [[Setting]]
   */
  def parseSetting(settingFilePath: String): Setting = {
    val source = Source.fromFile(settingFilePath, "UTF-8")
    try {
      val json = source.mkString
      log.info("Setting: {}", json)
      JSON.parseObject(json, classOf[Setting])
    } finally {
      // Source wraps an open file handle; the original version leaked it.
      source.close()
    }
  }

  /**
   * Builds (or reuses) a Hive-enabled SparkSession configured from the settings.
   *
   * @param setting application settings; every entry of `setting.config` is applied as Spark config
   * @return the shared SparkSession
   */
  def getOrCreate(setting: Setting): SparkSession = {
    val builder = SparkSession.builder().appName(setting.app)
    // Apply each key/value pair from the (java.util.Map) config onto the builder.
    setting.config.forEach(new BiConsumer[String, String]() {
      override def accept(key: String, value: String): Unit = builder.config(key, value)
    })
    builder.enableHiveSupport().getOrCreate()
  }

  /**
   * Reads the configured JDBC source as a DataFrame, one query per partition predicate.
   *
   * @param spark   the active SparkSession
   * @param setting application settings holding the JDBC source definition
   * @return the loaded DataFrame
   */
  def read(spark: SparkSession, setting: Setting): DataFrame = {
    val src = setting.source
    // Copy the JDBC connection options into java.util.Properties.
    val connProps = new Properties()
    src.properties.forEach(new BiConsumer[String, String]() {
      override def accept(key: String, value: String): Unit = connProps.setProperty(key, value)
    })
    // Partitioned parallel read: Spark issues one query per predicate.
    spark.read.jdbc(src.url, src.dbtable, predicate(src.partition), connProps)
  }

  /**
   * Builds JDBC partition predicates by slicing the [lowerBound, upperBound]
   * time range into `numPartitions` half-open intervals on the partition column.
   *
   * Each predicate has the form `col >= 'start' AND col < 'end'`; together they
   * cover the whole range without overlap (the final boundary is the formatted
   * upper bound itself, so truncation in the step cannot drop the tail).
   *
   * @param partition partition settings: column, date format, bounds, partition count
   * @return one SQL predicate string per partition
   * @throws IllegalArgumentException if numPartitions is not a positive integer
   */
  def predicate(partition: Partition): Array[String] = {
    // val, not var: the formatter is never reassigned.
    val fdf = FastDateFormat.getInstance(partition.dataFormat)
    // Convert both bounds to epoch milliseconds.
    val lower = fdf.parse(partition.lowerBound).getTime
    val upper = fdf.parse(partition.upperBound).getTime
    val n = partition.numPartitions.toInt
    // Guard the division below: n == 0 would throw ArithmeticException with no context.
    require(n > 0, s"numPartitions must be a positive integer, got: ${partition.numPartitions}")
    // Step in milliseconds; integer division may truncate, but the explicit
    // upper bound appended below guarantees full coverage of the range.
    val step: Long = (upper - lower) / n
    // n interior boundaries plus the exact upper bound => n + 1 boundaries => n intervals.
    val boundaries = (0 until n).map(i => fdf.format(lower + i * step)) :+ fdf.format(upper)

    log.info("partition: {}", boundaries)

    // Pair consecutive boundaries into half-open [start, end) predicates.
    val predicates = boundaries.zip(boundaries.tail).map {
      case (start, end) => s"${partition.partitionColumn} >= '$start' AND ${partition.partitionColumn} < '$end'"
    }
    predicates.toArray
  }

  /**
   * Persists the DataFrame as a Hive table using the configured save mode and options.
   *
   * @param df     the data to write
   * @param target target settings: save mode, writer options and destination table name
   */
  def save(df: DataFrame, target: Target): Unit = {
    val writer = df.write
      .mode(target.mode)
      .format("hive")
      .options(target.config)
    writer.saveAsTable(target.dbtable)
  }

}
