package demo.spark.hurypoint.filedealer

import java.time.LocalDate

import demo.spark.utils.{SparkCore, SparkSql}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{SQLContext, SaveMode, SparkSession}
import org.apache.spark.storage.StorageLevel

/**
 * Batch job that cleans bury-point data: for each date in a fixed range it
 * reads that day's parquet directory, strips escaped-JSON artifacts from the
 * `args` column via a registered UDF, and re-writes the cleaned rows as both
 * parquet and CSV.
 */
object ParquetDealer {

  // Source parquet home directory.
  val SOURCE_PARQUET_HOME_DIR = "/Users/icasue/Desktop/bury_point/parquet/"
  val SOURCE_PARQUET_ITEM_DIR_SUFFIX = ".parquet"

  // Output parquet home directory (JSON-cleaned data).
  val SAVE_PARQUET_HOME_DIR = "/Users/icasue/Desktop/bury_point/parquet_json_clear/"
  val SAVE_PARQUET_ITEM_DIR_SUFFIX = ".parquet"

  // Output CSV home directory (JSON-cleaned data).
  val SAVE_CSV_HOME_DIR = "/Users/icasue/Desktop/bury_point/csv_json_clear/"
  val SAVE_CSV_ITEM_DIR_SUFFIX = ".csv"

  def main(args: Array[String]): Unit = {
    // Created for its side effect of initializing the Spark runtime.
    val sparkContext: SparkContext = SparkCore.getContext("BuryPointDemo")("local[*]")
    val sparkSession: SparkSession = SparkSql.getSession("BuryPointDemo")("local[*]")

    // Register the clearJSONStr UDF globally: removes backslash escapes and
    // un-quotes embedded JSON objects so the column holds plain JSON text.
    // Null-safe: a null column value is treated as the empty string.
    sparkSession.udf.register("clearJSONStr", (col: String) => {
      Option(col).getOrElse("")
        .replace("\\", "")
        .replace("\"\"", "\"")
        .replace("\"{", "{")
        .replace("}\"", "}")
    })

    // Inclusive date range of parquet partitions to process.
    val from = "2021-06-01"
    val to = "2021-07-15"
    val timeSeqs = getTimeSequences(from, to)

    // Scala loop-control helpers (Breaks).
    import scala.util.control._

    var totalCount: Long = 0

    // BUG FIX: Breaks.break() throws BreakControl, which is caught only by a
    // matching `breakable { ... }` block. The original code called
    // loop.break() without the wrapper, so the first iteration ended with an
    // uncaught exception and sparkSession.close() was never reached.
    val loop = new Breaks
    loop.breakable {
      for (timeStr: String <- timeSeqs) {

        // Read one day's parquet directory into a DataFrame.
        val parquetItemDir = SOURCE_PARQUET_HOME_DIR.concat(timeStr).concat(SOURCE_PARQUET_ITEM_DIR_SUFFIX)
        val parquetToDF = sparkSession.read.parquet(parquetItemDir)

        // Accumulate and report the number of rows processed.
        val count: Long = parquetToDF.count()
        totalCount += count
        println(s"\t##----> ${timeStr} deal item count: ${count}, deal total count: ${totalCount}")

        // Register a per-day temp view so the UDF can be applied via SQL.
        val tabName = "temp_" + timeStr.replace("-", "")
        parquetToDF.createOrReplaceTempView(tabName)
        println(s"\t##----> create temp table: ${tabName}")

        // Clean the JSON column; cache because the result is written twice.
        val jsonClearDF = sparkSession.sql(s"select dt,vin,osver,model,appver,event_type,page_name,ctrl_name,event_name,clearJSONStr(args) as args from ${tabName}")
          .persist(StorageLevel.MEMORY_ONLY)

        // Write cleaned data as parquet.
        jsonClearDF
          .write
          .mode(SaveMode.Overwrite)
          .format("parquet")
          .save(SAVE_PARQUET_HOME_DIR.concat(timeStr).concat(SAVE_PARQUET_ITEM_DIR_SUFFIX))

        // Write cleaned data as CSV. (The read-only `inferSchema` option the
        // original set here is ignored by DataFrameWriter and was removed.)
        jsonClearDF
          .write
          .mode(SaveMode.Overwrite)
          .format("csv")
          // Field separator (default would be ",").
          .option("sep", ";")
          // Disable quoting and escaping so the JSON text is written verbatim
          // (defaults are `"` and `\` respectively).
          .option("quote", "")
          .option("escape", "")
          // Emit a header row with the column names.
          .option("header", "true")
          // Represent null and empty-string values as empty fields.
          .option("nullValue", "")
          .option("emptyValue", "")
          .save(SAVE_CSV_HOME_DIR.concat(timeStr).concat(SAVE_CSV_ITEM_DIR_SUFFIX))

        // Release the cached DataFrame before the next iteration.
        jsonClearDF.unpersist()

        // Stop after the first date — debug short-circuit kept from the
        // original; now exits the loop cleanly instead of crashing.
        loop.break()
      }
    }

    // Shut down the SparkSession (now actually reached).
    sparkSession.close()
  }


  /**
   * Builds the inclusive sequence of ISO-8601 date strings between `from`
   * and `to` (both "yyyy-MM-dd", both included in the result).
   *
   * @param from start date string, must not be null and must not be after `to`
   * @param to   end date string, must not be null
   * @return one "yyyy-MM-dd" string per day from `from` through `to`
   * @throws IllegalArgumentException               if a bound is null or from > to
   * @throws java.time.format.DateTimeParseException if a bound is malformed
   */
  def getTimeSequences(from: String, to: String): Seq[String] = {
    require(from != null && to != null)
    val fromDate: LocalDate = LocalDate.parse(from)
    val toDate: LocalDate = LocalDate.parse(to)
    require(!fromDate.isAfter(toDate))
    // Iterator avoids materializing intermediate LocalDate cells (Stream is
    // deprecated in newer Scala versions); semantics are unchanged.
    Iterator.iterate(fromDate)(_.plusDays(1))
      .takeWhile(current => !current.isAfter(toDate))
      .map(_.toString)
      .toList
  }
}
