package demo.spark.hurypoint.filedealer

import demo.spark.utils.{SparkCore, SparkSql}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.storage.StorageLevel
import ParquetDealer.getTimeSequences

object CsvDealer {

  /** Root directory holding one buried-point CSV export per day, named `<yyyy-MM-dd>.csv`. */
  val CSV_HOME_DIR = "/Users/icasue/Desktop/bury_point/map_event_track/"
  val CSV_FILE_SUFFIX = ".csv"

  /** One buried-point (app event tracking) row, matching the CSV column layout. */
  case class BuryPoint(dt: String, vin: String, osver: String, model: String,
                       appver: String, event_type: String, page_name: String,
                       ctrl_name: String, event_name: String, args: String)

  /**
   * Normalizes the escaped JSON payload found in the `args` CSV column:
   * strips backslash escapes, collapses doubled quotes, and unquotes embedded
   * objects (`"{` -> `{`, `}"` -> `}`). A null input yields the empty string.
   *
   * @param col raw (possibly null) cell value from the `args` column
   * @return cleaned JSON-ish string, never null
   */
  private[filedealer] def clearJsonStr(col: String): String =
    Option(col).getOrElse("")
      .replace("\\", "")
      .replace("\"\"", "\"")
      .replace("\"{", "{")
      .replace("}\"", "}")

  /**
   * Entry point: for each day in the configured date range, loads that day's
   * CSV, counts rows, registers a temp view and prints a 10-row sample with
   * the `args` column cleaned via the `clearJSONStr` UDF.
   */
  def main(args: Array[String]): Unit = {

    // NOTE(review): getContext is retained only for its side effect of creating
    // the SparkContext before the session; confirm SparkSql reuses it.
    val sparkContext: SparkContext = SparkCore.getContext("BuryPointDemo")("local[*]")
    val sparkSession: SparkSession = SparkSql.getSession("BuryPointDemo")("local[*]")

    // Skip corrupt files instead of failing the whole job.
    sparkSession.sql("set spark.sql.files.ignoreCorruptFiles=true")
    // When a multi-file read hits a missing file, skip it and keep reading the rest.
    sparkSession.sql("set spark.sql.files.ignoreMissingFiles=true")
    // Disable partition-column type inference so all partition values stay strings.
    sparkSession.sql("set spark.sql.sources.partitionColumnTypeInference.enabled=false")
    // Global schema merge is expensive; enable only if day-to-day schemas drift.
    //sparkSession.sql("set spark.sql.parquet.mergeSchema=true")

    // Register the JSON-cleanup function so SQL below can call clearJSONStr(args).
    sparkSession.udf.register("clearJSONStr", clearJsonStr _)

    // Build the inclusive sequence of days to process.
    val from = "2021-06-01"
    val to = "2021-07-15"
    val timeSeqs = getTimeSequences(from, to)

    var totalCount: Long = 0L
    for (timeStr <- timeSeqs) {

      // Read this day's CSV file into a cached DataFrame.
      val csvFile = s"$CSV_HOME_DIR$timeStr$CSV_FILE_SUFFIX"
      val csvToDF = sparkSession.read.format("csv")
        // Glob filter: restrict a directory read to matching files.
        //.option("pathGlobFilter", "*.parquet")
        // Recursive lookup disables partition inference.
        //.option("recursiveFileLookup", "true")
        // Modification-time filters.
        //.option("modifiedBefore", "2020-07-01T05:30:00")
        //.option("modifiedAfter", "2020-06-01T05:30:00")
        //.option("mergeSchema", "true")
        .option("sep", ",")
        .option("inferSchema", "true")
        .option("header", "true")
        .load(csvFile)
        .persist(StorageLevel.MEMORY_ONLY)
        //.as[BuryPoint](Encoders.bean(BuryPoint.getClass))

      try {
        // Accumulate and report processed row counts.
        val count: Long = csvToDF.count()
        totalCount += count
        println(s"\t##----> ${timeStr} deal item count: ${count}, deal total count: ${totalCount}")

        // Register a temp view named after the day, e.g. temp_20210601.
        val tabName = "temp_" + timeStr.replace("-", "")
        csvToDF.createOrReplaceTempView(tabName)
        println(s"\t##----> create temp table: ${tabName}")

        sparkSession.sql(s"select dt,vin,osver,model,appver,event_type,page_name,ctrl_name,event_name,clearJSONStr(args) as args from ${tabName}")
          .show(10)

        sparkSession.sql(s"select clearJSONStr(args) as args from ${tabName}")
          .limit(10)
          .foreach(row => println(row.getAs[String]("args")))

        // TODO: transform and write out as parquet.
        // TODO: write back as CSV with args normalized to standard JSON.
      } finally {
        // Fix: the original never released the cache, so every day's DataFrame
        // stayed pinned in memory and usage grew with each loop iteration.
        csvToDF.unpersist()
      }
    }

    // Shut down the SparkSession (also stops the underlying context).
    sparkSession.close()
  }

}
