package demo.spark.hurypoint.logpaser

import demo.spark.utils.{SparkCore, SparkSql}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode, SparkSession}

import scala.reflect.io.{Directory, Path}

/**
 * Goal: locally process the raw bury-point "log files" from 06-01 through 07-15,
 * storing them per day as CSV and parquet.
 * The extraction step cleans the embedded JSON argument data, removing redundant escaping.
 * ## NOTICE:
 * This is a purely offline workflow (local processing):
 *  1. depends on ops providing the production bury-point logs
 *  2. extracts and parses them offline (csv / parquet)
 *  3. then aggregates and analyses the log data offline
 *  4. TODO: load the analysis results into a relational / Hive database
 *     (easy to implement, example: #{@demo.spark.hurypoint.parser.AppSqlRunner})
 * */
object LogFileLocalDealer {

  /**
   * One parsed bury-point record, extracted from a pipe-separated log line.
   *
   * @param dt         collection timestamp (source column 0)
   * @param vin        vehicle VIN (column 1)
   * @param osver      head-unit OS version (column 4)
   * @param model      vehicle model (column 12)
   * @param appver     application version (column 21)
   * @param event_type event type (column 32)
   * @param page_name  page name (column 33)
   * @param ctrl_name  control name (column 35)
   * @param event_name event name (column 36)
   * @param args       bury-point JSON arguments, cleaned of extra escaping (column 38)
   */
  case class BuryPointParseUnit(dt: String, vin: String, osver: String,
                                model: String, appver: String,
                                event_type: String, page_name: String,
                                ctrl_name: String, event_name: String,
                                args: String)

  /**
   * Entry point: reads every log file under `logDir`, parses the relevant
   * columns, then writes one CSV and one parquet dataset per calendar day.
   */
  def main(args: Array[String]): Unit = {
    // NOTE(fix): the original also created a separate SparkContext via
    // SparkCore.getContext, which conflicts with the session's own context in
    // the same JVM. Open one SparkSession and reuse its SparkContext.
    val sparkSession: SparkSession = SparkSql.getSession("LogDealer")("local[*]")
    val sparkContext: SparkContext = sparkSession.sparkContext
    val sqlContext: SQLContext = sparkSession.sqlContext

    // Directory holding the raw bury-point log files.
    val logDir: Directory = Directory("/Users/icasue/Desktop/bury_point/bux1.0-map")
    // Per-day CSV output path template (%s = yyyy-MM-dd).
    val csvDirHome: String = "/Users/icasue/Desktop/bury_point/source_data_clear_csv/%s.csv"
    // Per-day parquet output path template (%s = yyyy-MM-dd).
    val parquetDirHome: String = "/Users/icasue/Desktop/bury_point/source_data_clear_parquet/%s.parquet"

    import sqlContext.implicits._

    // NOTE(fix): the original wrapped this loop in scala.util.control.Breaks
    // but never called break(), so the breakable block was dead code; removed.
    for (path: Path <- logDir.list if path.isFile) {
      println(path.path)

      // Parse each pipe-separated line into the columns we care about.
      val coreFieldsDF: DataFrame = sparkContext.textFile(path.path)
        .map(line => {
          val colUnit: Array[String] = line split "\\|"
          BuryPointParseUnit(
            colUnit(0),            // 0  collection time
            colUnit(1),            // 1  VIN
            colUnit(4),            // 4  head-unit OS version
            colUnit(12),           // 12 vehicle model
            colUnit(21),           // 21 app version
            colUnit(32),           // 32 event type
            colUnit(33),           // 33 page name
            colUnit(35),           // 35 control name
            colUnit(36),           // 36 event name
            clearJSON(colUnit(38)) // 38 bury-point JSON args (de-escaped)
          )
        })
        .toDF()

      // Distinct calendar days present in this file ("yyyy-MM-dd" prefix of dt).
      // NOTE(fix): Spark's Column.substr is 1-based; the original passed pos 0,
      // which Spark silently treats as 1. Use 1 explicitly (identical result).
      val timeSeqs: Seq[String] = coreFieldsDF
        .select($"dt" substr (1, 10)).toDF("simpleDt")
        .distinct()
        .map(row => row.getAs[String]("simpleDt"))
        .collect()
        .toList

      for (timePoint <- timeSeqs) {
        // Records belonging to the current day, ordered by timestamp.
        val timeGroupDF: DataFrame = coreFieldsDF
          .filter(row => row.getAs[String]("dt").startsWith(timePoint))
          .toDF()
          .sort($"dt".asc)

        // Write one parquet dataset per day.
        timeGroupDF.write
          .mode(SaveMode.Append)
          .format("parquet")
          .save(parquetDirHome.format(timePoint))

        // Write one CSV dataset per day.
        timeGroupDF.write
          .mode(SaveMode.Append)
          .format("csv")
          .option("sep", ";")
          // quote character (default: ")
          .option("quote", "")
          // escape character (default: \)
          .option("escape", "")
          // infer column types from the data (default: false);
          // otherwise the user supplies the schema directly
          .option("inferSchema", "false")
          // emit the column names as the first row
          .option("header", "true")
          // null / empty value handling
          .option("nullValue", "")
          .option("emptyValue", "")
          .save(csvDirHome.format(timePoint))
      }
    }

    // close spark session.
    sparkSession.close()
  }

  /**
   * Strips redundant escaping from an embedded JSON column value.
   *
   * Removes every backslash, collapses doubled quotes, and unwraps quoted
   * braces (`"{` -> `{`, `}"` -> `}`) so the value reads as plain JSON.
   *
   * NOTE(review): removing ALL backslashes also destroys legitimate JSON
   * escapes such as `\n` or `\"` inside string values — presumably acceptable
   * for this data set, but verify before reusing elsewhere.
   *
   * @param col raw column value; may be null (treated as empty string)
   * @return cleaned JSON string, never null
   */
  def clearJSON(col: String): String =
    Option(col).getOrElse("")
      .replace("\\", "")
      .replace("\"\"", "\"")
      .replace("\"{", "{")
      .replace("}\"", "}")

}

