package demo.spark.hurypoint.logpaser

import java.net.URI
import java.time.LocalDate
import java.util

import demo.spark.utils.{SparkCore, SparkSql}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs
import org.apache.hadoop.fs.{Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, SQLContext, SaveMode, SparkSession}

/**
 * 目标: 处理来自HDFS上原始埋点的"日志文件" 模拟(06-01至06-15)，按日进行存储csv与parquet.
 * 提取过程对JSON参数数据进行clear, 去掉多余的转义
 * ## NOTICE:
 * 该种方式解决数据来源问题，不再由运维人员提供日志文件，也属于离线操作(Local / 任务调度处理):
 *  1.编写Spark程序用于读取不同环境的埋点数据HDFS [##Finished]
 *  2.对HDFS上的埋点数据进行抽取与解析(csv / parquet) [##Finished]
 *  3.将解析结果存储至HDFS(文件) [##Finished]
 *  4.创建Hive表，数据源为HDFS上解析后的日志文件(使用parquet文件性能更好) [##Finished]
 *  5.采用HiveSql进行分析聚合 / 调用Spark程序对文件进行分析聚合
 *      [##TODO: 目前提供的能力是将日志数据抽取存储File以及Hive，该部分功能为实际需求，完成该部分功能简单聚合可读取(Hive), 复杂聚合可读取(File)完成]
 *  6.将聚合的结果(文件导入到Hive(HDFS))，结构化数据导入到(Hive / 关系型数据库Mysql)
 *      [##TODO: 该部分功能属于离线需求结果的数据落地，需要开发根据实际需求与资源进行商讨]
 * */
object LogFileClusterDealer {

  /** One parsed bury-point record: the core fields extracted from a raw '|'-separated log line. */
  private case class BuryPointParseUnit(dt: String, vin: String, osver: String, model: String,
                                        appver: String, event_type: String, page_name: String,
                                        ctrl_name: String, event_name: String, args: String)

  /** HDFS NameNode endpoint used for all filesystem and Spark I/O in this job. */
  private val PROTOCOL_HDFS = "hdfs://icasue111:9090"

  /** Prefix of the per-day Hive table names (e.g. bury_point_20210601). */
  private val HIVE_TAB_PREFIX_DEFAULT = "bury_point_"

  /** External-table DDL template; %s slots are the table name and the parquet location. */
  private val HIVE_TAB_CREATE_FORMAT_DDL: String =
    "create external table if not exists %s " +
      "(dt string,vin string,osver string,model string,appver string,event_type string,page_name string, ctrl_name string,event_name string,args string) " +
      "stored as parquet " +
      "location '%s';"

  /**
   * Entry point, meant to be triggered by a scheduler hook every day at 00:00:01
   * with the HDFS directory of raw bury-point logs (the directory may aggregate
   * several days, e.g. 6.1 - 6.15).
   * Preparation for the simulation:
   *  1. Upload the raw 6.1 - 6.15 bury-point log to HDFS (BUX1.0_map_6月1-15日.txt).
   *  2. Trigger logParseAndStoreInHDFS manually to simulate the daily schedule.
   */
  def main(args: Array[String]): Unit = {
    // Local path of the raw log file (only used by the one-off upload below).
    val logFileStoreDirInLocal: String = "/Users/icasue/Desktop/bury_point/bux1.0-map/BUX1.0_map_6月1-15日.txt";
    // HDFS log directory the scheduler hook would pass in.
    // FIX: was declared `var` although it is never reassigned.
    val logDirInHDFS: String = "/log/bury_point/20210601_20210615/";
    // Output-dir template for parsed logs; %s is the day token (one dir per day).
    val logParseDirOutFDFS_TimeTemplate: String = "/log/bury_point/parse/%s/";

    // One-off step (already done): push the local log file into HDFS to simulate
    // logs originating on HDFS instead of being hand-delivered by ops.
    //val logFileInHDFS: String = logDirInHDFS concat "application.log";
    //uploadLogFileToHDFS(logFileStoreDirInLocal,logFileInHDFS);

    // Parse and store the HDFS logs; returns the parquet dirs for the Hive mapping.
    val logOurSeqs = logParseAndStoreInHDFS(logDirInHDFS, logParseDirOutFDFS_TimeTemplate)

    // Map each parquet dir to a Hive table: one database per month (db_yyyyMM),
    // one table per day (bury_point_yyyyMMdd).
    createHiveTablesUsingParquet(logOurSeqs);
  }

  /**
   * Create Hive external-table mappings over parquet output directories on HDFS.
   *
   * Each dir name is expected to end with ".../yyyy_MM_dd.parquet"; the day token
   * determines the database (db_yyyyMM, created on demand) and the table name
   * (prefix + yyyyMMdd).
   *
   * @param parquetFileOutDirs parquet dirs produced by logParseAndStoreInHDFS; null/empty is a no-op
   * @param hiveTblPrefix      table-name prefix, defaults to HIVE_TAB_PREFIX_DEFAULT
   * @param hiveTblFormatDDL   DDL template with %s slots for table name and location
   */
  def createHiveTablesUsingParquet(parquetFileOutDirs: java.util.List[String],
                                   hiveTblPrefix: String = HIVE_TAB_PREFIX_DEFAULT)
                                  (implicit hiveTblFormatDDL: String = HIVE_TAB_CREATE_FORMAT_DDL): Unit = {
    // Nothing to map.
    if (parquetFileOutDirs == null || parquetFileOutDirs.size() == 0)
      return

    // Hive-enabled session against the configured metastore.
    val sparkSession: SparkSession = SparkSql.getHiveSession("LogDealerLoadToHive",metastoreURIS = "thrift://mac14:9083")("local[*]");

    import sparkSession.sqlContext.implicits._;
    try {
      // Databases known to the metastore (name -> placeholder), extended locally
      // as new monthly databases are created below.
      var databases: Map[String,String] = sparkSession.sql("show databases;")
        .toDF("db")
        .map(row => (row.getAs[String]("db"),""))
        .collect()
        .toMap[String,String]

      var idx = 0
      while (idx < parquetFileOutDirs.size()) {
        val oneParquetDir: String = parquetFileOutDirs.get(idx)
        // e.g. ".../2021_06_01.parquet" -> "20210601"
        val parquetDirDayTime_yyyyMMdd: String =
          oneParquetDir.substring(oneParquetDir.lastIndexOf("/") + 1, oneParquetDir.lastIndexOf("."))
            .replace("_","")

        // One database per month: db_yyyyMM; create it the first time we see it.
        val dbName: String = "db_" concat parquetDirDayTime_yyyyMMdd.substring(0,6)
        if (!databases.contains(dbName)) {
          sparkSession.sql(s"create database ${dbName};")
          databases += dbName -> ""
        }
        sparkSession.sql(s"use ${dbName};");

        // One external table per day, mapped onto the parquet dir.
        val hiveTabName: String = hiveTblPrefix concat parquetDirDayTime_yyyyMMdd
        val hiveTableDDL: String = hiveTblFormatDDL format (hiveTabName,oneParquetDir);
        sparkSession.sql(hiveTableDDL)

        idx += 1
      }
    } finally {
      // Always release the session.
      sparkSession.close();
    }
  }

  /**
   * Parse the raw bury-point log files under an HDFS directory and store the
   * extracted core fields per day as parquet and csv files on HDFS.
   *
   * @param logDirInHDFS raw log dir (may aggregate several days, e.g. 6.1 - 6.15).
   *                     The hook triggers daily; today's still-growing lines are
   *                     skipped, and days whose output already exists are skipped
   *                     too, so re-runs only parse the not-yet-landed days.
   * @param fileOutDirInHDFS_TimeTemplate output-dir template; %s is the day token (yyyy_MM_dd)
   * @return the parquet output dirs written by this run (for the Hive mapping)
   *
   * NOTICE:
   *  1. ".parquet" suffixes the parquet output dir
   *  2. ".csv" suffixes the csv output dir
   */
  def logParseAndStoreInHDFS(logDirInHDFS: String, fileOutDirInHDFS_TimeTemplate: String): util.List[String] = {
    // Spark entry points (NOTE(review): helpers presumably share one context — confirm in SparkCore/SparkSql).
    val sparkContext: SparkContext = SparkCore.getContext("LogDealer")("local[*]");
    val sparkSession: SparkSession = SparkSql.getSession("LogDealer")("local[*]");
    val sqlContext: SQLContext = sparkSession.sqlContext;
    // Explicit-user HDFS handle against the NameNode in PROTOCOL_HDFS.
    val fileSystem: org.apache.hadoop.fs.FileSystem = org.apache.hadoop.fs.FileSystem.get(
      URI.create(PROTOCOL_HDFS), new Configuration, "icasue")

    import sqlContext.implicits._;

    // Accumulator collecting every parquet output dir written by this run.
    val parsedParquetDirAccumulator: SeqAccumulator[String,String] =
      new SeqAccumulator[String,String](origin => origin)
    sparkContext.register(parsedParquetDirAccumulator)

    try {
      // Today's date: exclude today's (still growing) log lines.
      val calenderToday: LocalDate = LocalDate.now();

      // Map each line to (dayTime, line); assumes every line starts with yyyy-MM-dd
      // (NOTE(review): lines shorter than 10 chars would throw — confirm producer format).
      val timeLogMappingDF: DataFrame = sparkContext.textFile(PROTOCOL_HDFS concat logDirInHDFS)
        .map((line: String) => (line.substring(0,10),line))
        .filter(!_._1.equals(calenderToday.toString))
        .toDF("dayTime","log");

      // Distinct day times in chronological order.
      // FIX: the original called Dataset.sort() with no columns, which is a no-op;
      // sort the collected values instead so days are processed oldest-first.
      val aggregateTimeSeqs: Seq[String] = timeLogMappingDF.select("dayTime")
        .distinct()
        .map(row => row.getAs[String]("dayTime"))
        .collect()
        .toList
        .sorted;

      // Days whose logs have not been parsed and landed yet (insertion order kept).
      val escapeParseTimeLog: java.util.Map[String,Path] = new util.LinkedHashMap;

      for(aggregateTime <- aggregateTimeSeqs){
        val aggregateTimeStoreUnit : String = aggregateTime.replace("-", "_")
        val timeLogParseDir: String = fileOutDirInHDFS_TimeTemplate.format(aggregateTimeStoreUnit)
        val path: Path = new Path(timeLogParseDir)
        // FIX: the outputs are written to "<dir>.parquet" / "<dir>.csv" (sibling
        // paths, since Path drops the trailing slash), so the original check on
        // the bare day dir never matched and every day was re-parsed and
        // re-appended on each run. Check the parquet output path instead.
        val parquetOutPath: Path = new Path(path.toString concat ".parquet")
        if(!fileSystem.exists(parquetOutPath))
          escapeParseTimeLog.put(aggregateTime,path)
      }

      // Extract the core fields for every line of a not-yet-parsed day.
      // Cached because it is filtered once per escaped day below.
      val coreFieldsTimeMappedDS: Dataset[(String,BuryPointParseUnit)] = timeLogMappingDF
        .filter(timeLogRow => escapeParseTimeLog.containsKey(timeLogRow.getAs[String]("dayTime")))
        .map(timeLogRow => {
          // NOTE(review): the column indices below encode the raw '|'-separated
          // layout of the producer — confirm against its format spec.
          val colUnit: Array[String] = timeLogRow.getAs[String]("log") split "\\|";
          (
            timeLogRow.getAs[String]("dayTime"),
            BuryPointParseUnit(
              colUnit(0),            // 0  collect time
              colUnit(1),            // 1  VIN
              colUnit(4),            // 4  head-unit OS version
              colUnit(12),           // 12 vehicle model
              colUnit(21),           // 21 app version
              colUnit(32),           // 32 event type
              colUnit(33),           // 33 page name
              colUnit(35),           // 35 control name
              colUnit(36),           // 36 event name
              clearJSON(colUnit(38)) // 38 bury-point args (de-escaped JSON)
            )
          )
        })
        .cache();

      // For each escaped day: filter its lines and append parquet + csv outputs.
      val iterator: java.util.Iterator[util.Map.Entry[String,Path]] = escapeParseTimeLog.entrySet().iterator()
      while (iterator.hasNext){
        val mapped_DayTime_LogPath: util.Map.Entry[String,Path] = iterator.next()
        val dayTime: String = mapped_DayTime_LogPath.getKey
        val logOutPath: Path = mapped_DayTime_LogPath.getValue

        // This day's records only.
        val timeAggregatedLogParseUnitDF: DataFrame = coreFieldsTimeMappedDS
          .filter(_._1.equals(dayTime))
          .map(_._2)
          .toDF();

        // Full hdfs:// URIs of the two output dirs.
        val actuallyParquetHome: String = PROTOCOL_HDFS concat (logOutPath.toString concat ".parquet")
        val actuallyCsvHome: String = PROTOCOL_HDFS concat (logOutPath.toString concat ".csv")

        // Record the parquet dir for the Hive mapping step.
        parsedParquetDirAccumulator.add(actuallyParquetHome);

        // Append-write parquet.
        timeAggregatedLogParseUnitDF.write
          .mode(SaveMode.Append)
          .format("parquet")
          .save(actuallyParquetHome);

        // Append-write csv. FIX: dropped "inferSchema" — it is a read-side option
        // and has no effect on DataFrameWriter.
        timeAggregatedLogParseUnitDF.write
          .mode(SaveMode.Append)
          .format("csv")
          .option("sep", ";")
          // quote char (default: ") disabled.
          .option("quote","")
          // escape char (default: \) disabled.
          .option("escape","")
          // write the header line.
          .option("header", "true")
          // null / empty value handling.
          .option("nullValue","")
          .option("emptyValue","")
          .save(actuallyCsvHome);
      }
      parsedParquetDirAccumulator.value
    }finally {
      // Always release the HDFS handle and both Spark entry points.
      fileSystem.close();
      sparkContext.stop();
      sparkSession.close();
    }
  }


  /**
   * Upload a local bury-point log file into HDFS via the filesystem client.
   * Aborts (with a message) if the target HDFS directory already exists.
   *
   * FIX: the FileSystem handle and the Spark entry points were only closed on the
   * happy path; cleanup now runs in finally so a failed upload does not leak them.
   *
   * @param localLogDir local path of the log file to upload
   * @param hdfsLogDir  target HDFS path (must not exist yet)
   */
  def uploadLogFileToHDFS(localLogDir:String, hdfsLogDir:String): Unit = {
    import scala.util.control.Breaks;
    import SparkSql.metastoreUris;
    metastoreUris = "thrift://mac14:9083";

    val sparkContext: SparkContext = SparkCore.getContext("LoadLogFile")("local[*]");
    // Hive-enabled session (kept for parity with the alternative Hive-based upload below).
    val sparkSession: SparkSession = SparkSql.getHiveSession("LoadLogFile")("local[*]")
    // HDFS filesystem bound to the job's NameNode.
    val configuration: Configuration = new Configuration
    configuration.set("fs.defaultFS", PROTOCOL_HDFS)
    val fileSystem: org.apache.hadoop.fs.FileSystem = org.apache.hadoop.fs.FileSystem.get(configuration)

    // Early-exit control for the "already exists" case.
    val loop: Breaks = new Breaks;
    try {
      loop.breakable {
        // Listing HDFS dirs through Hive JDBC is not supported:
        //sparkSession.sql("dfs -ls /").show();

        // List the filesystem root through the FileSystem client instead.
        val rootPaths = fileSystem.listStatus(new org.apache.hadoop.fs.Path("/"), (pathItem: fs.Path) => pathItem.getParent.getName.equals(""));
        for (pathItem <- rootPaths) println(pathItem.getPath.toUri.toString);

        // Abort if the target dir already exists.
        val hdfsPath = new org.apache.hadoop.fs.Path(hdfsLogDir)
        if(fileSystem.exists(hdfsPath)){
          println("## Error: 日志目录已存在!")
          loop.break();
        }

        // Alternative 1 (rejected): Hive "load data" requires a table, which is unreasonable here:
        //sparkSession.sql("create database icasue3;");
        //sparkSession.sql("use icasue3;");
        //sparkSession.sql(s"create [external] table bury_point_0601_0615(ling string) row format delimited fields terminated by '\n' location '${hdfsLogDir}' ")
        //sparkSession.sql(s"load data local inpath '${localLogDir}' overwrite into table bury_point_0601_0615;")

        // Alternative 2 (used): filesystem-client upload; delSrc = false keeps the local copy.
        fileSystem.copyFromLocalFile(false, new org.apache.hadoop.fs.Path(localLogDir), hdfsPath)
      }
    } finally {
      fileSystem.close();
      sparkContext.stop();
      sparkSession.close();
    }
  }

  /**
   * Strip the redundant escaping from an embedded-JSON column: removes all
   * backslashes, collapses doubled quotes, and unwraps quoted braces.
   * A null column is treated as the empty string.
   *
   * @param col raw column value, possibly null
   * @return the de-escaped JSON text (never null)
   */
  def clearJSON(col: String): String =
    Option(col).getOrElse("")
      .replace("\\", "")
      .replace("\"\"", "\"")
      .replace("\"{", "{")
      .replace("}\"", "}")

}



