package com.hxk.merge

import com.hxk.spark.merge.LogMessage
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory

object TimeMerge {

  // Logger for this job's own messages (distinct from Spark's internal logging).
  private val logger = LoggerFactory.getLogger("TimeMerge")

  /**
   * Entry point for the TimeMerge Spark job.
   *
   * Pipeline: read raw CSV lines -> parse into [[LogMessage]] -> group by user id
   * -> merge consecutive time-adjacent records per user -> write results as CSV text.
   *
   * Configurable via Spark conf:
   *  - spark.timeMerge.dataInputPath  (default "data/input.csv")
   *  - spark.timeMerge.baseOutputPath (default "data/output")
   */
  def main(args: Array[String]): Unit = {

    // 1. Build the SparkConf before creating the SparkContext.
    val conf = new SparkConf()
    // 2. Application name is required.
    conf.setAppName("TimeMerge")
    // 3. Only force local mode when no master was supplied, so the same jar
    //    runs both locally and on a cluster (spark-submit sets spark.master).
    if (!conf.contains("spark.master")) {
      conf.setMaster("local")
    }
    // Enable Kryo serialization (faster/smaller than java.io.Serializable) so
    // LogMessage instances can be shipped between driver and executors.
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    // Register the classes we shuffle; unregistered classes make Kryo write the
    // full class name with every instance, negating most of the benefit.
    conf.registerKryoClasses(Array(classOf[LogMessage]))

    // 4. SparkContext is the entry point of a Spark application.
    val sc = new SparkContext(conf)

    val dataInputPath = conf.get("spark.timeMerge.dataInputPath", "data/input.csv")
    val baseOutputPath = conf.get("spark.timeMerge.baseOutputPath", "data/output")

    // 5. Job body.
    // 5.1 Load the raw data (input.csv), one CSV line per record, e.g.:
    //     UserA,LocationA,2018-01-01 08:00:00,60
    //     UserA,LocationA,2018-01-01 09:00:00,60
    //     UserA,LocationB,2018-01-01 10:00:00,60
    //     UserA,LocationA,2018-01-01 11:00:00,60
    //     UserB,LocationA,2018-01-01 12:00:00,59
    //     UserB,LocationA,2018-01-01 13:00:00,60
    val rawRDD: RDD[String] = sc.textFile(dataInputPath)

    // 5.2 Parse each raw line into a structured LogMessage, e.g.:
    //     {"user_id": "UserA", "location": "LocationA",
    //      "start_time": "2018-01-01 08:00:00", "residence_time": 60}
    val parsedRDD: RDD[LogMessage] = rawRDD.map(RawDataParser.parse(_))

    // 5.3 Core processing (TimeMerge).
    // 5.3.1 Group records by user_id. getUserId may return a CharSequence
    //       (e.g. Avro Utf8), so normalize to String for a stable key.
    val userGroupedRDD: RDD[(String, Iterable[LogMessage])] =
      parsedRDD.groupBy(logMessage => logMessage.getUserId.toString)

    // 5.3.2 Run the per-user time merge on each group's values. Records with
    //       the same location in adjacent time slots are merged, e.g. UserA's
    //       08:00 and 09:00 LocationA rows collapse into one 120-minute row.
    val mergedGroupRDD = userGroupedRDD.flatMapValues { iter =>
      val oneUserGroupProcess = new OneUserGroupProcess(iter.toArray)
      oneUserGroupProcess.timeMerge()
    }

    // 5.3.3 Format results back to CSV lines for output. (Writing Parquet
    //       instead would require the org.apache.parquet:parquet-avro jar.)
    val saveProcessRDD = mergedGroupRDD.map { case (_, logMessage) =>
      val userId = logMessage.getUserId
      val location = logMessage.getLocation
      val startTime = logMessage.getStartTime
      val residenceTime = logMessage.getResidenceTime
      s"${userId},${location},${startTime},${residenceTime}"
    }

    // saveAsTextFile fails if the target exists, so clear it first.
    deleteIfExists(sc, baseOutputPath)
    saveProcessRDD.saveAsTextFile(baseOutputPath)

    // 6. Release cluster resources.
    sc.stop()
    logger.info("END TimeMerge")
  }

  /**
   * Deletes `outputPath` (recursively) if it already exists, so that
   * `saveAsTextFile` does not fail with FileAlreadyExistsException.
   * Uses the path's own FileSystem, so it works for both local and HDFS paths.
   */
  private def deleteIfExists(sc: SparkContext, outputPath: String): Unit = {
    val path = new Path(outputPath)
    val fileSystem = path.getFileSystem(sc.hadoopConfiguration)
    if (fileSystem.exists(path)) {
      fileSystem.delete(path, true)
    }
  }
}
