package com.yongche.kafka

import com.yongche.utils.TimeUtils
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{StreamingContext, Time}
import org.joda.time.DateTime

/**
  * Created by yd on 2016/10/20.
  */
case class KafkaInfo(topic: String) {

  /**
    * Writes one time-bucket of Kafka messages to HDFS as parquet.
    *
    * @param fn        converts an RDD of raw message values into a DataFrame
    * @param rdd       (timeKey, message) pairs for the current batch
    * @param timestamp batch time; used to name the failure path when dt is null
    * @param dt        time bucket (yyyyMMddHH) to write; null selects the
    *                  records whose key could not be parsed and routes them
    *                  to the failure directory instead
    */
  def write2HDFS(fn: (RDD[String] => DataFrame))(rdd: RDD[(String, String)], timestamp: Time)(dt: String): Unit = {
    dt match {
      // No time bucket: persist the null-keyed records under the failure
      // directory, named by the batch timestamp. Overwrite so a re-run of
      // the same batch replaces the previous attempt instead of duplicating.
      case null =>
        val failed = rdd.filter(_._1 == null)
        // NOTE: renamed from `dt` — the original shadowed the matched parameter.
        val batchTime = new DateTime(timestamp.milliseconds)
        val path = s"/etl/${topic}/fail/${batchTime.toString(TimeUtils.YYYYMMDD)}/${batchTime.toString(TimeUtils.YYYYMMDDHHMM)}"
        fn(failed.map(_._2)).write.mode(SaveMode.Overwrite).parquet(path)

      // A concrete hour bucket: keep only the messages keyed with that hour
      // and append them under /etl/<topic>/<day>/<hour>.
      case yyyymmddhh =>
        val data = rdd.filter(line => yyyymmddhh.equals(line._1))
        val day = DateTime.parse(yyyymmddhh, TimeUtils.DF_YYYYMMDDHH)
        val path = s"/etl/${topic}/${day.toString(TimeUtils.YYYYMMDD)}/${yyyymmddhh}"
        fn(data.map(_._2)).write.mode(SaveMode.Append).parquet(path)
    }
  }

  /**
    * Builds the Kafka direct input stream for this topic.
    *
    * Resumes from the offsets previously stored under /etl/<topic>/current
    * when present; otherwise creates a fresh direct stream on the topic.
    * Each batch persists its offset ranges before values flow downstream.
    *
    * @param ssc     the StreamingContext
    * @param brokers Kafka broker list ("host:port,host:port")
    * @param zkHost  ZooKeeper connection string used by the offset store
    * @return stream of non-blank message values (keys are dropped)
    */
  def streaming(ssc: StreamingContext)(brokers: String, zkHost: String): DStream[String] = {
    // Backend that persists/queries consumer offsets (ZooKeeper-based).
    val offsetStore = OffsetStore(zkHost)
    // Option(...) maps a null query result (no stored offsets yet) to None.
    val offsetJson = Option(offsetStore.query(s"/etl/${topic}/current"))
    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> brokers,
      "fetch.message.max.bytes" -> "104857600")
    val streams = offsetJson match {
      // Stored offsets found: resume exactly where the previous run stopped.
      case Some(json) =>
        val fromOffset = OffsetJson.fromJson(json)
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffset, messageHandler)
      // First run for this topic: let Kafka choose the starting offsets.
      case None =>
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set(topic))
    }
    streams.transform(saveKafkaOffset(offsetStore) _)
      .map(_._2)                               // keep only the message value
      .filter(line => line.trim.nonEmpty)      // drop blank log lines
  }


  /**
    * Records the Kafka offset ranges of the current batch and returns the
    * RDD unchanged, so it can be plugged into DStream.transform.
    *
    * Offsets are saved twice: under a per-minute key for auditing, and under
    * /etl/<topic>/current, which streaming() reads on restart.
    *
    * @param offsetStore offset persistence backend
    * @param rdd         the batch's KafkaRDD (must implement HasOffsetRanges)
    * @param timestamp   current batch time
    * @return the input RDD, untouched
    */
  private def saveKafkaOffset(offsetStore: OffsetStore)(rdd: RDD[(String, String)], timestamp: Time): RDD[(String, String)] = {
    // Only the underlying KafkaRDD exposes its offset ranges; casting to
    // HasOffsetRanges is the documented way to read them from a direct stream.
    val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    val offsetJson = OffsetJson.toJson(offsetRanges)
    val batchMinute = new DateTime(timestamp.milliseconds).toString(TimeUtils.YYYYMMDDHHMM)
    println(s"${batchMinute}-本次读取的数据范围为:${offsetJson}") // log the offset range of this batch
    // Per-minute audit entry plus the "current" pointer used on restart.
    offsetStore.save(s"/etl/${topic}/${batchMinute}", offsetJson)
    offsetStore.save(s"/etl/${topic}/current", offsetJson)
    rdd
  }
}
