package com.lvmama.rhino.analyze.push

import com.lvmama.rhino.common.entity.JobConfig
import com.lvmama.rhino.common.utils.JDBCUtil.JDBCTemplate
import com.lvmama.rhino.common.utils.spark.SparkApplication
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.filter.{CompareFilter, RowFilter, SubstringComparator}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.joda.time.DateTime

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
  * Created by yuanxiaofeng on 2016/7/18.
  */
class DetailLogTopK(config: JobConfig, tableName: String, k: Int) extends SparkApplication {
  override var appName: String = "DetailLogTopKJob"
  override var sparkConfig: Map[String, String] = config.spark

  /**
    * Reads yesterday's detail-log rows from the HBase table `tableName`
    * (column family `detail_info`), counts occurrences per product, keeps one
    * representative record per product, and appends the top-`k` products by
    * count to the `auto_test` JDBC table.
    */
  def execute(): Unit = {
    sparkConfig += ("spark.app.name" -> appName)
    // NOTE(review): "local[2]" overrides any cluster master supplied via
    // config — looks like a debugging leftover; confirm before deploying.
    sparkConfig += ("spark.master" -> "local[2]")

    val jdbcDefaults = JDBCTemplate.JDBCDefaultSet
    val connProps = JDBCTemplate.getConProperties
    // Hoisted out of the filter closure: the original allocated and formatted
    // a fresh DateTime for every single record.
    val yesterday = new DateTime().plusDays(-1).toString("yyyyMMdd")

    withSparkContext { sc =>
      val sqlContext = new SQLContext(sc)
      import sqlContext.implicits._
      import com.lvmama.rhino.common.utils.hbase._

      // Tuple layout: _1 = HBase row key, _2 = product_id, _3 = version,
      // _4 = date, _5.._17 = the remaining columns in `select` order.
      sc.hbaseTable[(String, String, Option[String], String, Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String], Option[String])](tableName)
        .select("product_id", "version", "date", "to_dest", "method", "from_dest", "main_dest_id", "first_channel", "second_channel", "supp_goods_id", "product_dest_id", "branch_type", "goods_id", "category_id", "lvversion", "from_dest_id")
        .inColumnFamily("detail_info")
        .coalesce(24)
        .filter(_._4.equals(yesterday))
        // Keep one representative record per product plus a running count.
        // The original reduceByKey(_ ++ _) concatenated whole Arrays only to
        // read .length and element 0 afterwards — that shuffles full arrays
        // and makes each merge O(n); counting directly avoids both.
        .map(x => (x._2, ((x._3, x._5, x._6, x._7, x._8, x._9, x._10, x._11, x._12, x._13, x._14, x._15, x._16, x._17), 1)))
        .reduceByKey((a, b) => (a._1, a._2 + b._2))
        .map { case (productId, (r, count)) =>
          (productId, count, r._1, r._2, r._3, r._4, r._5, r._6, r._7, r._8, r._9, r._10, r._11, r._12, r._13, r._14)
        }
        .toDF("product_id", "counts", "version", "to_dest", "method", "from_dest", "main_dest_id", "first_channel", "second_channel", "supp_goods_id", "product_dest_id", "branch_type", "goods_id", "category_id", "lvversion", "from_dest_id")
        // orderBy + limit already yields the global top-k; the original's RDD
        // sortBy immediately before toDF was a redundant second full sort.
        .orderBy(col("counts").desc)
        .limit(k)
        .write.mode(SaveMode.Append)
        .option("driver", jdbcDefaults("driver"))
        .jdbc(jdbcDefaults("conn"), "auto_test", connProps)
    }
  }

  /** Serializes an HBase Scan to the Base64 form TableInputFormat accepts. */
  def convertScanToString(scan: Scan): String = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }

}

object DetailLogTopK {
  // Job configuration shared by every invocation in this JVM.
  val config = JobConfig()

  /** Convenience entry point: constructs the job and runs it immediately. */
  def apply(tableName: String, k: Int): Unit = {
    val job = new DetailLogTopK(config, tableName, k)
    job.execute()
  }
}

