package com.etc

import com.alibaba.fastjson.JSONArray
import com.etc.util.{District, HbaseUtil, MapUtil}
import com.uber.h3core.H3Core
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SQLContext, SparkSession}
import org.apache.hadoop.mapreduce.Job

import java.util
import scala.collection.JavaConversions._

/**
 * @Author kalista
 * @Description Offline computation of virtual (pickup) stations from order data
 * @Date 2021/5/25  15:16
 * */
object VirtualStationsProcessor {

  // Single shared H3 instance; creating it is expensive and one instance is reusable.
  val h3 = H3Core.newInstance

  /**
   * Converts a latitude/longitude pair to its H3 cell index (the hexagon grid id).
   *
   * @param lat latitude in degrees
   * @param lon longitude in degrees
   * @param res H3 resolution (0-15); higher resolution means smaller hexagons
   * @return the H3 cell index as a Long
   */
  def locationToH3(lat: Double, lon: Double, res: Int): Long = {
    h3.geoToH3(lat, lon, res)
  }

  def main(args: Array[String]): Unit = {

    val HTAB_HAIKOU_ORDER = "HTAB_HAIKOU_ORDER"
    import org.apache.spark._
    // Silence Spark's framework logging so the job's own output stays readable.
    Logger.getLogger("org").setLevel(Level.WARN)
    // local[1] runs Spark in-process with a single worker thread; local[*] would
    // size the thread pool to the number of CPU cores instead.
    val conf = new SparkConf().setAppName("Virtual-stations").setMaster("local[1]")

    val sparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    // Fetch the administrative districts of Haikou once on the driver.
    val districtList = new java.util.ArrayList[com.etc.util.District]()
    val array: JSONArray = MapUtil.getDistricts("海口市")
    MapUtil.parseDistrictInfo(array, null, districtList)

    // Broadcast the district list so every executor receives one read-only copy
    // instead of re-serializing it per task (a standard Spark optimization).
    // Used by the district-assignment step planned in the TODO at the bottom.
    val districtsBroadcast: Broadcast[java.util.ArrayList[District]] =
      sparkSession.sparkContext.broadcast(districtList)

    val hbConf = HBaseConfiguration.create(sparkSession.sparkContext.hadoopConfiguration)
    hbConf.set("hbase.zookeeper.quorum", "hdp-01,hdp-02,hdp-03")
    hbConf.set("hbase.zookeeper.property.clientPort", "2181")

    val sqlContext = sparkSession.sqlContext

    // Load the HBase order table as a DataFrame and expose it to Spark SQL.
    val order: DataFrame = HBaseLoader.loadData(hbConf, sqlContext, HTAB_HAIKOU_ORDER)
    println(order.count())
    order.createOrReplaceTempView("order")

    sparkSession.udf.register("locationToH3", locationToH3 _)

    // Rasterize each pickup point into an H3 hexagon at resolution 12.
    // Spark casts the string lat/lng columns to Double for the UDF call.
    val gridDf: DataFrame = sparkSession.sql(
      s"""
         |select
         |ORDER_ID,
         |CITY_ID,
         |STARTING_LNG,
         |STARTING_LAT,
         |locationToH3(STARTING_LAT,STARTING_LNG,12) as h3code
         |from order
         |""".stripMargin)

    print("h3栅格化后的数据:")
    gridDf.show()

    gridDf.createOrReplaceTempView("order_grid")

    // Keep only hexagons with at least 10 pickups — these become station candidates.
    val groupCountDf: Dataset[Row] = gridDf.groupBy("h3code").count().filter(" count >=10 ")

    print("根据h3code聚合后的数据:")
    groupCountDf.show()

    groupCountDf.createOrReplaceTempView("groupcount")

    // Pick one representative point per qualifying hexagon: the row with the
    // smallest (lng, lat) ordering becomes the virtual station's location.
    // FIX: a window-function alias cannot be filtered with HAVING (no GROUP BY
    // here); the standard pattern is to rank in a subquery and filter outside.
    val frame: DataFrame = sparkSession.sql(
      s"""
         |select ORDER_ID, CITY_ID, STARTING_LNG, STARTING_LAT
         |from (
         |  select
         |  ORDER_ID,
         |  CITY_ID,
         |  STARTING_LNG,
         |  STARTING_LAT,
         |  row_number() over(partition by order_grid.h3code order by STARTING_LNG,STARTING_LAT asc) rn
         |  from order_grid join groupcount on groupcount.h3code = order_grid.h3code
         |) ranked
         |where rn = 1
         |""".stripMargin)

    // Persist the virtual stations back to HBase.
    HBaseLoader.saveOrWriteDate(hbConf, frame, "VIRTUAL_STATION")

    // TODO: determine which administrative district each station falls in, to
    // produce district -> station-count relationships, e.g.:
    //   Taiyuan -> Xiaodian district -> 10 stations
    //   Taiyuan -> Jinyuan district -> 34 stations
  }

}
/**
 * Hbase表数据加载器
 */
/**
 * HBase table loader/writer: reads order rows into Spark and writes
 * virtual-station rows back.
 */
object HBaseLoader {

  // All order data lives in one column family with these four qualifiers.
  private val FAMILY = Bytes.toBytes("f1")
  private val COLUMNS = Seq("ORDER_ID", "CITY_ID", "STARTING_LNG", "STARTING_LAT")

  /**
   * Scans `tableName` restricted to family f1 and the four order columns,
   * returning the raw (rowkey, Result) pairs.
   *
   * @param configuration HBase/Hadoop configuration (mutated: table + scan are set)
   * @param sc            active SparkContext
   * @param tableName     HBase table to scan
   */
  def loadData(configuration: Configuration, sc: SparkContext, tableName: String):
  RDD[(ImmutableBytesWritable, Result)] = {

    val scanner = new Scan
    scanner.addFamily(FAMILY)
    COLUMNS.foreach(column => scanner.addColumn(FAMILY, Bytes.toBytes(column)))

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil
    // TableInputFormat expects the Scan serialized into a Base64 string.
    val scanToString = Base64.encodeBytes(ProtobufUtil.toScan(scanner).toByteArray)

    import org.apache.hadoop.hbase.mapreduce.TableInputFormat
    configuration.set(TableInputFormat.INPUT_TABLE, tableName)
    configuration.set(TableInputFormat.SCAN, scanToString)

    sc.newAPIHadoopRDD(configuration, classOf[TableInputFormat],
      classOf[ImmutableBytesWritable], classOf[Result])
  }

  /**
   * Loads `tableName` as a DataFrame with the four order columns, all StringType.
   */
  def loadData(configuration: Configuration, sqlContext: SQLContext, tableName: String): DataFrame = {
    val raw = loadData(configuration, sqlContext.sparkContext, tableName)

    // Lazy per-partition transformation: mapping the iterator directly avoids
    // buffering a whole partition into an intermediate collection.
    val rowRDD = raw.mapPartitions(_.map { case (_, result) =>
      Row.fromSeq(COLUMNS.map(column =>
        Bytes.toString(result.getValue(FAMILY, Bytes.toBytes(column)))))
    })

    // Field types must match the Row contents (all strings here).
    val structType = StructType(COLUMNS.map(StructField(_, StringType)))
    sqlContext.createDataFrame(rowRDD, structType)
  }

  /**
   * Writes the virtual-station DataFrame to HBase. Expected input shape:
   *
   * +--------------+-------+------------+------------+
   * |      ORDER_ID|CITY_ID|STARTING_LNG|STARTING_LAT|
   * +--------------+-------+------------+------------+
   * |17592719480256|     83|    110.3265|     20.0615|
   * +--------------+-------+------------+------------+
   *
   * Extra columns are ignored; ORDER_ID (column 0) doubles as the rowkey.
   *
   * @param configuration HBase configuration (mutated: output table is set)
   * @param result        virtual-station rows
   * @param tableName     target HBase table (created if missing)
   */
  def saveOrWriteDate(configuration: Configuration, result: DataFrame, tableName: String): Unit = {
    configuration.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    // Job.getInstance replaces the deprecated `new Job(conf)` constructor.
    val job = Job.getInstance(configuration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // BUG FIX: the values written below are Puts, not Results — the output
    // value class must match what TableOutputFormat receives.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    val kvRDD: RDD[(ImmutableBytesWritable, Put)] = result.rdd.mapPartitions(_.map { row =>
      // ORDER_ID is used as the HBase rowkey.
      val put = new Put(Bytes.toBytes(row.getString(0)))
      COLUMNS.foreach { column =>
        put.addColumn(FAMILY, Bytes.toBytes(column), Bytes.toBytes(row.getAs[String](column)))
      }
      (new ImmutableBytesWritable, put)
    })

    // Ensure the target table exists before the distributed write starts.
    HbaseUtil.createTable(HbaseUtil.getConnection, tableName, "f1")
    kvRDD.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

}