package com.shujia.demo

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}

object Demo2INdex {

  /**
    * Computes the daily footfall per city — count(distinct mdn) from the Hive
    * table `staypoint` for a given day — and writes one row per city into the
    * HBase table `city_index` (column family `info`, qualifier `num`).
    *
    * Usage: Demo2INdex &lt;day&gt;  (the `day` partition value, e.g. "20180503")
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 1, "Usage: Demo2INdex <day>")
    val day = args(0)

    // Fix: the app name previously read "Demo1DataFIlter", copied from another job.
    val conf = new SparkConf().setAppName("Demo2Index")
    // A per-city aggregate is small; 2 shuffle partitions are plenty.
    conf.set("spark.sql.shuffle.partitions", "2")

    val sc = new SparkContext(conf)
    try {
      val hiveContext = new HiveContext(sc)

      // Distinct device count (mdn) per city for the requested day.
      val resultDF = hiveContext.sql(
        s"""
           |select
           |city_id,count(distinct mdn) as num
           |from staypoint
           |where day = '$day'
           |group by city_id
         """.stripMargin)

      // Open one HBase connection per partition: connections are not
      // serializable, so they must be created inside foreachPartition, and
      // the setup cost is amortized over all rows of the partition.
      resultDF.foreachPartition(iter => {
        val configuration: Configuration = new Configuration()
        configuration.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181")
        val connection = HConnectionManager.createConnection(configuration)

        //create 'city_index','info'
        val cityIndex = connection.getTable("city_index")
        try {
          iter.foreach(row => {
            val city = row.getAs[String]("city_id")
            val num = row.getAs[Long]("num")

            // Row key "<city_id>-<day>" gives each city exactly one cell per day.
            val rowkey = city + "-" + day
            val put = new Put(rowkey.getBytes())
            put.add("info".getBytes(), "num".getBytes(), num.toString.getBytes())
            cityIndex.put(put)
          })
        } finally {
          // Fix: the table handle was never closed, and the connection leaked
          // whenever a put threw. Close both even on failure.
          cityIndex.close()
          connection.close()
        }
      })
    } finally {
      // Fix: release cluster resources even if the job fails.
      sc.stop()
    }
  }
}
