package com.example.springsecurity.hadoop.spark

import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import java.util.UUID
class SparkOperateHBase {

  /** Reads product rows from the HBase table "products", computes price
    * statistics with Spark SQL, and writes the aggregates back to the HBase
    * tables t1–t4 via [[writeHBase]].
    *
    * Expects each row to carry the columns date/price/unit/productName/
    * province/category/place under column family "data".
    * NOTE(review): a missing cell makes `Bytes.toString` return null and the
    * later `.trim`/`.toDouble` would NPE — assumes all columns are populated.
    */
  def updateData(): Unit = {

    val conf = HBaseConfiguration.create()

    // A single SparkSession wraps both the SparkContext and the SQLContext;
    // getOrCreate reuses an existing session when one is already running.
    // (The original code built a second bare SparkContext first — redundant,
    // since getOrCreate would have attached to it anyway.)
    val spark: SparkSession =
      SparkSession.builder().master("local[*]").appName("tt").getOrCreate()
    val sc: SparkContext = spark.sparkContext

    // Table to scan.
    conf.set(TableInputFormat.INPUT_TABLE, "products")
    val stuRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    // Cache BEFORE the count so the scan is not repeated by the map below.
    stuRDD.cache()
    val count = stuRDD.count()
    println("Students RDD Count:" + count)

    // Pull the interesting columns out of each Result.
    val rdd = stuRDD.map { case (_, result) =>
      // Small helper: read one qualifier from column family "data".
      def col(qualifier: String): String =
        Bytes.toString(result.getValue(Bytes.toBytes("data"), Bytes.toBytes(qualifier)))
      val key = Bytes.toString(result.getRow)
      (key, col("date"), col("province"), col("place"),
        col("productName"), col("category"), col("price"), col("unit"))
    }.sortBy(tp => (tp._7, tp._2)) // sort by price (lexicographic string order), then date

    val structSchema: StructType = StructType(
      List(
        StructField("key", StringType, true),
        StructField("date", StringType, true),
        StructField("province", StringType, true),
        StructField("place", StringType, true),
        StructField("productName", StringType, true),
        StructField("category", StringType, true),
        StructField("price", DoubleType, true),
        StructField("unit", StringType, true)
      )
    )

    val structRow: RDD[Row] = rdd.map { line =>
      Row(line._1.trim, line._2.trim, line._3.trim, line._4.trim,
        line._5.trim, line._6.trim, line._7.trim.toDouble, line._8.trim)
    }
    val structDf: DataFrame = spark.createDataFrame(structRow, structSchema)

    // Per-province price statistics, reused by the t2/t3/t4 reports below.
    val provinceStats: DataFrame = structDf.groupBy("province").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    )
    val statFields: Array[String] =
      Array("data:province", "data:avg_price", "data:max_price", "data:min_price")

    // Ten provinces with the LOWEST average price.
    val rdd4: RDD[Row] = provinceStats.orderBy("avg_price").limit(10).rdd
    writeHBase("t4", statFields, rdd4, sc)

    // Ten provinces with the HIGHEST average price.
    val rdd3: RDD[Row] = provinceStats.orderBy(desc("avg_price")).limit(10).rdd
    writeHBase("t3", statFields, rdd3, sc)

    // Price distribution by (province, place) — top 10 by average price.
    val rdd2: RDD[Row] = structDf.groupBy("province", "place").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    ).orderBy(desc("avg_price")).limit(10).rdd
    writeHBase("t2", statFields, rdd2, sc)

    // Number of price postings per province (only provinces with > 10).
    val rdd1 = structDf.groupBy("province").count().filter("count>10").rdd
    val fields1: Array[String] = Array("data:province", "data:count")
    writeHBase("t1", fields1, rdd1, sc)
  }

  /** Persists every Row of `data` into the HBase table `tableName`.
    *
    * @param tableName destination HBase table (must already exist)
    * @param fields    one "family:qualifier" (or bare "family") spec per Row
    *                  column, positionally matching the Row's columns
    * @param data      rows to write; each gets a random UUID row key because
    *                  these aggregate tables have no natural key
    * @param sc        SparkContext carrying the Hadoop configuration
    */
  def writeHBase(tableName: String, fields: Array[String], data: RDD[Row], sc: SparkContext): Unit = {
    sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    // Job.getInstance replaces the deprecated `new Job(conf)` constructor.
    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // BUG FIX: the values emitted below are Put mutations, not Results;
    // TableOutputFormat requires a Mutation output value class.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    val puts = data.map { row =>
      val put = new Put(Bytes.toBytes(UUID.randomUUID().toString.replace("-", "")))
      fields.zipWithIndex.foreach { case (field, i) =>
        val cols: Array[String] = field.split(":")
        if (cols.length == 1) {
          // Spec had only a family name: store under an empty qualifier.
          put.addColumn(Bytes.toBytes(cols(0)), Bytes.toBytes(""), Bytes.toBytes(row.get(i).toString))
        } else {
          put.addColumn(Bytes.toBytes(cols(0)), Bytes.toBytes(cols(1)), Bytes.toBytes(row.get(i).toString))
        }
      }
      (new ImmutableBytesWritable, put)
    }
    puts.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }


}