package com.example.springsecurity.controller

import io.swagger.annotations.{Api, ApiOperation}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{avg, desc, max, min}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.springframework.scheduling.annotation.Scheduled
import org.springframework.web.bind.annotation.{CrossOrigin, GetMapping, RequestMapping, RestController}

import java.util.UUID

@RestController
@RequestMapping(Array("/spark"))
@Api(value = "数据功能", tags = Array("数据功能"))
@CrossOrigin
class SparkController {

  /**
   * HTTP trigger for the data refresh; also runs daily at 04:00 via the
   * cron schedule. Returns the literal string "success" when the job
   * completes without throwing.
   */
  @ApiOperation("更新数据")
  @GetMapping(Array("/test"))
  @Scheduled(cron = "0 0 4 ? * *")
  def test: String = {
    updateData()
    "success"
  }

  /**
   * Reads the raw HBase "products" table, computes price aggregates with
   * Spark SQL, and writes the results into the summary tables t1–t5.
   *
   * Column layout read from HBase (family "data"): date, price, unit,
   * productName, province, category, place.
   */
  def updateData(): Unit = {

    val conf = HBaseConfiguration.create()
    // SparkSession wraps SparkContext and SQLContext; builder.getOrCreate()
    // reuses an existing matching session or creates a new one. The session
    // is deliberately not stopped here so the scheduled runs can reuse it.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("tt")
      .config("spark.hadoop.validateOutputSpecs", "false")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext

    // Source table for the scan.
    conf.set(TableInputFormat.INPUT_TABLE, "products")
    val productsRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    // Cache BEFORE the first action so the initial count() populates the
    // cache instead of forcing a second full table scan later.
    productsRDD.cache()
    val count = productsRDD.count()
    println("Products RDD Count:" + count)

    // Extract the columns of the "data" family from each HBase Result.
    val rdd = productsRDD.map { case (_, result) =>
      // Local helper: read one qualifier of the "data" family as a String.
      def col(qualifier: String): String =
        Bytes.toString(result.getValue("data".getBytes, qualifier.getBytes))
      val key = Bytes.toString(result.getRow)
      (key, col("date"), col("province"), col("place"),
        col("productName"), col("category"), col("price"), col("unit"))
    }

    val structSchema: StructType = StructType(
      List(
        StructField("key", StringType, true),
        StructField("date", StringType, true),
        StructField("province", StringType, true),
        StructField("place", StringType, true),
        StructField("productName", StringType, true),
        StructField("category", StringType, true),
        StructField("price", DoubleType, true),
        StructField("unit", StringType, true)
      )
    )

    // NOTE(review): assumes every cell is present and price parses as a
    // number — a missing cell yields null from Bytes.toString and would NPE
    // on .trim here. Confirm the ingest guarantees all columns are set.
    val structRow: RDD[Row] = rdd.map(line =>
      Row(line._1.trim, line._2.trim, line._3.trim, line._4.trim,
        line._5.trim, line._6.trim, line._7.trim.toDouble, line._8.trim))
    val structDf: DataFrame = spark.createDataFrame(structRow, structSchema)

    // Per-province/date/category price statistics, ordered by date.
    val rdd5: RDD[Row] = structDf.groupBy("province", "`date`", "category").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    ).orderBy("`date`").rdd
    val fields5: Array[String] = Array("data:province", "data:date", "data:category", "data:avg_price", "data:max_price", "data:min_price")
    writeHBase("t5", fields5, rdd5, sc)

    // Provinces with the lowest average prices first.
    val rdd4: RDD[Row] = structDf.groupBy("province", "category").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    ).orderBy("avg_price").rdd
    val fields4: Array[String] = Array("data:province", "data:category", "data:avg_price", "data:max_price", "data:min_price")
    writeHBase("t4", fields4, rdd4, sc)

    // Provinces with the highest average prices first.
    val rdd3: RDD[Row] = structDf.groupBy("province", "category").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    ).orderBy(desc("avg_price")).rdd
    val fields3: Array[String] = Array("data:province", "data:category", "data:avg_price", "data:max_price", "data:min_price")
    writeHBase("t3", fields3, rdd3, sc)

    // Nationwide price distribution by province and place.
    val rdd2: RDD[Row] = structDf.groupBy("province", "place", "category").agg(
      avg("price").as("avg_price"),
      max("price").as("max_price"),
      min("price").as("min_price")
    ).orderBy(desc("avg_price")).rdd
    val fields2: Array[String] = Array("data:province", "data:place", "data:category", "data:avg_price", "data:max_price", "data:min_price")
    writeHBase("t2", fields2, rdd2, sc)

    // Number of price postings per province and category.
    val rdd1 = structDf.groupBy("province", "category").count().rdd
    val fields1: Array[String] = Array("data:province", "data:category", "data:count")
    writeHBase("t1", fields1, rdd1, sc)

    println("done!")
  }

  /**
   * Writes each Row of `data` into the HBase table `tableName`, one column
   * per entry in `fields` (each entry is "family:qualifier", or just
   * "family" for an empty qualifier). Field order must match the Row's
   * column order. Every Row gets a fresh random UUID row key, so writes are
   * append-only snapshots rather than upserts.
   */
  def writeHBase(tableName: String, fields: Array[String], data: RDD[Row], sc: SparkContext): Unit = {
    sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    // Job.getInstance replaces the deprecated `new Job(conf)` constructor.
    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // The pairs emitted below carry Put values, so the output value class
    // must be Put (the original classOf[Result] did not match what is
    // actually written to the sink).
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    val rdd = data.map { row =>
      val put = new Put(Bytes.toBytes(UUID.randomUUID().toString.replace("-", "")))
      for (i <- fields.indices) {
        val cols: Array[String] = fields(i).split(":")
        if (cols.length == 1) {
          // Family-only spec: split(":") on a string without ":" yields one
          // element, so write under an empty qualifier.
          put.addColumn(Bytes.toBytes(cols(0)), Bytes.toBytes(""), Bytes.toBytes(row.get(i).toString))
        } else {
          put.addColumn(Bytes.toBytes(cols(0)), Bytes.toBytes(cols(1)), Bytes.toBytes(row.get(i).toString))
        }
      }
      (new ImmutableBytesWritable, put)
    }
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }
}
