package com.fast.leo

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, InputSplit, TextInputFormat}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.{HadoopRDD, RDD}
import org.apache.spark.sql.SparkSession

/**
  * @author leo.jie (weixiao.me@aliyun.com)
  * @organization DataReal
  * @version 1.0
  * @website https://www.jlpyyf.com
  * @date 2019-07-28 20:29
  * @since 1.0
  */
object WordCount {

  /**
    * Entry point: reads a pipe-delimited, GBK-encoded POS file, aggregates the
    * sales amount per (normalized shop code, time-slice) key, and prints several
    * top-N views of the result.
    *
    * @param args unused
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName("WordCount")
      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext

    val fileRddOri = loadFileToRdd(sc, "hdfs://leo/test/pos.DAT")
    // Split each line on '|', trim every field, keep only well-formed 12-field
    // records, then sum the amount (field 8) per (shop code, time slice) key.
    // NOTE(review): x(10).substring(10, 13) assumes field 10 is at least 13
    // characters long — confirm against the real file layout; shorter fields
    // would throw StringIndexOutOfBoundsException here. Likewise x(8).toFloat
    // assumes field 8 is always numeric.
    val fileRdd = fileRddOri
      .map(x => for (data <- x._2.split("\\|")) yield if (data == null) "" else data.trim)
      .filter(x => x.length == 12)
      .map(x => (retailer_shop_code(x(3)), x(10).substring(10, 13), x(8).toFloat))
      .map(x => ((x._1, x._2), x._3))
      .reduceByKey(_ + _)

    // Top 10 (shop, slice) keys by summed amount.
    fileRdd.top(10)(Ordering.by(e => e._2)).foreach(println(_))
    println("##########################################################")
    // Top 10 shops by summed amount (collapsing the time slice).
    fileRdd.map(x => (x._1._1, x._2)).reduceByKey(_ + _).top(10)(Ordering.by(e => e._2)).foreach(println(_))
    println("##########################################################")
    println(fileRdd.count())
    println("##########################################################")
    println(fileRdd.first())
    println("##########################################################")
    fileRdd.take(10).foreach(println(_))

    // Keep the driver (and the Spark web UI) alive until the user presses Enter.
    // The original `while (true) { ; }` busy-wait pegged a CPU core and made the
    // spark.stop() below unreachable dead code.
    println("Press ENTER to stop the application ...")
    scala.io.StdIn.readLine()
    spark.stop()
  }

  /**
    * Loads a text file whose content is not UTF-8 (GBK by default) into an RDD.
    *
    * Spark's plain `sc.textFile` assumes UTF-8, so this drops down to the Hadoop
    * input format and re-decodes each line's raw bytes with the requested charset.
    *
    * @param sc       active SparkContext
    * @param path     file (or directory) path, e.g. an HDFS URI
    * @param encoding charset of the file content; defaults to "GBK"
    * @return RDD of (source file name, decoded line, 1)
    */
  def loadFileToRdd(sc: SparkContext, path: String, encoding: String = "GBK"): RDD[(String, String, Int)] = {
    sc.hadoopFile[LongWritable, Text, TextInputFormat](path)
      .asInstanceOf[HadoopRDD[LongWritable, Text]]
      .mapPartitionsWithInputSplit((inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
        val file = inputSplit.asInstanceOf[FileSplit]
        iterator.filter(x => x._2 != null).map(x => {
          // Decode only the valid portion of the Text buffer: Text.getBytes may
          // return a backing array longer than getLength.
          (file.getPath.getName, new String(x._2.getBytes, 0, x._2.getLength, encoding), 1)
        })
      })
  }

  /**
    * Normalizes a retailer shop code: strips a length-dependent numeric suffix
    * and upper-cases the remainder.
    *
    * @param retailer_shop_code raw shop code; may be null or blank
    * @return normalized code; "" for null/blank input, and codes of any other
    *         length are passed through unchanged
    */
  def retailer_shop_code(retailer_shop_code: String): String = {
    // BUG FIX: was `retailer_shop_code == null && retailer_shop_code.trim.length == 0`,
    // which throws NPE on null input (the right operand dereferences null after the
    // left operand evaluates to true) and never matches blank non-null strings.
    if (retailer_shop_code == null || retailer_shop_code.trim.isEmpty) ""
    else if (retailer_shop_code.length == 5) retailer_shop_code.substring(0, retailer_shop_code.length - 1).toUpperCase()
    else if (retailer_shop_code.length == 6) retailer_shop_code.substring(0, retailer_shop_code.length - 2).toUpperCase()
    else if (retailer_shop_code.length == 8) retailer_shop_code.substring(0, retailer_shop_code.length - 2).toUpperCase()
    else retailer_shop_code
  }
}
