package com.learn.spark.analysis

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object KeyWordCount {

  /**
   * Spark job: reads the top-products detail CSVs from HDFS, splits the product
   * title column into space-separated tokens, filters out noise tokens, counts
   * occurrences, and writes the top-N keywords (with counts) back to HDFS as a
   * single CSV file.
   *
   * @param args exactly one argument: the number of top keywords to keep
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with IllegalArgumentException on bad usage instead of a bare RuntimeException.
    require(args.length == 1, "usage: KeyWordCount <topCount>")
    // Parse once up front; a non-numeric argument fails here, not deep inside the job.
    val topCount: Int = args(0).toInt

    val appName = "product_key_words"
    val sparkSession: SparkSession = SparkSession.builder().appName(appName).getOrCreate()
    val context: SparkContext = sparkSession.sparkContext

    val inputPath = "hdfs://master:8020/hadoop/result/tmall/all_top_10_products_details/*.csv"

    // Drop header rows (identified by the "item_id" column name), then keep
    // only the second CSV column — presumably the product title; confirm against
    // the upstream job's output schema.
    val titles: RDD[String] = context.textFile(inputPath)
      .filter(line => !line.contains("item_id"))
      .map(_.split(",")(1))

    // Tokenize titles on spaces, discard noise tokens, count each keyword,
    // and take the N most frequent. top() fetches the N largest by count
    // without the full shuffle-sort (and double tuple swap) that
    // sortByKey-then-take would require.
    val topNResult: Array[(String, Int)] = titles
      .flatMap(_.split(" "))
      .filter(isKeyWord)
      .map((_, 1))
      .reduceByKey(_ + _)
      .top(topCount)(Ordering.by(_._2))

    // topNResult is a small driver-side array; parallelize it and convert to Rows.
    val rowRdd: RDD[Row] = context.parallelize(topNResult)
      .map { case (word, count) => Row(word, count) }
    val schema = StructType(Array(
      StructField("key_word", StringType, nullable = true),
      StructField("count", IntegerType, nullable = true)
    ))
    val resultDf: DataFrame = sparkSession.createDataFrame(rowRdd, schema)

    // coalesce(1) produces a single output file; a full repartition shuffle
    // is unnecessary for such a tiny dataset (and the original repartitioned twice).
    resultDf.coalesce(1)
      .write
      .mode("overwrite")
      .option("header", "true")
      .option("delimiter", ",")
      .option("encoding", "UTF-8")
      .csv(s"hdfs://master:8020/hadoop/result/tmall/$appName")

    // Stopping the session also stops the underlying SparkContext.
    sparkSession.stop()
  }

  /**
   * True when the token is a meaningful keyword: non-null, non-blank, free of
   * the punctuation characters seen in raw titles, and neither purely numeric
   * nor purely ASCII-alphabetic.
   */
  private def isKeyWord(word: String): Boolean = {
    word != null &&
      word.trim.nonEmpty &&
      !word.contains("-") &&
      !word.contains("(") &&
      !word.contains("【") &&
      !word.contains("】") &&
      !word.contains("/") &&
      !word.contains(":") &&
      !word.matches("\\d+") &&
      !word.matches("[A-Za-z]+")
  }
}
