package com.learn.spark.analysis

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Batch job: for a given user action and date, count occurrences per item_id in the
 * tmall click logs and write the top-10 items (with counts) as a single CSV file.
 *
 * Usage: ActionDateTopNDetails <action> <date>
 *   action — one of collect/cart/click/alipay
 *   date   — date string matched as a substring of the log line (e.g. yyyy-MM-dd)
 */
object ActionDateTopNDetails {

  // Actions the job accepts; must stay in sync with the usage message below.
  private val ValidActions = Set("collect", "cart", "click", "alipay")

  // Regex for the ctrl-A (\u0001) field separator used in the tmall log files.
  private val Separator = "\\u0001"

  // Input glob on HDFS; field 0 of each line is item_id.
  private val InputFiles =
    "hdfs://master:8020/hadoop/log/tianchi_2014002_rec_tmall_log_part*.txt"

  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      throw new RuntimeException("must give action name(action name must be collect/cart/click/alipay) and date")
    }
    val action = args(0)
    val date = args(1)
    // Enforce the contract the error message already promises; previously an invalid
    // action was accepted and silently produced an empty result.
    if (!ValidActions.contains(action)) {
      throw new RuntimeException("must give action name(action name must be collect/cart/click/alipay) and date")
    }
    // e.g. product_click_top_10_20140101 (dashes stripped from the date).
    val appName = s"product_${action}_top_10_$date".replaceAll("-", "")

    val spark: SparkSession = SparkSession.builder().appName(appName).getOrCreate()
    try {
      val sc: SparkContext = spark.sparkContext

      // Count item_id occurrences for lines matching the action and date
      // (skipping the header line, which contains "item_id"), keep top 10 by count.
      // NOTE(review): matching is substring-based over the whole line, not per-field —
      // assumed intentional for this log format; confirm against the schema.
      val topN: Array[(String, Int)] = sc.textFile(InputFiles)
        .filter(line => !line.contains("item_id") && line.contains(action) && line.contains(date))
        .map(line => (line.split(Separator)(0), 1))
        .reduceByKey(_ + _)
        .map { case (item, count) => (count, item) }
        .sortByKey(ascending = false)
        .take(10)
        .map { case (count, item) => (item, count) }

      // Single partition up front; the result is at most 10 rows.
      val rows: RDD[Row] = sc.parallelize(topN.toSeq, numSlices = 1)
        .map { case (item, count) => Row(date, item, count) }

      val schema = StructType(Array(
        StructField("date", StringType, nullable = true),
        StructField("item_id", StringType, nullable = true),
        StructField("count", IntegerType, nullable = true)
      ))

      spark.createDataFrame(rows, schema)
        .coalesce(1) // one CSV part file; replaces the redundant double repartition(1)
        .write
        .mode("overwrite")
        .option("header", "true")
        .option("delimiter", ",")
        .csv(s"hdfs://master:8020/hadoop/result/tmall/$appName")
    } finally {
      // Always release cluster resources, even if the job fails.
      // spark.stop() also stops the underlying SparkContext.
      spark.stop()
    }
  }
}
