package com.learn.spark.analysis

import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import java.util.{Calendar, Date}

object SalesPerTimeTopN {

  /**
   * Spark batch job: counts "alipay" (payment) events per calendar day in the
   * Tmall log files on HDFS, keeps the top-N busiest days, and writes the
   * result as a single header-ed CSV file back to HDFS.
   *
   * Input log lines are '\u0001'-delimited; field 3 holds the event timestamp
   * in "yyyy-MM-dd HH:mm:ss" format.
   *
   * @param args optional — args(0) may override N, the number of top days to
   *             keep (defaults to 10 for backward compatibility)
   */
  def main(args: Array[String]): Unit = {
    val appName = "product_sales_per_time"
    // Allow the result size to be overridden from the command line.
    val topN: Int = if (args.nonEmpty) args(0).toInt else 10
    // FastDateFormat (unlike SimpleDateFormat) is thread-safe and Serializable,
    // so these instances can be captured safely by Spark closures.
    val sourceFormat: FastDateFormat = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss")
    val targetFormat: FastDateFormat = FastDateFormat.getInstance("yyyy-MM-dd")
    // String.split takes a regex; "\\u0001" is the regex escape for the
    // control character U+0001 used as the log field delimiter.
    val separator = "\\u0001"
    val sparkSession: SparkSession = SparkSession.builder().appName(appName).getOrCreate()
    val files = "hdfs://master:8020/hadoop/log/tianchi_2014002_rec_tmall_log_part*.txt"

    val context: SparkContext = sparkSession.sparkContext

    val allLines: RDD[String] = context.textFile(files)
    // (count, (date-string, year, month, day, quarter)) for the topN days,
    // ordered by count descending.
    val topNResult: Array[(Int, (String, Int, Int, Int, Int))] = allLines
      .filter(_.contains("alipay")) // keep only payment events
      .map { line =>
        val fields = line.split(separator)
        val date: Date = sourceFormat.parse(fields(3))
        // Calendar is not thread-safe, so build one per record inside the closure.
        val calendar: Calendar = Calendar.getInstance()
        calendar.setTime(date)
        val monthZeroBased: Int = calendar.get(Calendar.MONTH)
        val key = (
          targetFormat.format(date),
          calendar.get(Calendar.YEAR),
          monthZeroBased + 1,               // human-readable month 1..12
          calendar.get(Calendar.DAY_OF_MONTH),
          monthZeroBased / 3 + 1            // quarter 1..4
        )
        (key, 1)
      }
      .reduceByKey(_ + _)
      .map(_.swap)
      // top() keeps a bounded N-element heap per partition and merges them on
      // the driver — same descending-by-count result as
      // sortByKey(false).take(n), without a full sort + shuffle.
      .top(topN)(Ordering.by(_._1))

    // The result is tiny (topN rows), so a single partition is enough;
    // parallelize directly into one slice instead of repartition(1).
    val resultRdd: RDD[(Int, (String, Int, Int, Int, Int))] =
      context.parallelize(topNResult.toSeq, numSlices = 1)
    val rowRdd: RDD[Row] = resultRdd.map { case (count, (date, year, month, day, quarter)) =>
      Row(date, year, month, day, quarter, count)
    }
    val schema: StructType = StructType(Array(
      StructField("date", StringType, true),
      StructField("year", IntegerType, true),
      StructField("month", IntegerType, true),
      StructField("day", IntegerType, true),
      StructField("quarter", IntegerType, true),
      StructField("count", IntegerType, true)
    ))

    val resultDf: DataFrame = sparkSession.createDataFrame(rowRdd, schema)
    // repartition(1) so the output directory contains a single CSV part file.
    resultDf.repartition(1)
      .write
      .mode("overwrite")
      .option("header", "true")
      .option("delimiter", ",")
      .csv("hdfs://master:8020/hadoop/result/tmall/" + appName)
    // Stopping the session also stops the underlying SparkContext.
    sparkSession.stop()
  }
}
