package com.learn.spark.analysis

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Batch job that collects the distinct item ids from all per-metric
 * "top 10" result files, joins them with the full product-detail dump,
 * and writes the enriched rows back to HDFS as a single CSV file.
 */
object GetProductInfoDetails {

  /** Application name; also used as the name of the output directory. */
  private val AppName = "all_top_10_products_details"

  /** Base HDFS directory that holds both the inputs and the output. */
  private val ResultBase = "hdfs://master:8020/hadoop/result/tmall/"

  /** Field separator of the top-10 result CSV files. */
  private val CsvSeparator = ","

  /** Field separator (as a split regex) of the raw product dump: Ctrl-A, U+0001. */
  private val ProductSeparator = "\\u0001"

  def main(args: Array[String]): Unit = {
    // Glob patterns covering every top-10 result directory.
    val sourcePatterns = Array(
      ResultBase + "product_*_top_10/*.csv",
      ResultBase + "product_alipay_top_10*/*.csv"
    )

    val sparkSession: SparkSession = SparkSession.builder().appName(AppName).getOrCreate()
    val context: SparkContext = sparkSession.sparkContext

    // textFile accepts a comma-separated list of paths/globs, so all
    // result files are read in a single pass.
    val allLines: RDD[String] = context.textFile(sourcePatterns.mkString(","))

    // Drop header rows (they contain the word "count") and keep the
    // distinct item ids found in the first CSV column.
    val itemIds: RDD[String] = allLines
      .filter(line => !line.contains("count"))
      .map(line => line.split(CsvSeparator)(0).trim)
      .distinct()

    val rowRdd: RDD[Row] = itemIds.map(Row(_))
    val schema = StructType(Array(
      StructField("item_id", StringType, nullable = true)
    ))
    val idsDf: DataFrame = sparkSession.createDataFrame(rowRdd, schema)

    // Left join keeps every top-10 item id even when no detail row exists.
    val joinDF: DataFrame = idsDf.join(getAllProductLines(sparkSession), Seq("item_id"), "left")

    // repartition(1) forces a single output part file.
    joinDF.repartition(1)
      .write
      .mode("overwrite")
      .option("header", "true")
      .option("delimiter", ",")
      .option("encoding", "UTF8")
      .csv(ResultBase + AppName)

    // Stopping the session also stops the underlying SparkContext
    // (the original stopped only the context, leaving the session open).
    sparkSession.stop()
  }

  /**
   * Loads the full product-detail dump as a DataFrame with the schema
   * (item_id, title, pict_url, category, brand_id, seller_id).
   *
   * Rows are U+0001-separated; rows whose field count is not exactly 6
   * are discarded.
   *
   * @param sparkSession active session used to read the dump
   * @return all well-formed product rows, every column typed as String
   */
  def getAllProductLines(sparkSession: SparkSession): DataFrame = {
    val source = "hdfs://master:8020/hadoop/product/tianchi_2014001_rec_tmall_product.txt"
    // NOTE(review): this header string is comma-joined while the data is
    // U+0001-separated, so the contains() check may never match a real
    // header line — the length == 6 guard below does the actual filtering.
    // Kept as-is to preserve behavior; confirm against the source file.
    val header = "item_id,title,pict_url,category,brand_id,seller_id"

    val rowRdd: RDD[Row] = sparkSession.sparkContext.textFile(source)
      .filter(line => !line.contains(header) && line.split(ProductSeparator).length == 6)
      .map { line =>
        val fields = line.split(ProductSeparator).map(_.trim)
        Row(fields(0), fields(1), fields(2), fields(3), fields(4), fields(5))
      }

    val schema = StructType(Array(
      StructField("item_id", StringType, nullable = true),
      StructField("title", StringType, nullable = true),
      StructField("pict_url", StringType, nullable = true),
      StructField("category", StringType, nullable = true),
      StructField("brand_id", StringType, nullable = true),
      StructField("seller_id", StringType, nullable = true)
    ))
    sparkSession.createDataFrame(rowRdd, schema)
  }
}
