package cn.doitedu.profile.tagextract

import cn.doitedu.commons.utils.{DictsLoader, SparkUtil}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}

import scala.collection.mutable.ListBuffer


/**
  * @author: 余辉
  * @blog: https://blog.csdn.net/silentwolfyh
  * @create: 2019/10/19
  * @description:
  * 1、商城数仓-消费商品画像报表
  * 2、用户订单商品退拒分析报表 标签抽取
  * 3、返回：gid，模块，便签，值，权重 (Long, String, String, String, Double)
  **/

/**
  * Bean mapping one row of the ads user-goods report CSV (see the schema built
  * in [[AdsUserGoodsTagExtractor.extractUserGoodsTags]]): one user per row,
  * with every metric column read as Double.
  *
  * NOTE(review): field semantics below are inferred from the column names and
  * the file header ("消费商品画像 / 退拒分析") — confirm against the report spec.
  */
case class AdsUserGoods(
                         user_id: String,           // raw user id; hashed to Long for the idmp lookup
                         p_sales_cnt: Double,       // presumably purchase sales count
                         p_sales_amt: Double,       // presumably purchase sales amount
                         p_sales_cut_amt: Double,   // presumably purchase discount amount
                         h_sales_cnt: Double,
                         h_sales_amt: Double,
                         h_sales_cut_amt: Double,
                         return_cnt: Double,        // returned-goods count
                         return_amt: Double,        // returned-goods amount
                         reject_cnt: Double,        // rejected-goods count
                         reject_amt: Double,        // rejected-goods amount
                         common_first_cat: Double,  // most-common 1st-level category (stored numerically)
                         common_second_cat: Double, // most-common 2nd-level category
                         common_third_cat: Double   // most-common 3rd-level category
                       )

object AdsUserGoodsTagExtractor {

  /** Tag-module code for the consumer-goods profile section. */
  private val Module = "M020"

  /** Placeholder weight used while real tag weights are not yet computed. */
  private val DefaultWeight = -9999.9

  /**
    * Extracts consumer-goods / return-reject tags from the ads report CSV.
    *
    * Each input row fans out into 13 tag tuples (T0201..T0213), one per metric
    * column of [[AdsUserGoods]].
    *
    * @param spark active SparkSession
    * @param path  path of the ads user-goods report CSV (with header)
    * @param idmp  id-mapping dictionary: hashed user_id -> global id (gid)
    * @return RDD of (gid, module, tagName, tagValue, weight)
    */
  def extractUserGoodsTags(spark: SparkSession, path: String, idmp: collection.Map[Long, Long]): RDD[(Long, String, String, String, Double)] = {

    // 1. Schema of the consumer-goods / return-reject report: the user id as a
    //    string, every metric column as Double.
    val schema = new StructType(Array(
      new StructField("user_id", DataTypes.StringType),
      new StructField("p_sales_cnt", DataTypes.DoubleType),
      new StructField("p_sales_amt", DataTypes.DoubleType),
      new StructField("p_sales_cut_amt", DataTypes.DoubleType),
      new StructField("h_sales_cnt", DataTypes.DoubleType),
      new StructField("h_sales_amt", DataTypes.DoubleType),
      new StructField("h_sales_cut_amt", DataTypes.DoubleType),
      new StructField("return_cnt", DataTypes.DoubleType),
      new StructField("return_amt", DataTypes.DoubleType),
      new StructField("reject_cnt", DataTypes.DoubleType),
      new StructField("reject_amt", DataTypes.DoubleType),
      new StructField("common_first_cat", DataTypes.DoubleType),
      new StructField("common_second_cat", DataTypes.DoubleType),
      new StructField("common_third_cat", DataTypes.DoubleType)
    ))

    // 2. Load the CSV against the schema and broadcast the id-mapping so each
    //    executor gets a single read-only copy.
    import spark.implicits._
    val df = spark.read.schema(schema).option("header", true).csv(path)
    val bc = spark.sparkContext.broadcast(idmp)

    // 3. Convert the DataFrame to a typed Dataset of beans.
    val ds: Dataset[AdsUserGoods] = df.as[AdsUserGoods]

    // 4. Fan each bean out into its 13 tag tuples. Using iter.flatMap inside
    //    mapPartitions yields a flat Iterator directly, so no trailing
    //    .flatMap(identity) pass (and no mutable ListBuffer) is needed.
    ds.rdd
      .mapPartitions { iter =>
        // Read the broadcast once per partition, not once per record.
        val idmpDict = bc.value
        iter.flatMap { bean =>
          // Resolve the global id; -1 marks users missing from the id-mapping.
          val gid = idmpDict.getOrElse(bean.user_id.hashCode.toLong, -1L)
          Seq(
            (gid, Module, "T0201", bean.p_sales_cnt.toString, DefaultWeight),
            (gid, Module, "T0202", bean.p_sales_amt.toString, DefaultWeight),
            (gid, Module, "T0203", bean.p_sales_cut_amt.toString, DefaultWeight),
            (gid, Module, "T0204", bean.h_sales_cnt.toString, DefaultWeight),
            (gid, Module, "T0205", bean.h_sales_amt.toString, DefaultWeight),
            (gid, Module, "T0206", bean.h_sales_cut_amt.toString, DefaultWeight),
            (gid, Module, "T0207", bean.return_cnt.toString, DefaultWeight),
            (gid, Module, "T0208", bean.return_amt.toString, DefaultWeight),
            (gid, Module, "T0209", bean.reject_cnt.toString, DefaultWeight),
            (gid, Module, "T0210", bean.reject_amt.toString, DefaultWeight),
            (gid, Module, "T0211", bean.common_first_cat.toString, DefaultWeight),
            (gid, Module, "T0212", bean.common_second_cat.toString, DefaultWeight),
            (gid, Module, "T0213", bean.common_third_cat.toString, DefaultWeight)
          )
        }
      }
  }

  /**
    * Local driver entry point: loads the id-mapping dictionary, extracts the
    * goods tags and prints a small sample.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    val ads_user_goods_path = "user_profile/data/t_user_goods_yh"
    val idmp_path = "user_profile/data/output/idmp/day01"
    val idmp = DictsLoader.loadIdmpDict(spark, idmp_path)
    val goodsLogs: RDD[(Long, String, String, String, Double)] = extractUserGoodsTags(spark, ads_user_goods_path, idmp)
    goodsLogs.take(10).foreach(println)
    // Release the driver/executor resources instead of relying on JVM exit.
    spark.stop()
  }
}
