package com.inspur

import org.apache.spark.sql.SparkSession

/** One order line: which user bought which commodity (both IDs as raw strings). */
case class Order03(commodityId: String, userId: String)
/** One product record: a commodity and its style label (e.g. a clothing style). */
case class Product03(commodityId: String, style: String)

object PortraitUser2 { // Buyer-portrait job implemented with Spark SQL.

  /**
   * Builds a "buyer portrait" per user: the distinct set of product styles the
   * user has ordered, formatted as `userId<TAB>style1;style2;...`, and writes
   * the result to HDFS as a single CSV part file.
   *
   * Input (tab-separated, pre-cleaned):
   *  - products:    commodityId \t style
   *  - order_goods: userId \t commodityId  (note: columns are swapped when
   *    mapped into [[Order03]] below)
   */
  def main(args: Array[String]): Unit = {
    val sess = SparkSession.builder()
      .appName("PortraitUser2")
      .master("local")
      .getOrCreate()

    try {
      import sess.implicits._

      val products = sess.read.textFile("hdfs://192.168.66.88:8020/0616/clear_data/products/part-00000")
        .map(_.split("\t"))
        .map(arr => Product03(arr(0), arr(1)))
      // File layout is userId \t commodityId, so indices are swapped on purpose.
      val order_goods = sess.read.textFile("hdfs://192.168.66.88:8020/0616/clear_data/order_goods/part-00000")
        .map(_.split("\t"))
        .map(arr => Order03(arr(1), arr(0)))

      products.createOrReplaceTempView("products")
      order_goods.createOrReplaceTempView("order_goods")

      // collect_set already de-duplicates styles per user, so post-processing
      // only needs to drop empty entries left over from blank style fields.
      // (The original rebuilt the set by hand with a mutable List and a
      // duplicated emptiness check.)
      sess.sql("select userId, concat_ws(';',collect_set(style)) from products t1 join order_goods t2 " +
        "on (t1.commodityId = t2.commodityId) group by userId")
        .rdd
        .map { row =>
          val styles = row.get(1).toString.split(";").filter(_.nonEmpty).distinct
          row.get(0).toString + "\t" + styles.mkString(";")
        }
        .toDF()
//      .show(100,false)
        .repartition(1) // single output part file
        .write.csv("hdfs://192.168.66.88:8020/0616/portrait_buyer_sparkSQL")
    } finally {
      // Release the local Spark context even if the job fails.
      sess.stop()
    }
  }

}
