package com.njbdqn.datahandler

import com.njbdqn.util.{HdfsConnection, MysqlConnection}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{row_number, sum, udf}

object ALSDataHandler {

  /**
   * One parsed line of the user-action log.
   *
   * @param act      action name (e.g. "BROWSE", "COLLECT", "BUYCAR", ...)
   * @param act_time timestamp string of the action
   * @param cust_id  raw customer id (may be non-numeric)
   * @param good_id  raw good id (may be non-numeric)
   * @param browse   browse detail field
   */
  case class UserAction(act: String, act_time: String, cust_id: String, good_id: String, browse: String)

  /**
   * UDF mapping an action name to an implicit-feedback score:
   * BROWSE=1, COLLECT=2, BUYCAR=4, anything else (e.g. an order) = 8.
   * Scores are summed per (cust_id, good_id) in [[alsData]] as the ALS rating.
   */
  val actToNum = udf { (str: String) =>
    str match {
      case "BROWSE"  => 1
      case "COLLECT" => 2
      case "BUYCAR"  => 4
      case _         => 8
    }
  }

  // Good/customer ids may contain non-numeric characters, but ALS requires integer
  // ids, so each raw id is mapped to a dense, continuous numeric id and cached.

  /**
   * Assigns every good a dense numeric id.
   *
   * NOTE(review): an un-partitioned Window moves all rows to a single partition;
   * accepted here because the ids must be globally continuous.
   *
   * @param spark active session
   * @param path  HDFS path of the goods table (default preserves original behavior)
   * @return cached DataFrame of (good_id, gid)
   */
  def goodToNum(spark: SparkSession, path: String = "/kb08/myshops/dwd_good") = {
    import spark.implicits._
    val wnd = Window.orderBy("good_id")
    HdfsConnection.readDataFromHdfs(spark, path)
      .select($"good_id", row_number().over(wnd).as("gid")).cache()
  }

  /**
   * Assigns every customer a dense numeric id.
   *
   * @param spark active session
   * @param table MySQL table holding customers (default preserves original behavior)
   * @return cached DataFrame of (cust_id, uid)
   */
  def userToNum(spark: SparkSession, table: String = "customs") = {
    import spark.implicits._
    val wnd1 = Window.orderBy("cust_id")
    MysqlConnection.readTable(spark, table)
      .select($"cust_id", row_number().over(wnd1).as("uid")).cache()
  }

  /**
   * Builds the ALS training set (uid, gid, score) from the raw action logs:
   * parse logs, score each action via [[actToNum]], sum scores per
   * (customer, good), then join in the dense numeric ids.
   *
   * @param spark   active session
   * @param logPath glob of log files to read (default preserves original behavior)
   * @return cached DataFrame with columns (uid, gid, score)
   */
  def alsData(spark: SparkSession,
              logPath: String = "file:///F:\\IT\\study\\bigdata\\myact\\logs\\*.log") = {
    val txt = spark.sparkContext.textFile(logPath).cache()
    import spark.implicits._
    // Compute each user's aggregate score per good.
    val df = txt.map(_.split(" "))
      // Guard against blank/truncated log lines: indexing arr(0..4) on a short
      // line would otherwise throw ArrayIndexOutOfBoundsException and kill the job.
      .filter(_.length >= 5)
      .map(arr => UserAction(arr(0), arr(1), arr(2), arr(3), arr(4)))
      .toDF()
      .select($"cust_id", $"good_id", actToNum($"act").as("score"))
      .groupBy("cust_id", "good_id").agg(sum($"score").as("score")).cache()

    // Join df with the goods and customers lookup tables, keeping only (uid, gid, score).
    df.join(goodToNum(spark), Seq("good_id"), "inner").join(userToNum(spark), Seq("cust_id"), "inner")
      .select("uid", "gid", "score").cache()
  }
}
