package com.njbdqn.datahandler

import com.njbdqn.util.{HdfsConnection, MysqlConnection}
import org.apache.spark.ml.feature.{MinMaxScaler, StringIndexer, VectorAssembler}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

/**
 * Data preparation for logistic regression: merges the ALS recall, global
 * (hot-sell) recall and group recall candidate sets into one large set, then
 * uses the user's past orders to split it into already-ordered goods
 * (training data) and not-yet-seen goods (recommendation candidates).
 */
object LRDataHandler {
  // Squash a non-negative numeric string into (0, 1): p / (10000 + p).
  // Simple ad-hoc normalization — larger values approach 1 asymptotically.
  // NOTE(review): assumes the column always parses as a number; a null or
  // malformed value throws here. TODO confirm upstream data is clean.
  val priceToOne= udf{
    (price:String)=>{
      val p = price.toDouble
      p/(10000+p)
    }
  }

  // Label a user action: ordering or adding to cart counts as "likes" (1);
  // merely browsing or collecting counts as "does not like" (0).
  val isLove = udf{
    (act:String)=>{
      if (act.equalsIgnoreCase("BROWSE")||
        act.equalsIgnoreCase("COLLECT")){
        0
      }else{
        1
      }
    }
  }

  /**
   * Loads on-sale goods from MySQL and converts every sale-relevant attribute
   * to a numeric column: brand and category names are string-indexed,
   * price / original price / stock are normalized into (0, 1), and the
   * product feature value is string-indexed.
   *
   * @param spark active SparkSession
   * @return DataFrame of goods with numeric-only feature columns
   *         (good_id, is_sale, spu_pro_status, brand, cate, nprice,
   *         noriginal, nsku_num, pro_value)
   */
  def goodNumberFormat(spark:SparkSession)= {
    // Keep only goods currently on sale; drop free-text columns that carry
    // no numeric signal. Cached because two indexers each trigger a fit pass.
    val good_info = MysqlConnection.readTable(spark, "goods").filter("is_sale=1")
      .drop("goods_name", "created_at", "update_at", "good_img_pos", "sku_good_code",
        "content","tags","spu_pro_name","sku_title").cache()
    // Index brand names into numeric codes
    val brand_indexer = new StringIndexer().setInputCol("brand_name").setOutputCol("brand")
    val bi = brand_indexer.fit(good_info).transform(good_info).drop("brand_name")
    // Index category names into numeric codes
    val type_indexer = new StringIndexer().setInputCol("cate_name").setOutputCol("cate")
    val ct = type_indexer.fit(bi).transform(bi).drop("cate_name")
    // Normalize current price, original price and stock count into (0, 1)
    import spark.implicits._
    val pc = ct.withColumn("nprice",priceToOne($"price"))
      .withColumn("noriginal",priceToOne($"original"))
      .withColumn("nsku_num",priceToOne($"sku_num"))
      .drop("price","original","sku_num")

    // Index product feature values into numeric codes
    val feat_index = new StringIndexer().setInputCol("spu_pro_value").setOutputCol("pro_value")
    feat_index.fit(pc).transform(pc).drop("spu_pro_value")
  }

  /**
   * Builds the logistic-regression data sets.
   *
   * Merges the three recall paths (hot-sell, group, ALS) into one candidate
   * set, labels each (user, good) pair from the action logs (1 = liked,
   * 0 = not liked, 2 = never seen by the user), joins in user and goods
   * features, assembles them into a vector and min-max scales it.
   *
   * @param spark active SparkSession
   * @return (labeled training set where label != 2,
   *          recommendation candidate set where label == 2);
   *         both with columns (cust_id, good_id, label, feature)
   */
  def lrData(spark:SparkSession) = {
    import spark.implicits._
    // Global hot-sell recall: cust_id, good_id (sellnum dropped)
   val hot = HdfsConnection.readDataFromHdfs(spark,"/kb08/myshops/dwd_hotsell")
     .select($"cust_id",$"good_id")
    // Group recall: cust_id, good_id (rank dropped)
    val group = HdfsConnection.readDataFromHdfs(spark,"/kb08/myshops/dwd_group")
      .select($"cust_id",$"good_id")
    // ALS recall: cust_id, good_id (score dropped)
    val als = HdfsConnection.readDataFromHdfs(spark,"/kb08/myshops/dwd_als")
      .select($"cust_id",$"good_id")
    // User action logs: space-separated lines where field 0 is the action,
    // field 2 the customer id and field 3 the good id.
    // FIX: guard against short/blank log lines before indexing into the split
    // result — previously any malformed line threw ArrayIndexOutOfBoundsException.
    val order = spark.sparkContext.textFile("file:///F:\\IT\\study\\bigdata\\myact\\logs\\*.log")
      .map(_.split(" "))
      .filter(_.length > 3)
      .map(arr => (arr(0), arr(2), arr(3)))
      .toDF("act","cust_id","good_id")
      .withColumn("flag",isLove($"act"))
      .drop("act").distinct().cache()

    // Merge the three recall paths into one candidate set: cust_id, good_id.
    // FIX: DataFrame union keeps duplicates and the recall paths overlap, so
    // deduplicate before joining — otherwise repeated (cust_id, good_id) rows
    // inflate the training set. Goods the user never interacted with get
    // label 2 via na.fill (flag is null after the left join).
    val all = hot.union(group).union(als).distinct()
      .join(order,Seq("cust_id","good_id"),"left")
      .na.fill(2)
    // User natural + behavioral attributes (computed by the KMeans handler)
    val user_infos = KMeansDataHandler.user_act_info(spark)
    // Goods natural attributes relevant to sales, already numeric
    val good_info = goodNumberFormat(spark)

    // Attach user and goods features to every candidate pair
    val ddf = all.join(user_infos, Seq("cust_id"), "inner")
      .join(good_info, Seq("good_id"), "inner")
    // Cast every column to Double so VectorAssembler accepts them
    val columns = ddf.columns.map(f=>col(f).cast(DoubleType))
    val num_fmt = ddf.select(columns:_*)

    // Assemble all feature columns into one dense vector
    val va = new VectorAssembler().setInputCols(
      Array("province_id","city_id","district_id","sex",
        "marital_status","education_id","vocation","post","compId","mslevel",
        "reg_date","last_date","age","user_score","logincount","buycount","pay",
        "is_sale","spu_pro_status","brand","cate","nprice","noriginal","nsku_num",
        "pro_value")).setOutputCol("orign_feature")

    val ofdf = va.transform(num_fmt).select($"cust_id",$"good_id",$"flag".as("label"),$"orign_feature")

    // Min-max scale the assembled vector into the final feature column
    val mmScaler = new MinMaxScaler().setInputCol("orign_feature").setOutputCol("feature")
    val res = mmScaler.fit(ofdf).transform(ofdf).select($"cust_id",$"good_id",$"label",$"feature")

    // label != 2 -> labeled training set; label == 2 -> unseen goods,
    // i.e. the pool of recommendation candidates
    (res.filter("label!=2"),res.filter("label=2"))

  }
}
