package com.cxk.fe

import org.apache.spark.sql.types.{LongType, StructField}
import org.apache.spark.sql.{DataFrame, Row, SparkSession, functions}

object FeatureEngineering {

  /** Factory for [[FeatureEngineering]] instances bound to a data directory and an active Spark session. */
  def apply(dir: String, spark: SparkSession): FeatureEngineering = {
    new FeatureEngineering(dir, spark)
  }

  /**
    * Feature-engineering helpers over the train / test / members CSV files found in `dir`.
    *
    * @param dir   directory containing `train.csv`, `test.csv` and `members.csv`
    * @param spark active Spark session used for all reads and writes
    */
  class FeatureEngineering(val dir: String, val spark: SparkSession) {

    private[this] val train_csv = dir + "/train.csv"
    private[this] val test_csv = dir + "/test.csv"
    private[this] val members_csv = dir + "/members.csv"
    // Every source CSV carries a header row, so one configured reader is shared.
    private[this] val reader = spark.read.option("header", "true")


    /**
      * Reads a header-bearing CSV file.
      *
      * @param path file path
      * @return the CSV contents as a DataFrame (columns untyped, i.e. strings)
      */
    def read_csv(path: String): DataFrame = {
      reader.csv(path)
    }

    /**
      * Derives calendar features from the members' registration and expiration dates.
      *
      * `registration_init_time` and `expiration_date` are `yyyyMMdd` strings; they are
      * parsed to dates and expanded into day / day-of-week / week-of-year / month /
      * quarter / year components, plus the month and day spans between the two dates.
      *
      * @param save whether to persist the result to `<dir>/members_init_expiration`
      * @return the derived feature DataFrame, keyed by `msno`
      */
    def members_registration_expiration(save: Boolean = false): DataFrame = {
      val df = read_csv(members_csv)
        .selectExpr("msno", "TO_DATE(registration_init_time,'yyyyMMdd') AS init_time", "TO_DATE(expiration_date,'yyyyMMdd') AS exp_date")
        .sort("init_time", "exp_date")
        .selectExpr("msno", "DAY(init_time) AS reg_day", "DAYOFWEEK(init_time) AS reg_week", "WEEKOFYEAR(init_time) AS reg_week_ordinal",
          "MONTH(init_time) AS reg_month", "QUARTER(init_time) AS reg_quarter", "YEAR(init_time) AS reg_year",
          "MONTHS_BETWEEN(exp_date,init_time) AS diff_months", "DATEDIFF(exp_date,init_time) AS diff_dates",
          "DAY(exp_date) AS exp_day", "DAYOFWEEK(exp_date) AS exp_week", "WEEKOFYEAR(exp_date) AS exp_week_ordinal",
          "MONTH(exp_date) AS exp_month", "QUARTER(exp_date) AS exp_quarter", "YEAR(exp_date) AS exp_year")

      if (save)
        df.coalesce(1).write.option("header", "true").csv(dir + "/members_init_expiration")

      df
    }

    /**
      * Left-joins user-side feature tables (on `msno`) and item-side feature tables
      * (on `song_id`) onto a train/test DataFrame.
      *
      * @param train_test base train or test DataFrame
      * @param user       feature DataFrames joined on `msno`
      * @param item       feature DataFrames joined on `song_id`
      * @return the merged DataFrame
      */
    def merge(train_test: DataFrame, user: Seq[DataFrame], item: Seq[DataFrame]): DataFrame = {
      // Fold each right-hand table onto `df` with a left join, one at a time.
      @annotation.tailrec
      def _join(df: DataFrame, cols: String, right: Seq[DataFrame], i: Int = 0): DataFrame =
        if (i < right.length) _join(df.join(right(i), Seq(cols), "left"), cols, right, i + 1) else df

      _join(_join(train_test, "msno", user), "song_id", item)
    }

    /**
      * Splits multi-valued columns (values separated by '|', '&' or '/') and explodes
      * each of them into multiple rows. For example:
      * +---------+-------------------+----------------+--------------+
      * |genre_ids|        artist_name|        composer|      lyricist|
      * +---------+-------------------+----------------+--------------+
      * |      465|張信哲 (Jeff Chang)|            董貞|        何啟弘|
      * |      444|          BLACKPINK|TEDDY&Bekuh BOOM|         TEDDY|
      * |      465|              S.H.E|          湯小康|        徐世珍|
      * |864|57|85|           貴族精選|    Joe Hisaishi|Hayao Miyazaki|
      * +---------+-------------------+---------------+---------------+
      * explodes into:
      * +---------+-------------------+---------------+--------------+
      * |genre_ids|        artist_name|       composer|      lyricist|
      * +---------+-------------------+---------------+--------------+
      * |      465|張信哲 (Jeff Chang)|           董貞|        何啟弘|
      * |      444|          BLACKPINK|          TEDDY|         TEDDY|
      * |      444|          BLACKPINK|     Bekuh BOOM|         TEDDY|
      * |      465|              S.H.E|         湯小康|        徐世珍|
      * |      864|           貴族精選|   Joe Hisaishi|Hayao Miyazaki|
      * |       57|           貴族精選|   Joe Hisaishi|Hayao Miyazaki|
      * |       85|           貴族精選|   Joe Hisaishi|Hayao Miyazaki|
      * +---------+-------------------+---------------+--------------+
      *
      * @param dataFrame input table
      * @param save      whether to persist the result to `<dir>/songs_explode`
      * @param cols      the columns to split and explode
      * @return the table after exploding into multiple rows
      */
    def explode(dataFrame: DataFrame, save: Boolean, cols: String*): DataFrame = {
      // The character class matches any of '|', '&' or '/' (the inner '|' chars are literal inside [...]).
      @annotation.tailrec
      def _explode(df: DataFrame, i: Int = 0): DataFrame =
        if (i < cols.length) _explode(df.withColumn(cols(i), functions.explode(functions.split(df(cols(i)), "[\\||&|/]"))), i + 1) else df

      val df = _explode(dataFrame)
      if (save)
        df.coalesce(1).write.option("header", "true").csv(dir + "/songs_explode")
      df
    }

    /**
      * Assigns a unique incremental numeric id to every user and item in the train set.
      *
      * @param save whether to persist the indexes to `<dir>/user_index` and `<dir>/item_index`
      * @return (user index with `msno` -> `user_id`, item index with `song_id` -> `item_id`)
      */
    def incrementIndex(save: Boolean = false): (DataFrame, DataFrame) = {
      val train_members_df = read_csv(train_csv).join(read_csv(members_csv), Seq("msno"), "left")
      // NOTE(review): distinct() is not guaranteed to preserve this ordering, so the
      // assigned ids may not strictly follow registration time — confirm if that matters.
      val df = train_members_df.orderBy(train_members_df("registration_init_time").asc_nulls_last)
      val msno_index = zipWithIndex(spark, "user_id", df.select(df("msno")).distinct())
      val song_index = zipWithIndex(spark, "item_id", df.select(df("song_id")).distinct())

      if (save) {
        msno_index.coalesce(1).write.option("header", "true").csv(dir + "/user_index")
        song_index.coalesce(1).write.option("header", "true").csv(dir + "/item_index")
      }
      (msno_index, song_index)
    }

    /**
      * Extends the historical user/item indexes with incremental ids for users and
      * items that appear only in the test set. Existing ids are reused; new entries
      * continue numbering from `count()` of the corresponding historical index.
      *
      * @param user_index historical user index (`msno` -> `user_id`), or null to skip users
      * @param item_index historical item index (`song_id` -> `item_id`), or null to skip items
      * @param save       whether to persist to `<dir>/test_user_index` / `<dir>/test_item_index`
      * @return (test user index, test item index); an element is null when its input index was null
      */
    def fillIndex(user_index: DataFrame, item_index: DataFrame, save: Boolean = false): (DataFrame, DataFrame) = {
      val testCsv = read_csv(test_csv)

      val test_user_index: DataFrame =
        if (user_index == null) null
        else {
          val test_user_df = testCsv.select(testCsv("msno")).distinct().join(user_index, Seq("msno"), "left")
          // Users unseen in the historical index get new ids, ordered by registration time.
          val msnoDF = test_user_df.where(test_user_df("user_id").isNull).select("msno").join(read_csv(members_csv), Seq("msno"), "left")
          val msno_index = zipWithIndex(spark, "user_id", msnoDF.orderBy(msnoDF("registration_init_time").asc_nulls_last).select("msno"), user_index.count())
          val merged = test_user_df.where(test_user_df("user_id").isNotNull).unionByName(msno_index)
          if (save)
            merged.coalesce(1).write.option("header", "true").csv(dir + "/test_user_index")
          merged
        }

      val test_item_index: DataFrame =
        if (item_index == null) null
        else {
          val test_item_df = testCsv.select(testCsv("song_id")).distinct().join(item_index, Seq("song_id"), "left")
          // Items unseen in the historical index get new ids continuing from its count.
          val song_index = zipWithIndex(spark, "item_id", test_item_df.where(test_item_df("item_id").isNull).select("song_id"), item_index.count())
          val merged = test_item_df.where(test_item_df("item_id").isNotNull).unionByName(song_index)
          if (save)
            // coalesce(1) (not repartition(1)) for consistency with every other save path; it avoids a full shuffle.
            merged.coalesce(1).write.option("header", "true").csv(dir + "/test_item_index")
          merged
        }

      (test_user_index, test_item_index)
    }
  }


  /**
    * Appends a monotonically increasing Long id column to every row of a DataFrame.
    *
    * @param spark      active Spark session, used to rebuild the DataFrame from the indexed RDD
    * @param id         name of the new index column
    * @param df         input DataFrame
    * @param startIndex first index value to assign (defaults to 0)
    * @return the input DataFrame with an extra Long column named `id`
    */
  def zipWithIndex(spark: SparkSession, id: String, df: DataFrame, startIndex: Long = 0): DataFrame = {
    // Extend the original schema with the new Long index field.
    val indexedSchema = df.schema.add(StructField(id, LongType))
    // Number the rows through the RDD API, then fold the offset index back into each row.
    val indexedRows = df.rdd.zipWithIndex().map { case (row, idx) =>
      Row.merge(row, Row(idx + startIndex))
    }
    spark.createDataFrame(indexedRows, indexedSchema)
  }
}
