package com.cxk

import com.cxk.fe.LightGBM_FE.{_0_, song_length}
import com.cxk.fe.{FeatureEngineering, LightGBM_FE, MeanEncoder}
import com.cxk.ml.TD_IDF
import org.apache.spark.sql.{DataFrame, SparkSession, functions}
import org.apache.spark.storage.StorageLevel

import scala.collection.mutable

object JobSubmitter {

  /** Driver entry point for the ad-hoc feature-engineering / training-data jobs
    * of the KBox music-recommendation pipeline.
    *
    * Usage: `JobSubmitter <data-dir>` — `args(0)` is the root directory that
    * holds the raw CSV inputs (train.csv, songs.csv, members.csv, ...) and
    * receives the derived outputs.
    *
    * The large commented-out sections below are one-off experiments (mean
    * encoding, index generation, explode/join steps) that were run once to
    * materialise intermediate CSVs; they are kept verbatim so any step can be
    * re-enabled without reconstruction.
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.nonEmpty, "usage: JobSubmitter <data-dir>")

    // Windows-only workaround: Hadoop needs hadoop.home.dir to locate winutils.exe.
    if (System.getProperty("hadoop.home.dir") == null)
      System.setProperty("hadoop.home.dir", "D:\\hadoop-3.0.0")

    val dir = args(0)

    // Create the session used to interact with Spark.
    val spark = SparkSession.builder().appName("KBox").getOrCreate()

    try {
      val fe = FeatureEngineering(dir, spark)
      //    val (userIndex, itemIndex) = fe.incrementIndex(true)
      //    fe.fillIndex(userIndex, itemIndex, save = true)

      //    fe.explode(fe.read_csv(dir + "/songs.csv"), true, "genre_ids", "artist_name", "composer", "lyricist")

      //    ALSRecommend.als(spark, fe.read_csv(dir + "/train.csv"), fe.read_csv(dir + "/user_index"), fe.read_csv(dir + "/item_index"))

      //    val train_cvs = fe.read_csv(dir + "/train.csv")

      //    val msnoEncoder = MeanEncoder(spark, Seq("msno"), MeanEncoder.priorWeightFunc(239.87702812550805, 314.13078816822343)).fit(train_cvs, "target")
      //    msnoEncoder.mean_code.foreach(df => df._2.repartition(1).write.option("header", "true").csv(dir + "/_mean_code_" + df._1))
      //
      //    val song_idEncoder = MeanEncoder(spark, Seq("song_id"), MeanEncoder.priorWeightFunc(20.494763394320575, 182.23722317522368)).fit(train_cvs, "target")
      //    song_idEncoder.mean_code.foreach(df => df._2.repartition(1).write.option("header", "true").csv(dir + "/_mean_code_" + df._1))
      //
      //    msnoEncoder.prior.foreach(p => println(p._1 + "-=>" + p._2))
      //    song_idEncoder.prior.foreach(p => println(p._1 + "=+>" + p._2))

      //    train_cvs.selectExpr("song_id", "1 AS c").groupBy("song_id").agg(functions.count("c").alias("count"))
      //      .selectExpr("AVG(count)", "STDDEV(count)").show()

      //    fe.read_csv(dir + "/_mean_code_msno").selectExpr("AVG(msno_predict_1)", "MIN(msno_predict_1)", "MAX(msno_predict_1)", "STDDEV(msno_predict_1)").show()

      //    fe.members_registration_expiration(true)

      //    fe.read_csv(dir + "/members.csv").selectExpr("CAST(bd AS INT) AS bd")
      //      .selectExpr("(CASE WHEN bd>0 AND bd<100 THEN bd ELSE -1 END) AS h", "1 AS i")
      //      .groupBy("h").agg(functions.count("i").alias("i")).orderBy("i")
      //      .show(200)
      //    fe.read_csv(dir + "/members.csv").where("bd=1051").select("bd").show()

      //    spark.sql("select pow(2,3.46063722919745052)").show()


      //    fe.explode(fe.read_csv(dir + "/songs.csv"), save = true, "genre_ids", "artist_name", "composer", "lyricist")


      //    fe.read_csv(dir + "/members.csv").selectExpr("registered_via", "1 AS t").groupBy("registered_via")
      //      .agg(functions.count("t").alias("t")).orderBy("t").show()

      //    LightGBM_FE._0_source_type(fe.read_csv(dir + "/train.csv")).selectExpr("source_type", "1 AS t")
      //      .groupBy("source_type").agg(functions.count("t").alias("t")).orderBy("t").show()


      //    val train = fe.read_csv(dir + "/train.csv")
      //    val songs = fe.read_csv(dir + "/songs.csv")
      //    val data = train.join(songs, Seq("song_id"), "left").persist(StorageLevel.MEMORY_AND_DISK)
      //    val df = (col: String) => fe.explode(data.select(col, "target"), save = false, col).persist(StorageLevel.MEMORY_AND_DISK)
      //
      //    val artistNameEncoder = MeanEncoder(spark, Seq("artist_name"), MeanEncoder.priorWeightFunc(167.73560654579182, 2546.0614526011373)).fit(df("artist_name"), "target")
      //    artistNameEncoder.mean_code.foreach(df => df._2.repartition(1).write.option("header", "true").csv(dir + "/_mean_code_" + df._1))
      //
      //    val composerEncoder = MeanEncoder(spark, Seq("composer"), MeanEncoder.priorWeightFunc(103.69816345553063, 1164.2442451329362)).fit(df("composer"), "target")
      //    composerEncoder.mean_code.foreach(df => df._2.repartition(1).write.option("header", "true").csv(dir + "/_mean_code_" + df._1))
      //
      //    val lyricistEncoder = MeanEncoder(spark, Seq("lyricist"), MeanEncoder.priorWeightFunc(139.66620538803204, 1477.5987260579288)).fit(df("lyricist"), "target")
      //    lyricistEncoder.mean_code.foreach(df => df._2.repartition(1).write.option("header", "true").csv(dir + "/_mean_code_" + df._1))
      //
      //    lyricistEncoder.prior.foreach(p => println(p._1 + "=÷>" + p._2))
      //    artistNameEncoder.prior.foreach(p => println(p._1 + "=+>" + p._2))
      //    composerEncoder.prior.foreach(p => println(p._1 + "-+>" + p._2))

      //    val df = fe.read_csv(dir + "/train.csv").select("msno", "song_id").groupBy("msno")
      //      .agg(functions.countDistinct("song_id").alias("msno_count"))
      //    FeatureEngineering.zipWithIndex(spark, "user_id", df.orderBy(df("msno_count").desc))
      //      .repartition(1).write.option("header", "true").csv(dir + "/_user_index")


      //    val count = fe.explode(fe.read_csv(dir + "/songs.csv"), false, "artist_name")
      //      .select("artist_name", "song_id").groupBy("artist_name")
      //      .agg(functions.countDistinct("song_id").alias("rank"))
      //    val count_df = FeatureEngineering.zipWithIndex(spark, "artist_id", count.orderBy(count("rank").desc))
      //    count_df.repartition(1).write.option("header", "true").csv(dir + "/artist_index")

      //    LightGBM_FE.lightGBMfe(spark, "D:\\BaiduNetdiskDownload\\Music_Recommendation\\LightGBM", hasTarget = true)
      //    LightGBM_FE.lightGBMfe(spark, "D:\\BaiduNetdiskDownload\\Music_Recommendation\\LightGBM")
      //        fe.read_csv("D:\\BaiduNetdiskDownload\\Music_Recommendation\\LightGBM\\lightGBM_fe_train").printSchema()

      // NOTE(review): hard-coded local path overrides the `dir` argument for the
      // active step below — presumably a leftover of local debugging; confirm
      // before running on a cluster.
      val d_ = "D:\\BaiduNetdiskDownload\\Music_Recommendation\\LightGBM"
      fe.read_csv(d_ + "/train_lightGBM").printSchema()

      //    val train_test_df = LightGBM_FE._0_(fe.read_csv(d_ + "/test.csv"), "id")
      //      .join(LightGBM_FE.song_extra_info(spark, fe.read_csv(d_ + "/song_extra_info.csv")), Seq("song_id"), "left")
      //      .join(LightGBM_FE.members(spark, fe.read_csv(d_ + "/members.csv"), fe.read_csv(d_ + "/members_init_expiration.csv")), Seq("msno"), "left")
      //    train_test_df.repartition(1).write.option("header", "true").csv(d_ + "/test_song_extra_info_members_tmp")


      //    val songs_df = song_length(fe.read_csv(d_ + "/songs.csv"))
      //    val explode = (col: String, d: DataFrame) => d.withColumn(col, functions.explode(functions.split(d(col), "[\\||&|/]")))

      //    explode("genre_ids", fe.read_csv(d_ + "/test_song_extra_info_members_tmp")
      //      .join(songs_df.select("song_id", "song_length", "genre_ids", "language"),
      //        Seq("song_id"), "left")).repartition(1).write.option("header", "true").csv(d_ + "/test_song_info_members_tmp")
      //
      //    explode("genre_ids", fe.read_csv(d_ + "/train_song_extra_info_members_tmp")
      //      .join(songs_df.select("song_id", "song_length", "genre_ids", "language"),
      //        Seq("song_id"), "left")).repartition(1).write.option("header", "true").csv(d_ + "/train_song_info_members_tmp")

      //    explode("artist_name", fe.read_csv(d_ + "/train_song_info_members_tmp")
      //      .join(songs_df.select("song_id", "artist_name"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_artist_name.csv"), Seq("artist_name"), "left")
      //      .join(fe.read_csv(d_ + "/artist_index.csv"), Seq("artist_name"), "left").drop("artist_name")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/train_song_info_members_explode_artist")
      //
      //    explode("artist_name", fe.read_csv(d_ + "/test_song_info_members_tmp")
      //      .join(songs_df.select("song_id", "artist_name"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_artist_name.csv"), Seq("artist_name"), "left")
      //      .join(fe.read_csv(d_ + "/artist_index.csv"), Seq("artist_name"), "left").drop("artist_name")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/test_song_info_members_explode_artist")

      //    explode("composer", fe.read_csv(d_ + "/test_song_info_members_explode_artist")
      //      .join(songs_df.select("song_id", "composer"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_composer.csv"), Seq("composer"), "left")
      //      .join(fe.read_csv(d_ + "/composer_index.csv"), Seq("composer"), "left").drop("composer")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/test_song_info_members_explode_artist_composer")
      //
      //    explode("composer", fe.read_csv(d_ + "/train_song_info_members_explode_artist")
      //      .join(songs_df.select("song_id", "composer"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_composer.csv"), Seq("composer"), "left")
      //      .join(fe.read_csv(d_ + "/composer_index.csv"), Seq("composer"), "left").drop("composer")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/train_song_info_members_explode_artist_composer")

      //    explode("lyricist", fe.read_csv(d_ + "/train_song_info_members_explode_artist_composer")
      //      .join(songs_df.select("song_id", "lyricist"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_lyricist.csv"), Seq("lyricist"), "left")
      //      .join(fe.read_csv(d_ + "/lyricist_index.csv"), Seq("lyricist"), "left").drop("lyricist")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/train_song_info_members_explode_artist_composer_lyricist")
      //
      //    explode("lyricist", fe.read_csv(d_ + "/test_song_info_members_explode_artist_composer")
      //      .join(songs_df.select("song_id", "lyricist"), Seq("song_id"), "left"))
      //      .join(fe.read_csv(d_ + "/mean_code_lyricist.csv"), Seq("lyricist"), "left")
      //      .join(fe.read_csv(d_ + "/lyricist_index.csv"), Seq("lyricist"), "left").drop("lyricist")
      //      .repartition(1).write.option("header", "true").csv(d_ + "/test_song_info_members_explode_artist_composer_lyricist")

      // Fill values for the mean-encoded prediction columns. Only consumed by
      // the commented-out join pipeline directly below — kept in place so that
      // pipeline can be re-enabled without edits.
      val vm = Map("song_id_predict_1" -> 0.5035170905697991, "msno_predict_1" -> 0.5035170825455738
        , "artist_name_predict_1" -> 0.5331470717936908, "composer_predict_1" -> 0.5021177556771674, "lyricist_predict_1" -> 0.5192823846976313)

      ////
      //    fe.read_csv(d_ + "/train_song_info_members_explode_artist_composer_lyricist")
      //      .join(fe.read_csv(d_ + "/mean_code_msno.csv"), Seq("msno"), "left")
      //      .join(fe.read_csv(d_ + "/user_index.csv"), Seq("msno"), "left").drop("msno")
      //      .join(fe.read_csv(d_ + "/mean_code_song_id.csv"), Seq("song_id"), "left")
      //      .join(fe.read_csv(d_ + "/item_index.csv"), Seq("song_id"), "left").drop("song_id")
      //      .na.fill(vm).na.fill(-1).repartition(1).write.option("header", "true").csv(d_ + "/train_lightGBM")
    } finally {
      // Release the Spark context even if a step above throws.
      spark.stop()
    }
  }
}
