package com.feidee.fd.sml.algorithm.util

import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * @Author songhaicheng
  * @Date 2018/08/22
  * @Email: haicheng_song@sui.com
  */
object TestingDataGenerator {

  // Single local SparkSession shared by every generator; lazily created on first use.
  lazy val spark: SparkSession = SparkSession
    .builder
    .master("local")
    .appName("Spark Test")
    .getOrCreate()

  /**
    * Builds a DataFrame whose feature values increase with the row index.
    *
    * @param numFeatures  number of feature dimensions (= number of raw columns - 1;
    *                     the trailing column holds the label)
    * @param numInstances number of generated rows
    * @param numClasses   number of label classes; binary classification by default
    * @return DataFrame with schema [features(vector), label(double)]
    */
  def makeOrderedTrainingData(numFeatures: Int, numInstances: Int, numClasses: Int = 2): DataFrame = {
    // Row i holds numFeatures copies of i.toDouble followed by its label (i mod numClasses).
    val data = Array.tabulate(numInstances) { i =>
      Array.fill(numFeatures)(i.toDouble) :+ (i % numClasses).toDouble
    }
    val rdd = spark.sparkContext.parallelize(data).map(v => Row.fromSeq(v.toSeq))
    // numFeatures feature columns plus one trailing label column, all DoubleType, named _c0.._cN.
    val st = StructType((0 to numFeatures).map(i => StructField(s"_c$i", DoubleType)))
    val raw = spark.createDataFrame(rdd, st)

    // Assemble every column except the last into a single "features" vector column.
    val assembler = new VectorAssembler().setInputCols(st.fieldNames.dropRight(1)).setOutputCol("features")
    assembler.transform(raw)
      .withColumn("label", raw.col(s"_c$numFeatures"))
      .select("features", "label")
  }

  /**
    * Small fixture with a label column, three string feature columns and
    * three numeric non-feature columns.
    */
  def makeOrderedFeaturesData(): DataFrame = {
    spark.createDataFrame(Seq(
      (1.0, "xx", "cc1", "hhh1", 1.0, 20.0, 14.0),
      (0.0, "xx1", "xx", "hhh2", 1.0, 20.0, 14.0),
      (1.0, "xx2", "xxf", "hhh3", 1.0, 20.0, 14.0)
    )).toDF("label", "features", "features2", "features3", "notFeatures1", "notFeatures2", "notFeatures3")
  }

  /** Fixture for ChiSqSelector: id, 4-dimensional feature vector, and a "clicked" label. */
  def makeChiSqSelectorData(): DataFrame = {
    spark.createDataFrame(Seq(
      (7, Vectors.dense(0.0, 0.0, 18.0, 1.0), 1.0),
      (8, Vectors.dense(0.0, 1.0, 12.0, 0.0), 0.0),
      (9, Vectors.dense(1.0, 0.0, 15.0, 0.1), 0.0)
    )).toDF("id", "features", "clicked")
  }

  /** Fixture for Imputer: two double columns containing NaN values to be imputed. */
  def makeImputerData(): DataFrame = {
    spark.createDataFrame(Seq(
      (1.0, Double.NaN),
      (2.0, Double.NaN),
      (Double.NaN, 3.0),
      (4.0, 4.0),
      (5.0, 5.0),
      (6.0, Double.NaN),
      (6.0, 8.0)
    )).toDF("a", "b")
  }

  /** Fixture for RFormula: id, categorical country, numeric hour, and a "clicked" label. */
  def makeRFormulaData(): DataFrame = {
    spark.createDataFrame(Seq(
      (7, "US", 18, 1.0),
      (8, "CA", 12, 0.0),
      (9, "NZ", 15, 0.0),
      (10, "JP", 15, 0.0),
      (11, "FG", 15, 0.0)
    )).toDF("id", "country", "hour", "clicked")
  }

  /**
    * Fixture for VectorSlicer: a single "userFeatures" vector column whose three
    * elements carry numeric attributes named f1/f2/f3.
    */
  def makeVectorSlicerData(): DataFrame = {
    // All vectors must have exactly 3 elements to match the 3-attribute group below.
    // (The original second row used Vectors.sparse(4, ...), which was inconsistent.)
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.sparse(3, Seq((0, -1.0), (2, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0)),
      Row(Vectors.dense(-9.0, 2.3, 1.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    // Typed map instead of an unchecked asInstanceOf cast on the array.
    val attrs: Array[Attribute] = Array("f1", "f2", "f3").map(name => defaultAttr.withName(name): Attribute)
    val attrGroup = new AttributeGroup("userFeatures", attrs)

    spark.createDataFrame(data, StructType(Array(attrGroup.toStructField())))
  }

  /** Fixture for AFTSurvivalRegression: label, censor indicator, and a 2-dim feature vector. */
  def makeAFTSurvivalRegressionData(): DataFrame = {
    spark.createDataFrame(Seq(
      (1.218, 1.0, Vectors.dense(1.560, -0.605)),
      (2.949, 0.0, Vectors.dense(0.346, 2.158)),
      (3.627, 0.0, Vectors.dense(1.380, 0.231)),
      (0.273, 1.0, Vectors.dense(0.520, 1.151)),
      (4.199, 0.0, Vectors.dense(0.795, -0.226))
    )).toDF("label", "censor", "features")
  }

  /**
    * Builds a single-column DataFrame from the given values and column name.
    * Typically used to create rows awaiting prediction.
    *
    * @example TestingDataGenerator.makeSingleColData(Seq("v1", "v2"), "col0")
    * @param data     the column values, one per row
    * @param colNames the column name(s); exactly one is expected for a Seq[String]
    * @return DataFrame that you need
    */
  def makeSingleColData(data: Seq[String], colNames: String*): DataFrame = {
    import spark.implicits._
    data.toDF(colNames: _*)
  }

  /**
    * Builds a two-column DataFrame from the given pairs and column names.
    *
    * @example TestingDataGenerator.make2ColData(Seq(("d11", "d21"), ("d12", "d22")), "col0", "col1")
    * @param data     the row values as (col0, col1) pairs
    * @param colNames the two column names
    * @return DataFrame that you need
    */
  def make2ColData(data: Seq[(String, String)], colNames: String*): DataFrame = {
    import spark.implicits._
    data.toDF(colNames: _*)
  }

  /** A Pushkin poem, one stanza per row — usable for tokenizer / document-analysis tests. */
  def pushkinPoem: DataFrame = {
    import spark.implicits._
    Seq("If I walk the noisy streets. Or enter a many thronged church. Or sit among the wild young generation. I give way to my thoughts.",
      "I say to myself: the years are fleeting. And however many there seem to be. We must all go under the eternal vault. And someone's hour is already at hand.",
      "When I look at a solitary oak I think: the patriarch of the woods. It will outlive my forgotten age. As it outlived that of my grandfathers'.",
      "If I caress a young child. Immediately I think: farewell! I will yield my place to you. For I must fade while your flower blooms.",
      "Each day, every hour. I habitually follow in my thoughts. Trying to guess from their number. The year which brings my death.",
      "And where will fate send death to me? In battle, in my travels, or on the seas? Or will the neighbouring valley. Receive my chilled ashes?",
      "And although to the senseless body. It is indifferent wherever it rots. Yet close to my beloved countryside. I still would prefer to rest.",
      "And let it be, beside the grave's vault. That young life forever will be playing. And impartial, indifferent nature. Eternally be shining in beauty.")
    .toDF("sentence")
  }

  /** Loads a libsvm-formatted text file from the given path. */
  def loadDataFromTxt(path: String): DataFrame = {
    spark.read.format("libsvm").load(path)
  }

  def sampleLibsvmData: DataFrame = {
    loadDataFromTxt("src/test/data/sample_libsvm_data.txt")
  }

  def sampleBinaryClassificationData: DataFrame = {
    loadDataFromTxt("src/test/data/sample_binary_classification_data.txt")
  }

  def sampleMulticlassClassificationData: DataFrame = {
    loadDataFromTxt("src/test/data/sample_multiclass_classification_data.txt")
  }

  def sampleLinearRegressionData: DataFrame = {
    loadDataFromTxt("src/test/data/sample_linear_regression_data.txt")
  }

  def sampleIsotonicRegressionData: DataFrame = {
    loadDataFromTxt("src/test/data/sample_isotonic_regression_libsvm_data.txt")
  }

}
