// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.

package com.microsoft.azure.synapse.ml.lightgbm.split2

import com.microsoft.azure.synapse.ml.lightgbm.{LightGBMConstants, LightGBMRegressionModel}
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.sql.functions.{avg, col, lit, when}
import org.apache.spark.sql.{DataFrame, Row}

// scalastyle:off magic.number

/** Tests to validate the functionality of LightGBM module in streaming mode.
  */
/** Tests to validate the functionality of LightGBM module in streaming mode.
  */
class VerifyLightGBMRegressorStream extends LightGBMRegressorTestData {
  // All tests in this suite exercise the streaming data-transfer path.
  override val dataTransferMode: String = LightGBMConstants.StreamingDataTransferMode

  // Smoke tests: train on each benchmark CSV and verify the learner runs end-to-end.
  test(verifyLearnerTitleTemplate.format(energyEffFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(energyEffFile, "Y1", 0,
      Some(Seq("X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "Y2")))
  }
  test(verifyLearnerTitleTemplate.format(airfoilFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(airfoilFile, "Scaled sound pressure level", 1)
  }
  test(verifyLearnerTitleTemplate.format(tomsHardwareFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(tomsHardwareFile, "Mean Number of display (ND)", -4)
  }
  test(verifyLearnerTitleTemplate.format(machineFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(machineFile, "ERP", -2)
  }
  /* TODO: Spark doesn't seem to like the column names here because of '.', figure out how to read in the data
  test(verifyLearnerTitleTemplate.format(slumpFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(slumpFile, "Compressive Strength (28-day)(Mpa)", 2)
  } */
  test(verifyLearnerTitleTemplate.format(concreteFile, dataTransferMode)) {
    verifyLearnerOnRegressionCsvFile(concreteFile, "Concrete compressive strength(MPa, megapascals)", 0)
  }

  test("Compare benchmark results file to generated file") {
    verifyBenchmarks()
  }

  test("Verify LightGBM Regressor can be run with TrainValidationSplit" + executionModeSuffix) {
    // Reserve a bank of ports up front so parallel trials don't collide.
    for (_ <- 0 until 20) getAndIncrementPort()
    val regressor = baseModel

    // Grid over tree-shape, iteration count, and both regularization terms.
    val grid = new ParamGridBuilder()
      .addGrid(regressor.numLeaves, Array(5, 10))
      .addGrid(regressor.numIterations, Array(10, 20))
      .addGrid(regressor.lambdaL1, Array(0.1, 0.5))
      .addGrid(regressor.lambdaL2, Array(0.1, 0.5))
      .build()

    val tvs = new TrainValidationSplit()
      .setEstimator(regressor)
      .setEvaluator(new RegressionEvaluator().setLabelCol(labelCol))
      .setEstimatorParamMaps(grid)
      .setTrainRatio(0.8)
      .setParallelism(2)

    val tvsModel = tvs.fit(airfoilDF)
    tvsModel.transform(airfoilDF)
    assert(tvsModel != null)

    // The winning model's serialized text should carry one of the searched lambda values.
    val boosterText = tvsModel.bestModel.asInstanceOf[LightGBMRegressionModel].getModel.modelStr.get
    assert(boosterText.contains("[lambda_l1: 0.1]") || boosterText.contains("[lambda_l1: 0.5]"))
    assert(boosterText.contains("[lambda_l2: 0.1]") || boosterText.contains("[lambda_l2: 0.5]"))
  }

  test("Verify LightGBM with single dataset mode" + executionModeSuffix) {
    // Train once with single-dataset mode on, once with it off; both must succeed.
    val data = airfoilDF
    val singleModeModel = baseModel.setUseSingleDatasetMode(true)
    singleModeModel.fit(data).transform(data).show()

    val multiModeModel = baseModel.setUseSingleDatasetMode(false)
    multiModeModel.fit(data).transform(data).show()
  }

  test("Verify LightGBM Regressor with weight column" + executionModeSuffix) {
    // Uniform weights versus heavy weights on high-label rows: the latter
    // should pull the average prediction upward.
    val uniformDF = airfoilDF.withColumn(weightCol, lit(1.0))
    val skewedDF = uniformDF.withColumn(
      weightCol, when(col(labelCol) > 120, 1000.0).otherwise(1.0))

    val weightedModel = baseModel.setWeightCol(weightCol)

    // Fit on the given frame and return the mean of the prediction column.
    def meanPrediction(frame: DataFrame): Double =
      weightedModel.fit(frame).transform(frame).select(avg("prediction")).first().getDouble(0)

    assert(meanPrediction(uniformDF) < meanPrediction(skewedDF))
  }

  test("Verify LightGBM Regressor categorical parameter" + executionModeSuffix) {
    val splits = flareDF.randomSplit(Array(0.8, 0.2), seed)
    val (train, test) = (splits(0), splits(1))
    // Columns prefixed "c_" are the categorical slots in the flare dataset.
    val categoricalModel = baseModel.setCategoricalSlotNames(flareDF.columns.filter(_.startsWith("c_")))
    val score = regressionEvaluator.evaluate(categoricalModel.fit(train).transform(test))

    // Verify we get good result
    assert(score < 0.62)
  }

  test("Verify LightGBM Regressor with bad column names fails early" + executionModeSuffix) {
    // Slot names containing LightGBM's reserved characters must be rejected up front.
    val badSlots = (0 until 22).map(i => "Invalid characters \",:[]{} " + i).toArray
    val badSlotModel = baseModel.setSlotNames(badSlots)
    interceptWithoutLogging[IllegalArgumentException] {
      badSlotModel.fit(flareDF).transform(flareDF).collect()
    }
  }

  test("Verify LightGBM Regressor with tweedie distribution" + executionModeSuffix) {
    // Reserve ports for the parallel cross-validation trials.
    for (_ <- 0 until 10) getAndIncrementPort()
    val tweedieModel = baseModel.setObjective("tweedie").setTweedieVariancePower(1.5)

    val powerGrid = new ParamGridBuilder()
      .addGrid(tweedieModel.tweedieVariancePower, Array(1.0, 1.2, 1.4, 1.6, 1.8, 1.99))
      .build()

    val crossValidator = new CrossValidator()
      .setEstimator(tweedieModel)
      .setEvaluator(new RegressionEvaluator().setLabelCol(labelCol))
      .setEstimatorParamMaps(powerGrid)
      .setNumFolds(3)
      .setParallelism(2)

    // Choose the best model for tweedie distribution
    assertFitWithoutErrors(crossValidator, airfoilDF)
  }

  test("Verify LightGBM Regressor features shap" + executionModeSuffix) {
    val splits = flareDF.randomSplit(Array(0.8, 0.2), seed)
    val (train, test) = (splits(0), splits(1))
    val shapEstimator = baseModel
      .setFeaturesShapCol(featuresShapCol)
      .setCategoricalSlotNames(flareDF.columns.filter(_.startsWith("c_")))
    val shapModel = shapEstimator.fit(train)

    val scored = shapModel.transform(test)

    // SHAP output has one value per feature plus the expected-value term.
    val shapValues: Array[Double] = scored.select(featuresShapCol).rdd.map {
      case Row(v: Vector) => v
    }.first.toArray

    assert(shapValues.length == (shapModel.getModel.numFeatures + 1))

    // Clearing the SHAP column name drops the column from the output.
    val rescored = shapModel.setFeaturesShapCol("").transform(test)
    assert(!rescored.columns.contains(featuresShapCol))
  }
}
