package com.guchenbo.spark.sql

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SaveMode, SparkSession}

import scala.collection.mutable.ListBuffer
import scala.util.Random

/**
 * @author guchenbo
 * @date 2022/4/13
 */
object Rdd2Df {

  /**
   * One-off Spark job:
   *  1. Reads a PSI-report CSV, duplicates its first 50 rows under two partition
   *     dates ("ds"), and writes them as a partitioned Hive table.
   *  2. Generates ~10k synthetic (type, score) rows and writes them out as CSV.
   *
   * Requires a cluster-side SparkSession (paths and table names are hard-coded
   * for the author's environment).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("demo").getOrCreate()
    val sc = spark.sparkContext

    try {
      val path = "/user/guchenbo/model_report_psi_col_df.csv"
      val df = spark.read.format("csv").option("header", "true").load(path)

      // Source schema extended with the partition column "ds".
      // (Single val expression instead of the original var-and-reassign.)
      val schema = df.schema.add(StructField("ds", StringType, nullable = true))

      // Re-shape a source row: keep col 0, derive a binary "type" column from
      // the parity of col 0's integer value, keep cols 2-3, append the
      // partition date.
      // NOTE(review): assumes e(0) holds a parseable integer string — a
      // non-numeric value throws NumberFormatException; confirm against the CSV.
      val f: (Row, String) => Row =
        (e: Row, ds: String) =>
          Row(e(0), (e(0).asInstanceOf[String].toInt % 2).toString, e(2), e(3), ds)

      // Same 50 rows duplicated under two partition dates.
      val rdd1 = sc.makeRDD(df.take(50)).map(f(_, "2022-04-01"))
      val rdd2 = sc.makeRDD(df.take(50)).map(f(_, "2022-05-01"))

      val psiDf = spark.createDataFrame(rdd1.union(rdd2), schema)
      psiDf.write
        .partitionBy("ds")
        .mode(SaveMode.Overwrite)
        .saveAsTable("turing_monitor.model_psi_test")

      // Synthetic scores: 10001 rows (i = 0..10000 inclusive, matching the
      // original `0 to 10000`), alternating type 0/1, value = random int in
      // [0, 10) plus a random fraction rounded to 2 decimals.
      // List.tabulate replaces the original mutable ListBuffer loop, and the
      // f-interpolator replaces the deprecated `.formatted("%.2f")`.
      val scores = List.tabulate(10001) { i =>
        (i % 2, Random.nextInt(10) + f"${Random.nextDouble()}%.2f".toDouble)
      }

      val rdd3 = sc.makeRDD(scores).map(e => Row("score", e._1, e._2))
      val scoreSchema = StructType(List(
        StructField("feature", StringType),
        StructField("type", IntegerType),
        StructField("value", DoubleType)
      ))
      // val instead of the original var (never reassigned).
      val df2 = spark.createDataFrame(rdd3, scoreSchema)

      df2.write
        .format("csv")
        .option("header", "true")
        .mode(SaveMode.Overwrite)
        .save("/user/guchenbo/score.csv")
    } finally {
      // Release cluster resources even if the job fails part-way.
      spark.stop()
    }
  }
}
