package io.sqrtqiezi.spark.regex

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
 * Demonstrates Spark SQL string/regex functions (`regexp_replace`,
 * `translate`, `regexp_extract`, `Column.contains`) against a retail
 * CSV dataset. Intended to be run locally; prints results to stdout.
 */
object RegexSample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("regex sample")
      .getOrCreate()

    // Read one day of retail data; header row supplies column names
    // (including "Description", used throughout below).
    val df = spark.read.format("csv")
      .option("inferSchema", "true")
      .option("header", "true")
      .load("data/retail-data/by-day/2010-12-01.csv")

    df.show()

    val simpleColors = Seq("black", "white", "red", "green", "blue")

    // Alternation pattern like "BLACK|WHITE|..." — replaces any color
    // word in Description with the literal "COLOR".
    val regexString = simpleColors.map(_.toUpperCase).mkString("|")
    df.select(
      regexp_replace(col("Description"), regexString, "COLOR").alias("color_clean"),
      col("Description")
    ).show()

    // Character-level substitution: L->1, E->3, F->3, T->7.
    df.select(translate(col("Description"), "LEFT", "1337"), col("Description"))
      .show(2)

    // Capturing-group pattern "(BLACK|WHITE|...)" — extracts the first
    // matched color (group 1), or "" when no color is present.
    val regexString2 = simpleColors.map(_.toUpperCase).mkString("(", "|", ")")
    df.select(
      regexp_extract(col("Description"), regexString2, 1).alias("color_clean"),
      col("Description")
    ).show()

    // Simple substring checks combined with a boolean OR column.
    val containsBlack = col("Description").contains("BLACK")
    val containsWhite = col("Description").contains("WHITE")
    df.withColumn("hasSimpleColor", containsBlack.or(containsWhite))
      .where("hasSimpleColor")
      .select("Description")
      .show(3, false)

    // One boolean column per color (is_black, is_white, ...) plus all
    // original columns via expr("*").
    val selectedColumns = simpleColors.map(color => {
      col("Description").contains(color.toUpperCase).alias(s"is_$color")
    }) :+ expr("*")

    selectedColumns.foreach(println)

    // explain() returns Unit and prints the plan itself; wrapping it in
    // println would print a stray "()" after the plan.
    df.select(selectedColumns:_*).explain()

    df.select(selectedColumns:_*)
      .where(col("is_white").or(col("is_red")))
      .show(3, false)

    // Release the session's resources so the JVM can exit cleanly.
    spark.stop()
  }
}
