package doit20.sparksql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.apache.spark.sql.expressions.MutableAggregationBuffer
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-10
 * @desc Example of the SparkSQL APIs for saving computation results
 */
object Demo12 {

  /**
   * Demonstrates the SparkSQL save APIs (parquet/orc/csv/json/text) and
   * several ways to collapse a structured Row into a single string column
   * so the result can be written as a plain text file.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("dsl风格api演示")
      // Can be overridden at submit time, e.g.:
      //   spark-submit --master yarn --deploy-mode cluster --conf spark.sql.shuffle.partitions=400
      // (the original comment used "--spark.sql.shuffle.partitions=400", which is not a valid flag)
      .config("spark.sql.shuffle.partitions", 1)
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Explicit schema for the CSV input: id,name,age,score,gender
    val schema = StructType(Seq(
      StructField("id", DataTypes.IntegerType),
      StructField("name", DataTypes.StringType),
      StructField("age", DataTypes.IntegerType),
      StructField("score", DataTypes.DoubleType),
      StructField("gender", DataTypes.StringType)
    ))
    val frame = spark.read.schema(schema).option("header", "true").csv("data/stu2.txt")

    // Save as parquet:
    // frame.write.parquet("/path/path")

    // Save as orc:
    // frame.write.orc("/path/path")

    // Save as csv:
    // frame.write.csv("/path/path")

    // Save as json:
    // frame.write.json("/path/path")

    // To save as a plain text file, the row structure must first be
    // collapsed into a single (string) column:
    // frame.write.text("data/output")

    /*
     * Option 1: drop the DataFrame down to an RDD and concatenate the
     * fields by hand with a map — works, but needlessly roundabout.
     */
    val res: RDD[String] = frame.rdd.map(row => {
      val id = row.getAs[Int]("id")
      val name = row.getAs[String]("name")
      val age = row.getAs[Int]("age")
      val score = row.getAs[Double]("score")
      val gender = row.getAs[String]("gender")

      Array(id, name, age, score, gender).mkString(",")
    })

    import spark.implicits._
    // mode("overwrite") is required: both text writes in this demo target
    // "data/output"; with the default SaveMode.ErrorIfExists the second
    // write would fail with "path already exists".
    res.toDF().write.mode("overwrite").text("data/output")

    // Option 2: extract the fields from the Row via pattern matching.
    /*    frame.rdd.map(
      row => {
        row match {
          case Row(id: Int, name: String, age: Int, score: Double, gender: String) => Array(id, name, age, score, gender).mkString(",")
          case _ =>
        }
      })
   */

    // A partial-function literal simplifies the syntax above.
    // NOTE: this transformation is never followed by an action, so it is
    // lazy and never actually executes — it is shown for illustration only.
    frame.rdd.map(
      {
        case Row(id: Int, name: String, age: Int, score: Double, gender: String) => Array(id, name, age, score, gender).mkString(",")
      }
    )

    // A `{ case Row ... }` literal is compiled into a PartialFunction.
    val f: PartialFunction[Row, String] = {
      case Row(id: Int, name: String, age: Int, score: Double, gender: String) => Array(id, name, age, score, gender).mkString(",")
      case _ => ""
    }
    frame.rdd.map(f) // also lazy; illustration only

    /**
     * A partial function is a special kind of function:
     * it may be defined for only a subset of its input domain
     * (isDefinedAt reports whether a given input is handled).
     */
    val f2 = new PartialFunction[Row, String] {
      override def isDefinedAt(x: Row): Boolean = true

      override def apply(v1: Row): String = v1 match {
        case Row(id: Int, name: String) => ""
        case _ => ""
      }
    }

    /*
     * Option 3 (simplest): concatenate the columns directly in SQL
     * with concat_ws — no RDD round-trip needed.
     */
    import org.apache.spark.sql.functions._
    frame.select(concat_ws(",", 'id, 'name, 'age, 'score, 'gender))
      .write.mode("overwrite").text("data/output")

    spark.close()
  }

}
