package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable.ListBuffer

object Code14RDDAndDF {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", "3")
      .getOrCreate()

    /**
     * RDD to DF:
     *   Option 1: an RDD of tuples — toDF requires explicit column names,
     *             e.g. tupleRDD.toDF("id", "name", "age", "gender", "clazz").
     *   Option 2: an RDD of case-class instances — toDF() infers the columns
     *             from the case-class field names (used below).
     *
     * DF to RDD:
     *   .rdd yields an RDD[Row]; Row provides accessors such as
     *   get / getAs[T](index or column name) to read individual columns.
     */

    val sc: SparkContext = spark.sparkContext

    // Parse each student line into a case-class instance so toDF() can infer the schema.
    // NOTE(review): fields(2).toInt assumes the age column is always numeric — a
    // malformed line would fail the task; acceptable for this teaching example.
    val stuRDD: RDD[StuRDD] = sc
      .textFile("spark_code/data/students.txt")
      .map { line =>
        val fields: Array[String] = line.split(",")
        StuRDD(fields(0), fields(1), fields(2).toInt, fields(3), fields(4))
      }

    import spark.implicits._

    // Case-class RDD -> DataFrame: column names come from StuRDD's fields.
    val stuDF: DataFrame = stuRDD.toDF()

    // DataFrame -> RDD[Row] (kept for demonstration; not used further below).
    val stuRowRDD: RDD[Row] = stuDF.rdd

    //    stuRowRDD
    //      .groupBy(row => row.getAs[String]("clazz"))
    //      .mapValues(_.size)
    //      .foreach(println)

    //    stuDF.show()

    // burks.csv layout: one row per company-year, with 12 monthly income
    // columns tsl01..tsl12 following the burk (company code) and year columns.
    val burkDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("burk String,year int,tsl01 int,tsl02 int,tsl03 int,tsl04 int,tsl05 int,tsl06 int,tsl07 int,tsl08 int,tsl09 int,tsl10 int,tsl11 int,tsl12 int")
      .load("spark_code/data/burks.csv")

    val burkRdd: RDD[Row] = burkDF.rdd

    // Unpivot the 12 monthly columns into one (burk, year, month, salary) row each.
    // Column index month + 1 maps month 1..12 onto tsl01..tsl12 (burk = 0, year = 1).
    burkRdd.flatMap { row =>
      val burk: String = row.getAs[String]("burk")
      val year: Int = row.getAs[Int]("year")
      (1 to 12).map(month => (burk, year, month, row.getAs[Int](month + 1)))
    }.toDF("burk", "year", "mon", "salary").createTempView("burk_tbl")

    // Cumulative monthly income per company per year.
    // Output: company code, year, month, income for the month, running total.
    spark.sql(
      """
        |SELECT
        |*
        |,sum(salary) over(partition by burk,year order by mon) as sum_salary
        |FROM burk_tbl
        |
        |""".stripMargin).show()

    // Release the local Spark resources before the JVM exits.
    spark.stop()
  }
}

/**
 * One student record parsed from students.txt.
 * Field names become the DataFrame column names when an RDD[StuRDD] is
 * converted with toDF(); `final` because case classes should not be extended.
 */
final case class StuRDD(id: String, name: String, age: Int, gender: String, clazz: String)
