package cn.doitedu.operate

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * @Date 22.4.10
 * @Created by HANGGE
 * @Description Demonstrates three DSL ways to rename DataFrame columns
 *              (select + as, selectExpr, withColumnRenamed) and toDF for
 *              headerless CSV data.
 */
/**
 * Demo of column-renaming techniques in the Spark SQL DSL:
 *   1. `select` with `Column.as` / `as` aliases
 *   2. `selectExpr` with SQL fragments
 *   3. `withColumnRenamed` applied column by column
 *   4. `toDF(...)` to rename every column of a headerless CSV at once
 */
object C03_DSL_Column_Rename {
  // Silence Spark's verbose INFO logging so the demo output stays readable
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {
    // 1. Obtain a local SparkSession
    val session = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()

    // Enables the $"col" string interpolator and implicit encoders
    import session.implicits._
    // Rich set of column-manipulation functions (col, lit, expr, ...)
    import org.apache.spark.sql.functions._

    // 2. Load a CSV with a header row, letting Spark infer column types
    val df: DataFrame = session.read.option("header", true).option("inferSchema", true).csv("file:///D://code/doit30_spark_sql/data/csv/Teacher2.csv")

    // Rename while selecting, via the $"col" interpolator or col().
    // NOTE: the symbol-literal form ('id) is deprecated in Scala 2.13 and
    // removed in Scala 3, so $"id" is used instead.
    df.select($"id" as "myid", $"name".as("myname"), col("age") as "myage").show()
    println("=====================================")
    // Same renames expressed as SQL fragments (the "as" keyword is optional)
    df.selectExpr("id as myid", "name as myname", "age myage").show()
    println("=====================================")
    // Rename after selecting, one column at a time
    df.select("id", "name", "age")
      .withColumnRenamed("id", "myid")
      .withColumnRenamed("name", "yourname")
      .withColumnRenamed("age", "ourage").show()

    println("----------------------没有头信息的数据-------------------")
    /**
     * Without a header row, Spark assigns default column names:
     * _c0, _c1, _c2, ...
     */
    val df2: DataFrame = session.read.option("inferSchema", true).csv("file:///D://code/doit30_spark_sql/data/csv/Teacher.csv")

    df2.select($"_c0" as "id").show()
    // toDF requires exactly as many names as the DataFrame has columns
    val df3 = df2.toDF("id", "name", "age", "gender", "city")
    df3.printSchema()
  }

}
