package com.niit.spark.sql.test

import org.apache.spark.sql.functions.col
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Date:2025/5/14
 * Author：Ys
 * Description:
 */
object AddColumn {

  /**
   * Demonstrates adding a derived column to a DataFrame with `withColumn`:
   * reads employees from a CSV file and appends a `new_salary` column equal
   * to 1.1 times `salary`.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    // App name matches the object; the previous "JoinDataFrames" was
    // copy-pasted from another example.
    val spark = SparkSession
      .builder()
      .appName("AddColumn")
      .master("local[*]")
      .getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")

    // inferSchema makes `salary` numeric instead of String, so the
    // arithmetic below does not depend on implicit string coercion.
    val df: DataFrame = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("input/sql/employees.csv")

    // withColumn(name, expr): first argument is the new column's name,
    // second is the expression producing its values. Cast explicitly to
    // double so the result is well-defined even if inference picked an
    // integer type for `salary`.
    df.withColumn("new_salary", col("salary").cast("double") * 1.1).show()

    spark.stop()
  }

}
