package cn.doitedu.operate

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demonstration of the DataFrame DSL join API: Cartesian product, expression
 * conditions, USING-column joins, and explicit join types (left/right).
 *
 * @author HANGGE
 * @since 2022-04-10
 */
object C07_DSL_Join {
  // Silence Spark's verbose INFO logging so the demo output stays readable.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
   * Runs each DSL join variant on two CSV-backed DataFrames and prints the results.
   *
   * @param args optional overrides: args(0) = path of the first CSV,
   *             args(1) = path of the second CSV; defaults to the local demo files.
   */
  def main(args: Array[String]): Unit = {
    // 1. Build (or reuse) a local SparkSession.
    val session = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()

    // Input paths are overridable from the command line, falling back to the demo files.
    val path1 = if (args.length > 0) args(0) else "file:///D://code/doit30_spark_sql/data/csv/Teacher2.csv"
    val path2 = if (args.length > 1) args(1) else "file:///D://code/doit30_spark_sql/data/csv/orders.csv"

    // 2. Load the data: the header row supplies column names, column types are inferred.
    val df1: DataFrame = session.read.option("header", true).option("inferSchema", true).csv(path1)
    val df2: DataFrame = session.read.option("header", true).option("inferSchema", true).csv(path2)

    // No join condition given => Cartesian product of the two DataFrames.
    val res1 = df1.join(df2)
    res1.show()
    println("-----------------------------------")

    // Column-expression condition: on t1.id = t2.id (both id columns are kept).
    val res2 = df1.join(df2, df1("id") === df2("id"))
    res2.show()
    println("-----------------------------------")

    // USING-column join on a shared column name: on t1.id = t2.id
    // (the shared id column appears only once in the result).
    val res3 = df1.join(df2, "id")
    res3.show()
    println("-----------------------------------")

    // Multiple shared columns plus an explicit join type:
    // on t1.id = t2.id and t1.money = t2.money
    val res4 = df1.join(df2, List("id", "money"), "left")
    res4.show()
    println("-----------------------------------")
    val res5 = df1.join(df2, List("id", "money"), "right")
    res5.show()

    // Release the resources held by this SparkSession.
    session.stop()
  }
}
