package com.study.spark.scala.dataset

import org.apache.spark.sql.SparkSession

/**
 * Demonstrates the join types available on Spark DataFrames/Datasets:
 * inner, full outer, left outer, right outer, left semi, and cross joins.
 *
 * @author stephen
 * @date 2019-09-27 17:05
 */
object JoinDemo {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("JoinDemo")
      .master("local[*]")
      .getOrCreate()

    // Brings in the encoders and the Seq-to-DataFrame conversions used below.
    import spark.implicits._

    // Left side: people with a country code.
    val people = Seq(
      (3, "zhangsan", "CN", 20),
      (4, "lisi", "CN", 30),
      (5, "wangwu", "JP", 40),
      (6, "zhaoliu", "KP", 50)
    ).toDF("id", "name", "c_code", "age")

    // Right side: country code to country name lookup.
    val countries = Seq(
      ("CN", "中国"),
      ("JP", "日本"),
      ("UK", "英国")
    ).toDF("c_code", "c_name")

    // Inner join: select * from people join countries on people.c_code = countries.c_code
    // Passing the column name (or a Seq of names) deduplicates the join column.
    people.join(countries, "c_code").show()
    people.join(countries, Seq("c_code")).show()
    // When the key columns have different names, use an explicit condition:
    //people.join(countries, $"c_code" === $"code").show()
    //people.join(countries, people("c_code") === countries("code")).show()
    // joinWith returns a Dataset of pairs, so its schema differs from join's flat columns.
    people.joinWith(countries, people("c_code") === countries("c_code")).show()

    // Full outer join: select * from people full outer join countries on people.c_code = countries.c_code
    people.join(countries, people("c_code") === countries("c_code"), "outer").show()

    // Left outer join: select * from people left join countries on people.c_code = countries.c_code
    people.join(countries, people("c_code") === countries("c_code"), "left_outer").show()

    // Right outer join: select * from people right join countries on people.c_code = countries.c_code
    people.join(countries, people("c_code") === countries("c_code"), "right_outer").show()

    // Left semi join: keeps matching left-side rows only; right-side columns are dropped.
    people.join(countries, people("c_code") === countries("c_code"), "leftsemi").show()

    // Cartesian product of the two DataFrames.
    people.crossJoin(countries).show()

    // Join on a compound condition:
    //people.join(countries, people("key1") === countries("key1") && people("key2") > countries("key2")).show()

    spark.stop()
  }
}
