package Join

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * Created by Administrator on 2018/5/28.
  */
/**
  * Demonstrates two ways of joining two DataFrames in Spark SQL:
  * 1) registering temp views and issuing a SQL JOIN, and
  * 2) using the DataFrame `join` API (here with a left outer join).
  *
  * Runs locally (`local[*]`) against small in-memory datasets and
  * prints both join results to stdout.
  */
object JoinTest {
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (entry point for Dataset/DataFrame APIs).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("JoinTest")
      .master("local[*]")
      .getOrCreate()

    // First dataset: "id,name,nationCode" CSV-style strings.
    import spark.implicits._
    val lines: Dataset[String] = spark.createDataset(List("1,lsl,china", "2,yjr,usa", "3,xd,jp"))

    // Parse each line into a typed tuple (id, name, nationCode).
    val tpDs: Dataset[(Long, String, String)] = lines.map(line => {
      val fields: Array[String] = line.split(",")
      val id = fields(0).toLong
      val name = fields(1)
      val nationCode = fields(2)
      (id, name, nationCode)
    })
    val df1: DataFrame = tpDs.toDF("id", "name", "nation")

    // Second dataset: "englishName,chineseName" pairs (note: no entry for "jp",
    // which is what makes the left outer join below produce a null row).
    val nations: Dataset[String] = spark.createDataset(List("china,中国", "usa,美国"))
    // Parse into (ename, cname) tuples.
    val ndataSet: Dataset[(String, String)] = nations.map(l => {
      val fields: Array[String] = l.split(",")
      val ename = fields(0)
      val cname = fields(1)
      (ename, cname)
    })
    val df2: DataFrame = ndataSet.toDF("ename", "cname")

    // Approach 1: register temp views and join via SQL (JOIN defaults to inner).
    // createOrReplaceTempView is idempotent — plain createTempView throws if the
    // view name already exists in the session (e.g. on re-run in a REPL).
    df1.createOrReplaceTempView("v_users")
    df2.createOrReplaceTempView("v_nations")
    val r1: DataFrame = spark.sql("SELECT name, cname FROM v_users JOIN v_nations ON nation = ename")
    r1.show()

    // Approach 2: the DataFrame join API, here with an explicit left outer join
    // so unmatched users (nation "jp") are kept with a null cname.
    val r2: DataFrame = df1.join(df2, $"nation" === $"ename", "left_outer")
    r2.show()

    // Release cluster resources.
    spark.stop()
  }
}
