package com.zt.bigdata.spark.dataalgorithms.chapter04

import com.zt.bigdata.template.spark.BasicTemplate
import org.apache.spark.sql.Encoders

/**
  * Demonstrates LEFT OUTER JOIN between a transactions table and a users
  * table using Spark SQL temp views.
  */

/** A user record: a user identifier paired with the user's location identifier. */
case class User(
    user_id: String,
    location_id: String)

/**
  * A transaction record as read from the left-side CSV.
  * All fields are kept as String because the CSV source is untyped.
  */
case class Transaction(
    transaction_id: String,
    product_id: String,
    user_id: String,
    quantity: String,
    amount: String)

class LeftJoinSql extends BasicTemplate[Parameter] {

  /**
    * Reads transactions (left) and users (right) from CSV, registers both as
    * temp views, and runs three Spark SQL LEFT OUTER JOIN queries: the raw
    * join projection, a location count per product, and a distinct location
    * count per product. Results are printed with `show()`.
    *
    * @param parameter supplies the input paths `leftFile` and `rightFile`
    */
  override def process(parameter: Parameter): Unit = {
    val spark = buildSparkSession(parameter, Map("spark.sql.warehouse.dir" -> "/tmp/spark-warehouse"))

    // Derive the schemas from the case classes and hand them to the CSV
    // reader directly. The previous approach read untyped CSV and then
    // rebuilt a DataFrame via createDataFrame(df.rdd, schema), which adds a
    // needless DataFrame -> RDD -> DataFrame round-trip.
    val transactionSchema = Encoders.product[Transaction].schema
    val userSchema = Encoders.product[User].schema

    val left = spark.read.schema(transactionSchema).csv(parameter.leftFile)
    val right = spark.read.schema(userSchema).csv(parameter.rightFile)
    left.show()
    right.show()

    // createOrReplaceTempView is idempotent; createTempView throws
    // AnalysisException if a view with the same name already exists in
    // this session.
    left.createOrReplaceTempView("transactions")
    right.createOrReplaceTempView("users")

    // Plain left outer join: every transaction row, location_id null when
    // the user has no match.
    spark.sql("select product_id,location_id" +
      " from transactions LEFT OUTER JOIN users on transactions.user_id = users.user_id").show()

    // count(location_id) skips nulls, so unmatched transactions contribute 0.
    spark.sql(
      """select product_id,count(location_id)
        |from
        |transactions
        |LEFT OUTER JOIN users
        |on transactions.user_id = users.user_id
        |group by product_id""".stripMargin).show()

    // Same aggregation but counting each location only once per product.
    spark.sql(
      """select product_id,count(distinct location_id)
        |from
        |transactions
        |LEFT OUTER JOIN users
        |on transactions.user_id = users.user_id
        |group by product_id""".stripMargin).show()

    spark.stop()
  }
}