package com.zyh.day04

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import java.sql.{Connection, DriverManager}

object FirstSparkSQLTest {

  /** Minimal Spark SQL example: loads a whitespace-delimited user file,
    * converts it to a DataFrame, registers it as a temp view, and queries it.
    *
    * @param args optional first argument: path to the input file
    *             (defaults to "D:/users.txt" for backward compatibility)
    */
  def main(args: Array[String]): Unit = {

    // 1. Create the Spark SQL entry point (SparkSession).
    val sparkSession: SparkSession = SparkSession
      .builder()
      .appName("sparkSQL")
      .master("local[*]")
      .getOrCreate()

    // Close the session even if the job fails (the original leaked it on error).
    try {
      // Input path is parameterized; falls back to the original hard-coded path.
      val inputPath: String = args.headOption.getOrElse("D:/users.txt")

      // 2. Load the source data and convert it to a DataFrame.
      val rdd1: RDD[String] = sparkSession.sparkContext.textFile(inputPath)
      // Split each line on runs of whitespace.
      val rdd2: RDD[Array[String]] = rdd1.map(_.split("\\s+"))
      // Array[String] => Tuple4, so each element maps onto one DataFrame row.
      // NOTE(review): assumes every line has at least 4 fields and that
      // fields 0 and 2 are numeric — malformed lines fail at runtime; confirm
      // the input format before hardening further.
      val rdd3: RDD[(Int, String, Int, String)] =
        rdd2.map(a => (a(0).toInt, a(1), a(2).toInt, a(3)))

      // Implicit conversions required for rdd.toDF.
      import sparkSession.implicits._
      // Column names must be supplied explicitly for a tuple RDD.
      val df: DataFrame = rdd3.toDF("id", "name", "age", "sex")

      // 3. Register the DataFrame as a temporary view.
      df.createOrReplaceTempView("t_user")

      // 4. Execute the SQL query against the view.
      val sql = "select name,age from t_user"
      val result: DataFrame = sparkSession.sql(sql)

      // 5. Process (print) the result.
      result.show()
    } finally {
      // 6. Release resources unconditionally.
      sparkSession.close()
    }
  }
}
