package main.scala.demo

import java.util.Properties

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * SparkSQLDemo1 — demonstrates building a DataFrame from an RDD (via an
  * explicit StructType schema or a case class) and writing a SQL query
  * result to MySQL through JDBC.
  *
  * @author zhangyimin
  * @date 2018-10-10 2:58 PM
  * @version 1.0
  */
object SparkSQLDemo1 {

  // Default location of the space-separated input file; can be overridden via args(0).
  private val DefaultInputPath = "/Users/zhangyimin/training/data/input/all.txt"
  // MySQL target. NOTE(review): credentials below are hard-coded — move URL/user/password to configuration.
  private val JdbcUrl = "jdbc:mysql://localhost:3306/hive_etl?characterEncoding=utf-8&useSSL=false"

  /**
    * Entry point: reads space-separated student records (id name age), keeps
    * rows with age <= 24, and appends them to the MySQL table `stu`.
    *
    * @param args optional; args(0) overrides the default input path
    */
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so demo output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    val spark = SparkSession.builder().appName("demo1").master("local").getOrCreate()
    try {
      val inputPath = args.headOption.getOrElse(DefaultInputPath)
      val stuRdd = spark.sparkContext.textFile(inputPath).map(_.split(" "))
      // Either schema strategy yields an equivalent DataFrame:
      //   val stuDF: DataFrame = structSchema(spark, stuRdd)
      val stuDF: DataFrame = caseSchema(spark, stuRdd)

      // createOrReplaceTempView avoids an AnalysisException if the view already exists.
      stuDF.createOrReplaceTempView("student")
      val result = spark.sql("select * from student where age<=24")

      // JDBC connection properties for the write below.
      val props = new Properties()
      props.setProperty("user", "root")
      props.setProperty("password", "123456")
      // SaveMode semantics: "error" (default, fail if table exists),
      // "overwrite" (replace), "append" (add rows), "ignore" (no-op if table exists).
      // The table does not need to exist beforehand; Spark creates it when missing.
      result.write.mode("append").jdbc(JdbcUrl, "stu", props)
    } finally {
      // Stopping the session also stops the underlying SparkContext,
      // so a separate sc.stop() is unnecessary.
      spark.stop()
    }
  }

  /**
    * Builds a DataFrame by binding an explicit StructType schema to the RDD.
    *
    * @param spark  active session used to create the DataFrame
    * @param stuRdd tokenized lines; each array is expected as (id, name, age)
    * @return DataFrame with columns id:Int, name:String, age:Int
    */
  private def structSchema(spark: SparkSession, stuRdd: RDD[Array[String]]): DataFrame = {
    // Explicit column definitions; all columns nullable.
    val schema = StructType(List(
      StructField("id", IntegerType, nullable = true),
      StructField("name", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true)
    ))
    // Map each tokenized line onto a Row matching the schema above.
    val rowRdd = stuRdd.map(fields => Row(fields(0).toInt, fields(1), fields(2).toInt))
    spark.createDataFrame(rowRdd, schema)
  }

  /**
    * Builds a DataFrame by mapping the RDD onto the Student case class,
    * letting Spark derive the schema from the case-class fields.
    *
    * @param spark  active session used to create the DataFrame
    * @param stuRdd tokenized lines; each array is expected as (id, name, age)
    * @return DataFrame with columns id:Int, name:String, age:Int
    */
  private def caseSchema(spark: SparkSession, stuRdd: RDD[Array[String]]): DataFrame = {
    val students = stuRdd.map(f => Student(f(0).toInt, f(1), f(2).toInt))
    // Equivalent alternative:
    //   import spark.implicits._
    //   students.toDF()
    spark.createDataFrame(students)
  }

}

/** One student record parsed from an input line (id, name, age); used by caseSchema to derive the DataFrame schema. */
case class Student(id: Int, name: String, age: Int)