package com.yang.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, functions}
import org.apache.spark.{SparkConf, SparkContext}


object SparkApp {

  /**
    * Reference collection of spark-sql DataFrame operations (Spark 1.x API).
    *
    * Submit with:
    * spark-submit --class com.asiainfo.scala.InModer --master yarn-client \
    *   --driver-memory 1G --executor-memory 1G --executor-cores 1 ~/bigdata-spark-app.jar
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("spark-sql")
    val sc = new SparkContext(conf)
    val sql = new HiveContext(sc)

    // Build a DataFrame from a Hive table.
    // (Removed the no-op .stripMargin — the literal contains no margin marker.)
    val df = sql.sql("select * from table")

    // Build a DataFrame from a delimited text file.
    // The schema declares 8 string columns, so every Row must carry exactly
    // 8 values. The original code built 2-field Rows (p(0), p(1)) against an
    // 8-column schema, which fails at runtime when the DataFrame is evaluated;
    // Row.fromSeq over the whole split array keeps row width and schema in sync.
    val columns = "area_code,city_code,city_name,parent_code,parent_name,lat,lng,is_airport_city"
    // Single StructType wrap — the original StructType(StructType(...)) was redundant.
    val schema = StructType(columns.split(",").map(fieldName => StructField(fieldName, StringType, true)))
    val city_info = sc.textFile("/path").map(_.split(",")).map(p => Row.fromSeq(p.map(_.trim).toSeq))
    val city = sql.createDataFrame(city_info, schema)

    // Build a DataFrame from a JSON file (one JSON object per line).
    val info = sql.read.json("/path")

    // Build a DataFrame from a relational database table (MySQL as example).
    // NOTE(review): "jdbc:3306" is a placeholder; a real URL looks like
    // "jdbc:mysql://host:3306/dbname" — confirm before use.
    val jdbc = sql.read.format("jdbc").options(Map("url" -> "jdbc:3306","user" -> "user","password" -> "pwd","dbtable" -> "tablename")).load()

    // Print the first 10 rows (default is 20 when no argument is given).
    df.show(10)
    // Equivalent of SQL "limit 10".
    df.limit(10)

    // Row filtering — where and filter are synonyms.
    df.where("name='yang'")
    df.filter("name='yang'")

    // Column projection.
    df.select("name","age")
    df.select(df("name"),df("age") + 1)
    df.selectExpr("substr(name,1,2)","age as ages")

    // Drop a column.
    df.drop("name")

    // Sorting.
    df.orderBy(df("name").desc)

    // Aggregations.
    df.groupBy("name").sum()
    df.groupBy("name").max()
    df.agg("name" -> "max","age" -> "sum")
    df.groupBy("name").agg("name" -> "count")

    // Remove fully duplicated rows.
    df.distinct()
    // Remove duplicates on specific columns (like "select distinct a,b").
    df.dropDuplicates(Seq("name"))

    // Union (Spark 1.x unionAll; both sides must share the same schema).
    df.unionAll(df)

    // Joins — join columns must exist on both sides.
    df.join(df,Seq("name"),"inner")
    df.join(df,df("name") === df("name11"),"inner")

    // Rows present in both DataFrames.
    df.intersect(df)

    // Rows in the left DataFrame that are absent from the right.
    df.except(df)

    // Rename a column.
    df.withColumnRenamed("id","id1")

    // Add or replace a column.
    df.withColumn("wight",df("wight"))

    // Row-to-rows expansion (explode is deprecated in later Spark versions
    // in favor of functions.explode, but matches the 1.x API used here).
    df.explode("name","new"){p:String => p.split(" ")}

    // Register as a temporary table (dropped when the application ends).
    df.registerTempTable("temp_table")

    // Window function: number rows per phone_no ordered by num.
    // The alias must be applied to the COLUMN inside select — the original
    // chained .alias("rank") onto the DataFrame returned by select, which
    // aliases the whole DataFrame instead of naming the row_number column.
    df.select(
      df("phone_no"),
      functions.row_number().over(Window.partitionBy("phone_no").orderBy(df("num"))).alias("rank")
    )

    // Release cluster resources before exiting.
    sc.stop()
  }

}
