package com.shujia.spark.sql

import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of registering user-defined functions with Spark SQL.
  *
  * - UDF  : one-to-one mapping (one input row value -> one output value)
  * - UDAF : many-to-one mapping (a group of rows -> one aggregated value)
  *
  * Reads `spark/data/students.json`, applies a string-truncating UDF, writes
  * the result as JSON, then runs a custom UDAF (`StringCount`, defined
  * elsewhere in this project) as a grouped aggregate.
  */
object Demo5UDF {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("app").setMaster("local")

    val sc = new SparkContext(conf)
    try {
      val sqlContext = new SQLContext(sc)

      val student = sqlContext.read.json("spark/data/students.json")
      student.registerTempTable("student")

      // Accumulator created on the driver; incremented from executor-side UDF code.
      val i = sc.accumulator(0)

      // Custom UDF, defined on the driver.
      sqlContext.udf.register("subClazz", (clazz: String) => {
        // Accumulators and broadcast variables may be used inside a UDF.
        i.add(1)
        // The UDF body itself runs on the executors.
        clazz.substring(0, 2)
      })

      // The write is an action, so it triggers the job that actually runs the
      // UDF; only after it completes does the accumulator hold a meaningful value.
      sqlContext.sql("select id,subClazz(clazz) from student").write.mode(SaveMode.Overwrite).json("spark/data/udf")

      println(i.value)

      sqlContext.sql("select count(distinct clazz) from student").show()

      /**
        * udf  : one-to-one relationship
        * udaf : many-to-one relationship
        */
      // Custom count implemented as a UDAF (StringCount is declared elsewhere in this project).
      sqlContext.udf.register("stringCount", new StringCount)

      sqlContext.sql("select clazz,stringCount(clazz) from student group by clazz").show()
    } finally {
      // Always release the SparkContext so the local cluster resources are freed,
      // even if one of the jobs above throws.
      sc.stop()
    }
  }
}
