import cn.doitedu.commons.util.SparkUtil
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory, StructObjectInspector}
import org.apache.spark.sql.SparkSession

class UDTFDemo extends GenericUDTF {

  /**
   * Declares the output schema of this UDTF: each forwarded row has three
   * columns — id: string, itv: int, times: int.
   *
   * Also validates the input arity up front: process() reads objects(0)..objects(3),
   * so exactly four arguments (id, start, end, pre) are required.
   */
  override def initialize(argOIs: StructObjectInspector): StructObjectInspector = {
    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException
    // later inside process().
    val argCount = argOIs.getAllStructFieldRefs.size
    require(argCount == 4,
      s"UDTFDemo expects exactly 4 arguments (id, start, end, pre), got $argCount")

    val fieldNames = new java.util.ArrayList[String]()
    val fieldOIs = new java.util.ArrayList[ObjectInspector]()

    // Default names of the output columns.
    fieldNames.add("id")
    fieldNames.add("itv")
    fieldNames.add("times")
    // Types of the output columns.
    fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector)
    fieldOIs.add(PrimitiveObjectInspectorFactory.javaIntObjectInspector)
    fieldOIs.add(PrimitiveObjectInspectorFactory.javaIntObjectInspector)

    ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs)
  }

  /**
   * Emits two rows for every input row (id, start, end, pre):
   *   (id, 0,           end - start)
   *   (id, start - pre, 1)
   */
  override def process(objects: Array[AnyRef]): Unit = {
    val uid = objects(0).toString
    val start = objects(1).asInstanceOf[Int]
    val end = objects(2).asInstanceOf[Int]
    val pre = objects(3).asInstanceOf[Int]
    // Box the Ints explicitly so each forwarded row is Array[AnyRef] holding
    // java.lang.Integer values, matching the javaIntObjectInspector declared in
    // initialize() — the original Array(uid, 0, ...) silently widened to Array[Any].
    forward(Array[AnyRef](uid, Int.box(0), Int.box(end - start)))
    forward(Array[AnyRef](uid, Int.box(start - pre), Int.box(1)))
  }

  // No per-query resources are held, so nothing to release.
  override def close(): Unit = ()
}

object TestUDTF {

  /**
   * Smoke-tests UDTFDemo: registers it as a temporary Hive function and runs it
   * over a tiny in-memory table.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder
      .master("local")
      .appName("UserAnalysis")
      .enableHiveSupport() // Hive support is required for CREATE TEMPORARY FUNCTION
      .getOrCreate()

    import spark.implicits._

    // Two sample rows: (id, start, end, pre).
    val sampleRows = Seq(("a", 12, 15, 8), ("b", 10, 16, 8))
    val frame = spark.createDataset(sampleRows).toDF("id", "start", "end", "pre")
    frame.createTempView("df")

    // Register the UDTF under the name xxyy, then let it explode each input row.
    spark.sql("CREATE TEMPORARY FUNCTION xxyy as 'UDTFDemo'")
    val result = spark.sql(
      """
        |select xxyy(id,start,end,pre) from df
        |""".stripMargin)
    result.show()

    spark.close()
  }
}
