/**
 * FileName: StructuredStreamingKafka
 * Author:   SAMSUNG-PC Sun Zhongjun
 * Date:     2019/01/08 10:12
 * Description: Ingests data with Structured Streaming.
 * Instead of generating the schema via class (bean) mapping,
 * uses withColumn and select to split a single
 * delimited column into multiple columns.
 */
package cn.com.bonc.app;

import cn.com.bonc.conf.ConfigurationManager;
import cn.com.bonc.constant.Constants;
import cn.com.bonc.factory.SourceFactory;
import cn.com.bonc.source.Source;
import cn.com.bonc.util.ColumnsUtil;
import cn.com.bonc.util.DataFilterAndOperatorUtil;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.streaming.OutputMode;
import org.apache.spark.sql.streaming.StreamingQuery;
import org.apache.spark.sql.streaming.StreamingQueryException;
import scala.Tuple2;
import scala.reflect.internal.Trees;

import java.sql.Timestamp;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.split;
import static org.apache.spark.sql.functions.window;


public class StructuredStreamingKafka {

	/**
	 * Entry point: consumes records from a Kafka topic with Structured Streaming,
	 * normalizes/filters the raw values, splits each value into multiple columns
	 * via {@code withColumn}/{@code select}, and prints per-phone counts over
	 * 30-second event-time windows to the console.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args){

		SparkSession spark = SparkSession
				.builder()
				.appName("MyAutoMappingDataApp")
				.getOrCreate();

		// Kafka source. Broker list and starting offsets come from external config.
		// NOTE(review): the topic "stopic" is hard-coded while the other Kafka
		// settings are configurable — consider moving it into ConfigurationManager.
		// NOTE(review): "startingOffsets" expects "earliest"/"latest"/offset JSON;
		// confirm KAFKA_AUTO_OFFSET_RESET holds one of those, not a raw consumer
		// "auto.offset.reset" value.
		Dataset<Row> kafkaDataset = spark
				.readStream()
				.format("kafka")
				.option("kafka.bootstrap.servers", ConfigurationManager.getProperty(Constants.KAFKA_BOOTSTRAP_SERVERS))
				.option("subscribe", "stopic")
				.option("startingOffsets", ConfigurationManager.getProperty(Constants.KAFKA_AUTO_OFFSET_RESET))
				.load();

		System.out.println("===============================>kafka："+kafkaDataset.columns().length);

		// Keep only (value, timestamp); run each value through the filter utility
		// and drop records it rejects (rejection is signalled by the literal
		// string "null").
		Dataset<Row> filterDataset = kafkaDataset
				.selectExpr("CAST(value AS STRING)","CAST(timestamp AS TIMESTAMP)")
				.as(Encoders.tuple(Encoders.STRING(), Encoders.TIMESTAMP()))
				.map((MapFunction<Tuple2<String, Timestamp>, Tuple2<String, Timestamp>>)t->
								new Tuple2<>(DataFilterAndOperatorUtil.getInstance().filter(t._1),t._2),
						Encoders.tuple(Encoders.STRING(), Encoders.TIMESTAMP())
				)
				.filter((FilterFunction<Tuple2<String, Timestamp>>) t->!"null".equals(t._1))
				.toDF("value","timestamp");

		// Split the single delimited "value" column into multiple named columns
		// (instead of mapping through a bean class), keeping the Kafka timestamp.
		Dataset<Row> words = filterDataset
				.select(col("value").cast("string"),col("timestamp"))
				.withColumn("tmp", split(col("value"),ColumnsUtil.getRegex()))
				.select(ColumnsUtil.getInstance().combineColumns(ColumnsUtil.getInstance().getColumns("tmp"),
						col("timestamp")))
				.drop(col("tmp"));

		words.printSchema();

		// Count occurrences of each phone number within 30-second windows.
		// Fix: a streaming aggregation without a watermark accumulates window
		// state forever; bound it so Spark can drop state for windows older
		// than the watermark (events more than 1 minute late are discarded).
		Dataset<Row> windowedCounts = words
				.withWatermark("timestamp", "1 minute")
				.groupBy(window(col("timestamp"),"30 seconds"), col("phone"))
				.count();

		// Emit the counts to the console. Update mode outputs only the rows
		// whose aggregate changed since the previous trigger.
		StreamingQuery query = windowedCounts
				.select("phone","count")
				.writeStream()
				.format("console")
				.outputMode(OutputMode.Update())
				.option("truncate", "false")
				.start();

		// Block until the query terminates. Fix: rethrow instead of swallowing
		// the failure with printStackTrace(), so the driver exits non-zero when
		// the streaming query dies.
		try {
			query.awaitTermination();
		} catch (StreamingQueryException e) {
			throw new RuntimeException("Streaming query failed", e);
		}
	}

}
