package com.zq.learn.spark;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.AnalysisException;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.apache.spark.streaming.kafka010.LocationStrategy;
import scala.Tuple2;

import javax.xml.stream.Location;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

/**
 * Spark Streaming word-count example: consumes text messages from a Kafka
 * topic via the kafka010 direct stream, splits each record value into
 * whitespace-separated words, and prints per-batch word counts every 10
 * seconds until the application is terminated.
 *
 * @author ZhangQi
 * @date 2020/2/6
 */
public class SparkApp {

  public static void main(String[] args) {
    SparkConf conf = new SparkConf();
    conf.setMaster("local");
    conf.setAppName("Streaming Test");
    // Finish in-flight batches before shutting the streaming context down.
    conf.set("spark.streaming.stopGracefullyOnShutdown", "true");
    conf.set("spark.default.parallelism", "6");

    SparkSession session = SparkSession.builder().config(conf).getOrCreate();
    JavaSparkContext context = new JavaSparkContext(session.sparkContext());
    context.setLogLevel("WARN");
    // 10-second micro-batch interval.
    JavaStreamingContext javaStreamingContext =
      new JavaStreamingContext(context, Durations.seconds(10));

    String brokers = "localhost:9092";
    Map<String, Object> kafkaParams = new HashMap<>();
    kafkaParams.put("bootstrap.servers", brokers);
    kafkaParams.put("group.id", "group1");
    // Consumers only need deserializers; serializer settings belong to producers
    // and the legacy "metadata.broker.list" key is ignored by the new consumer.
    kafkaParams.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaParams.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    // Comma-separated topic list (a single topic here).
    String topics = "topic1";
    Collection<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));

    // Explicit starting position: partition 0 of topic1 begins at offset 2.
    Map<TopicPartition, Long> offset = new HashMap<>();
    offset.put(new TopicPartition("topic1", 0), 2L);

    JavaInputDStream<ConsumerRecord<String, String>> lines =
      KafkaUtils.createDirectStream(
        javaStreamingContext,
        LocationStrategies.PreferConsistent(),
        ConsumerStrategies.<String, String>Subscribe(topicsSet, kafkaParams, offset));

    // Classic word count: split each record value on spaces, map each word
    // to (word, 1), then sum the counts per word within the batch.
    JavaPairDStream<String, Integer> counts =
      lines.flatMap(x -> Arrays.asList(x.value().split(" ")).iterator())
        .mapToPair(x -> new Tuple2<>(x, 1))
        .reduceByKey(Integer::sum);
    counts.print();

    javaStreamingContext.start();
    try {
      // Blocks until the streaming context is stopped or the thread is interrupted.
      javaStreamingContext.awaitTermination();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can observe it,
      // instead of swallowing the interruption with a stack trace.
      Thread.currentThread().interrupt();
    } finally {
      javaStreamingContext.close();
    }
  }

}
