package org.apache.spark.examples.sql.streaming;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * Structured Streaming example that reads records from a Kafka topic and
 * echoes them to the console until the query is stopped.
 *
 * <p>Usage: {@code JavaAvrotoKafka [bootstrapServers] [topic]}
 * Both arguments are optional; the defaults preserve the cluster and topic
 * this example originally hard-coded.
 */
public class JavaAvrotoKafka {
    public static void main(String[] args) throws Exception {
        // Windows-only workaround so Hadoop can locate its native shell
        // utilities (winutils.exe); harmless when the path does not exist.
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");

        // Allow the Kafka cluster and topic to be overridden on the command
        // line; fall back to the values the example originally hard-coded.
        String bootstrapServers = args.length > 0
                ? args[0]
                : "172.30.17.160:9092,172.30.17.161:9092";
        String topic = args.length > 1 ? args[1] : "streamsets_realtime";

        SparkSession spark = SparkSession
                .builder()
                .appName("sql DataSource Example")
                .getOrCreate();

        // Each streaming row carries Kafka metadata plus binary `key` and
        // `value` columns.
        Dataset<Row> df = spark
                .readStream()
                .format("kafka")
                .option("kafka.bootstrap.servers", bootstrapServers)
                .option("subscribe", topic)
                .load();

        // Intended Avro pipeline (requires the external spark-avro module and
        // a JSON-format schema string; left disabled because from_avro/to_avro
        // are not on this example's classpath):
        //   1. from_avro(col("value"), jsonFormatSchema) -> struct `user`
        //   2. filter rows where user.favorite_color == "red"
        //   3. to_avro(col("user.name")) back into a binary `value` column
        //
        // FIX: the original program built the streaming Dataset but never
        // started a query, so Spark read nothing and the JVM exited
        // immediately. Start a console sink and block until termination so the
        // example actually streams data.
        df.writeStream()
                .format("console")
                .option("truncate", "false")
                .start()
                .awaitTermination();
    }
}
