package com.xb.stream.streaming;

import com.xb.stream.common.PropertiesLoad;
import kafka.serializer.StringDecoder;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import java.util.*;

/***
 * SparkStream拉取kafka的消息
 */
public class LoadDataBySparkStream {

    private static HiveContext hiveContxt;
    private  static SparkConf sparkConf;
    private static StructType schema;
    private static JavaSparkContext sparkContext;
    private static Map<String,String> params;
    private static Set<String> topics;
    private static JavaStreamingContext jsc;
    private static JavaPairInputDStream<String,String> stream;

    public static void main(String[] args) {
        // Ideally the environment would come from the CLI arguments:
        // PropertiesLoad.environment = args[0];

        // Assemble the Spark configuration parameters.
        createSparkConf();
        // Build the global JavaSparkContext.
        createSparkContext();
        // Build the HiveContext used to write data into Hive.
        createHiveContext(sparkContext);
        // Build the streaming context (micro-batch scheduler).
        createJavaStreamContext();
        // Create the direct Kafka DStream to consume from.
        createDStream();
        // Build the schema used to parse the incoming JSON records.
        createOutputSchema();
        // Cleanse each batch and persist it into Hive.
        etl(stream);
        try {
            jsc.start();
            jsc.awaitTermination();
        } catch (Exception e) {
            // Report the failure WITH its stack trace instead of silently
            // discarding the cause (the original swallowed all detail).
            System.out.println("Stream Context Exception!");
            e.printStackTrace();
        } finally {
            jsc.stop();
        }
    }

    /**
     * 创建SparkConf配置文件对象
     */
    /**
     * Builds the SparkConf for this job: HDFS/Hive connectivity settings are
     * loaded from the environment property files, plus append/replace-datanode
     * tuning needed for streaming writes.
     */
    private static void createSparkConf(){

        sparkConf = new SparkConf()
                .set("fs.hdfs.impl", DistributedFileSystem.class.getName())
                .set("fs.file.impl", LocalFileSystem.class.getName())
                .set("spark.sql.warehouse.dir", PropertiesLoad.getValue("spark.sql.warehouse.dir"))
                .set("dfs.client.use.datanode.hostname", "true")
                .set("fs.defaultFS", PropertiesLoad.getValue("fs.defaultFS"))
                // BUG FIX: key was misspelled "ffs.default.name", so the value read
                // from "fs.default.name" was stored under a key nothing reads.
                .set("fs.default.name", PropertiesLoad.getValue("fs.default.name"))
                .set("hive.server2.thrift.bind.host", PropertiesLoad.getValue("hive.server2.thrift.bind.host"))
                .set("hive.server2.webui.host", PropertiesLoad.getValue("hive.server2.webui.host"))
                .set("javax.jdo.option.ConnectionURL", PropertiesLoad.getValue("javax.jdo.option.ConnectionURL"))
                .set("hive.metastore.uris", PropertiesLoad.getValue("hive.metastore.uris"))
                .set("mapred.job.tracker", PropertiesLoad.getValue("mapred.job.tracker"))
                .set("dfs.support.append", "true")
                .set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER")
                .set("dfs.client.block.write.replace-datanode-on-failure.enable", "true")
                .setAppName("load-data-to-hive")
                .setMaster("local[1]"); // NOTE(review): local[1] looks like a dev setting — confirm before deploying
    }

    /**
     * 创建SparkContext全局对象
     */
    /**
     * Creates the process-wide JavaSparkContext from the previously assembled
     * SparkConf. Must run after {@link #createSparkConf()}.
     */
    private static void createSparkContext(){
        sparkContext  = new JavaSparkContext(sparkConf);
    }

    /**
     * 创建SparkSql中的HiveContx对象
     * @param sparkContext
     */
    /**
     * Builds the HiveContext used for the SparkSQL writes and applies the
     * small-file merge / split-size tuning options in one pass.
     *
     * @param sparkContext the global Spark context the HiveContext wraps
     */
    private static void createHiveContext(JavaSparkContext sparkContext){
        hiveContxt = new HiveContext(sparkContext);
        // Insertion-ordered so the settings are applied in the same order
        // as before.
        Map<String, String> tuning = new LinkedHashMap<>();
        tuning.put("hive.merge.mapfiles", "true");
        tuning.put("mapred.max.split.size", "256000000");
        tuning.put("mapred.min.split.size.per.node", "192000000");
        tuning.put("mapred.min.split.size.per.rack", "192000000");
        tuning.put("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
        for (Map.Entry<String, String> entry : tuning.entrySet()) {
            hiveContxt.setConf(entry.getKey(), entry.getValue());
        }
    }

    /**
     * 使用SparkConxt对象,创建SparkStreamContx对象用于SparkStream任务调度
     */
    /**
     * Creates the JavaStreamingContext that schedules the micro-batches.
     * The batch interval (milliseconds) is read from the
     * {@code stream.duration.time} property.
     */
    private static void  createJavaStreamContext(){
        // Long.parseLong avoids the needless boxing of Long.valueOf — the
        // Duration constructor takes a primitive long.
        jsc = new JavaStreamingContext(sparkContext,
                new Duration(Long.parseLong(PropertiesLoad.getValue("stream.duration.time"))));
    }

    /**
     * 创建DStream对象,用于从kafka中读取,b并且返回DStream
     */
    /**
     * Creates the direct Kafka DStream the job consumes from, storing it in
     * the static {@code stream} field. {@code createParams()} must run first
     * because it populates the {@code params} and {@code topics} fields this
     * method passes to KafkaUtils.
     */
    private static void createDStream(){
        createParams();
        // Direct (receiver-less) stream; keys and values are decoded as Strings.
        stream = KafkaUtils.createDirectStream(jsc,String.class,String.class,
                StringDecoder.class,StringDecoder.class,params,topics);
    }


    /**
     *
     * 创建解析JSON数据的schema结构
     *
     */
    /**
     * Builds the schema used to parse each incoming JSON record.
     * Every column is a nullable String, so the struct is assembled from a
     * plain list of column names.
     */
    private static void createOutputSchema(){
        String[] columns = {
                "planid", "adsid", "zoneid", "plantype", "siteid", "site_page",
                "useragent", "screen", "browser", "ip", "country", "province",
                "city", "isp", "lon", "lat", "day", "addtime", "uuid"
        };
        List<StructField> fields = new ArrayList<>(columns.length);
        for (String column : columns) {
            fields.add(DataTypes.createStructField(column, DataTypes.StringType, true));
        }
        schema = DataTypes.createStructType(fields);
    }


    /**
     * 组装kafkaUtils工具创建DStream所需要的配置文件
     */
    /**
     * Assembles the Kafka configuration consumed by
     * {@link #createDStream()}: the broker list and consumer group go into
     * {@code params}, the subscribed topic into {@code topics}. All values
     * come from the environment property files.
     */
    private static void createParams(){
        Map<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", PropertiesLoad.getValue("bootstrap.servers"));
        kafkaParams.put("group.id", PropertiesLoad.getValue("group.id"));
        params = kafkaParams;

        Set<String> topicSet = new HashSet<>();
        topicSet.add(PropertiesLoad.getValue("stream.kafka.topics"));
        topics = topicSet;
    }

    /**
     * 对DStream流中的每天数据进行遍历
     * @param stream
     */

    /**
     * Iterates over every micro-batch in the stream: empty batches are
     * skipped, non-empty ones are parsed against {@code schema} and written
     * into Hive as a single output file.
     *
     * @param stream the direct Kafka DStream of (key, json-value) pairs
     */
    private static void etl(JavaPairInputDStream<String,String> stream){
        // Lambda form of the VoidFunction callback; cast disambiguates the
        // foreachRDD overloads.
        stream.foreachRDD((VoidFunction<JavaPairRDD<String, String>>) batch -> {
            if (batch.isEmpty()) {
                return;
            }
            Dataset<Row> rows = hiveContxt.jsonRDD(batch.values(), schema);
            // coalesce(1) so each batch lands as one file in the partition
            writeToHive(rows.coalesce(1));
        });
    }


    /**
     * Registers the batch as a temp view named {@code logs} and inserts it
     * into the configured Hive table under today's date partition.
     *
     * @param row dataset of parsed log rows; its columns must match the
     *            target table's schema
     */
    private static void writeToHive(Dataset<Row> row){
        row.createOrReplaceTempView("logs");
        // java.time.LocalDate#toString() is the ISO yyyy-MM-dd form, replacing
        // the odd dependency on Ant's DateUtils (and the legacy java.util.Date)
        // for plain date formatting.
        String dt = java.time.LocalDate.now().toString();
        String sql = "insert into " +PropertiesLoad.getValue("hive.table.ticket") +
                " PARTITION(dt='" + dt + "') "
                + "select " +
                "planid as planid," +
                " adsid as adsid, " +
                "zoneid as zoneid, " +
                "plantype as plantype," +
                "siteid as siteid , " +
                "site_page as site_page," +
                "useragent as useragent ," +
                "screen as screen," +
                "browser as browser," +
                "ip as ip," +
                "country as country, " +
                "province as province, " +
                "city as city," +
                "isp as isp ," +
                " lon as lon ," +
                "lat as lat ," +
                " day as day," +
                "addtime as addtime," +
                " uuid as uuid" +
                " from logs";
        long start = System.currentTimeMillis();
        System.out.println(sql);
        hiveContxt.sql(sql);
        long end = System.currentTimeMillis();
        // NOTE: row.count() triggers a second Spark job after the insert; it
        // runs outside the timed section, matching the original behavior.
        System.out.println("insert into hive Cost Time   "+(end -start)+"      ones time size is    "+ row.count());
    }

}
