package com.atguigu.ad.spark;

import org.apache.commons.cli.*;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

/**
 * Spark SQL job that copies one Hive table partition into ClickHouse over JDBC.
 *
 * <p>Usage (all options are required):
 * <pre>
 *   --hive_db &lt;db&gt; --hive_table &lt;table&gt; --hive_partition &lt;dt&gt;
 *   --ck_url &lt;jdbc-url&gt; --ck_table &lt;table&gt; --batch_size &lt;n&gt;
 * </pre>
 *
 * @author Hefei
 * @since 2023/4/7 10:21
 */
public class Hive2ClickHouse {
    private static CommandLine cmd;

    public static void main(String[] args) {
        // 1. Declare every option the job reads later via cmd.getOptionValue(...).
        //    All six are mandatory: missing any would otherwise yield a null value
        //    and a broken SQL statement / JDBC config.
        Options options = new Options();
        options.addOption(Option.builder().longOpt("hive_db")
                .desc("Hive database name (required)")
                .hasArg(true).required(true).build());
        options.addOption(Option.builder().longOpt("hive_table")
                .desc("Hive table name (required)")
                .hasArg(true).required(true).build());
        options.addOption(Option.builder().longOpt("hive_partition")
                .desc("Hive partition value for the dt column (required)")
                .hasArg(true).required(true).build());
        options.addOption(Option.builder().longOpt("ck_url")
                .desc("ClickHouse JDBC URL (required)")
                .hasArg(true).required(true).build());
        options.addOption(Option.builder().longOpt("ck_table")
                .desc("ClickHouse table name (required)")
                .hasArg(true).required(true).build());
        options.addOption(Option.builder().longOpt("batch_size")
                .desc("batch size used when writing to ClickHouse (required)")
                .hasArg(true).required(true).build());

        // 2. Parse the arguments. On failure, report the problem and the usage
        //    string, then exit with a non-zero status (never swallow the error).
        try {
            CommandLineParser commandLineParser = new DefaultParser();
            cmd = commandLineParser.parse(options, args);
        } catch (ParseException e) {
            System.err.println(e.getMessage());
            new HelpFormatter().printHelp("hive2clickhouse", options, true);
            System.exit(1);
            return; // unreachable; makes the non-continuation explicit
        }

        // 3. Create the SparkConf. Master/deploy mode are expected to come from
        //    spark-submit, so only the application name is set here.
        SparkConf sparkConf = new SparkConf().setAppName("hive2clickhouse");

        // 4. Create the SparkSession with Hive support enabled.
        SparkSession sparkSession = SparkSession.builder()
                .enableHiveSupport()
                .config(sparkConf)
                .getOrCreate();

        // 5. Allow quoted regular expressions as column names, so the query
        //    below can select "everything except dt".
        sparkSession.sql("set spark.sql.parser.quotedRegexColumnNames=true");

        // 6. Select every column except the dt partition column for the
        //    requested partition. NOTE(review): values are interpolated into
        //    the SQL text; acceptable for an operator-run CLI tool, but the
        //    arguments must be trusted.
        String sql = "select `(dt)?+.+` from " + cmd.getOptionValue("hive_db")
                + "." + cmd.getOptionValue("hive_table")
                + " where dt='" + cmd.getOptionValue("hive_partition") + "'";
        Dataset<Row> hive = sparkSession.sql(sql);

        // 7. Append the rows to ClickHouse through the JDBC data source.
        hive.write().mode(SaveMode.Append)
                .format("jdbc")
                .option("url", cmd.getOptionValue("ck_url"))
                .option("dbtable", cmd.getOptionValue("ck_table"))
                .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                .option("batchsize", cmd.getOptionValue("batch_size"))
                .save();

        // 8. Release the session.
        sparkSession.close();
    }
}