package com.xian;

import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.hive.HiveContext;

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

public class ReadHiveData {

    /**
     * Spark driver: reads a SQL script file line by line, substitutes the
     * {@code ${exeDate}} placeholder with the supplied date, executes each
     * statement against Hive, and prints the resulting rows.
     *
     * @param args args[0] = path to the SQL script file;
     *             args[1] = execution date substituted for ${exeDate}
     * @throws Exception if the SQL file is missing or a statement fails
     */
    public static void main(String[] args) throws Exception {

        // Guard against missing arguments first, so we fail with the intended
        // validation messages instead of an ArrayIndexOutOfBoundsException.
        String sqlFile = args.length > 0 ? args[0] : null;
        if (StringUtils.isBlank(sqlFile)) {
            throw new IllegalArgumentException("第1个参数SQL文件不能为空!");
        }
        System.out.println("传入SQL文件为:" + sqlFile);

        String exeDate = args.length > 1 ? args[1] : null;
        if (StringUtils.isBlank(exeDate)) {
            throw new IllegalArgumentException("第2个参数日期不能为空!");
        }
        System.out.println("传入日期为:" + exeDate);

        SparkConf conf = new SparkConf();
        //conf.setMaster("local[8]");
        //conf.setAppName("ReadHiveData");

        // try-with-resources guarantees the context is stopped/closed even if
        // a statement fails; JavaSparkContext.close() delegates to stop().
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            jsc.setLogLevel("ERROR"); // keep driver output readable

            HiveContext hiveContext = new HiveContext(jsc);

            // BUG FIX: use the file passed in args[0] instead of the
            // previously hardcoded "data/my.sql".
            // Explicit UTF-8 so the script decodes consistently regardless of
            // the platform default charset.
            try (Scanner scanner = new Scanner(new File(sqlFile), "UTF-8")) {
                while (scanner.hasNextLine()) {
                    String sql = scanner.nextLine();
                    // Skip blank lines — hiveContext.sql("") would fail.
                    if (StringUtils.isBlank(sql)) {
                        continue;
                    }
                    System.out.println("读取原SQL:" + sql);
                    sql = sql.replace("${exeDate}", exeDate);
                    System.out.println("替换日期参数后执行SQL:" + sql);
                    Dataset<Row> rowDataset = hiveContext.sql(sql);
                    rowDataset.show();
                }
            }
        }
    }
}
