package com.navinfo.platform.basicdatastatistics.utils;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * Utility helpers for the big-data service: adjusting dataset partition
 * counts and saving Spark datasets either as parquet files or into Hive
 * tables via {@code insert overwrite}.
 *
 * @author 沈东生
 */
public final class BigDataUtils {
    private static final Logger LOGGER = LoggerFactory.getLogger(BigDataUtils.class);

    /** Utility class — not meant to be instantiated. */
    private BigDataUtils() {
    }

    /**
     * Strategy for changing a dataset's partition count before saving.
     */
    public enum CoalesceType {
        /**
         * Adjust the partition count via {@code Dataset.coalesce} (avoids a
         * full shuffle; can only reduce the number of partitions).
         */
        coalesce,
        /**
         * Adjust the partition count via {@code Dataset.repartition} (full
         * shuffle). NOTE: the misspelled constant name ("repartation") is
         * public API and is kept for backward compatibility.
         */
        repartation
    }

    /**
     * Save mode: persist the data as parquet files on HDFS.
     */
    public static final String SAVE_TYPE_HDFS = "hdfs";
    /**
     * Save mode: persist the data via a Hive {@code insert overwrite} statement.
     */
    public static final String SAVE_TYPE_HIVE = "hive";

    /**
     * Writes the dataset to {@code outPath} as parquet files, overwriting any
     * existing output at that path.
     *
     * @param dataFrame      dataset to save
     * @param outPath        target path (e.g. an HDFS directory)
     * @param type           how to adjust the partition count before writing;
     *                       defaults to {@link CoalesceType#repartation} when {@code null}
     * @param coalesceNumber target partition count; defaults to 1 when {@code null}
     */
    public static void saveParquet(Dataset<Row> dataFrame, String outPath, CoalesceType type, Integer coalesceNumber) {
        Dataset<Row> cframe = repartationDs(dataFrame, type, coalesceNumber);
        cframe.write().mode(SaveMode.Overwrite).parquet(outPath);
    }

    /**
     * Returns {@code dataFrame} with its partition count adjusted.
     * Null arguments fall back to defaults: 1 partition, {@code repartation} mode.
     */
    private static Dataset<Row> repartationDs(Dataset<Row> dataFrame, CoalesceType type, Integer coalesceNumber) {
        int partitions = coalesceNumber == null ? 1 : coalesceNumber;
        if (type == CoalesceType.coalesce) {
            return dataFrame.coalesce(partitions);
        }
        // null type falls through to the default repartition branch
        return dataFrame.repartition(partitions);
    }

    /**
     * Saves the dataset into a Hive table with {@code insert overwrite},
     * optionally into a static partition. The dataset is first registered as a
     * temporary view ({@code tmp_<saveTableName>}) which the insert selects from.
     *
     * <p>NOTE: the SQL statement is built by string concatenation; the database,
     * table, partition and column arguments must come from trusted internal
     * configuration, never from untrusted input.
     *
     * @param spark           active Spark session used to run the SQL
     * @param ds              dataset to save (raw type kept for backward compatibility)
     * @param coalesceType    partition-adjustment strategy; may be {@code null} (defaults to repartition)
     * @param coalesceNumber  target partition count; may be {@code null} (defaults to 1)
     * @param hiveDbName      target Hive database name
     * @param saveTableName   target Hive table name
     * @param partationKeys   static partition keys; may be null/empty for an unpartitioned insert
     * @param partationValues values matching {@code partationKeys} positionally;
     *                        String values are single-quoted, others rendered via {@code toString()}
     * @param selectColumns   columns/expressions selected from the temporary view
     * @throws IllegalArgumentException if partition keys are given but the value
     *                                  list is null or shorter than the key list
     */
    public static void saveDataSetToHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String hiveDbName, String saveTableName, List<String> partationKeys, List<Object> partationValues, List<String> selectColumns) {
        ds = repartationDs(ds, coalesceType, coalesceNumber);
        String tmpTableName = "tmp_" + saveTableName;
        ds.createOrReplaceTempView(tmpTableName);
        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", hiveDbName, saveTableName));
        sb.append("\n");
        // Static partition clause, e.g. PARTITION(dt='20200101',region=1)
        if (!CommonUtils.isCollectionEmpty(partationKeys)) {
            if (partationValues == null || partationValues.size() < partationKeys.size()) {
                // Fail fast with a clear message instead of an opaque
                // IndexOutOfBoundsException/NPE inside the loop below.
                throw new IllegalArgumentException(String.format(
                        "partationValues must supply a value for each of the %d partition keys",
                        partationKeys.size()));
            }
            List<String> partations = new ArrayList<>(partationKeys.size());
            for (int i = 0; i < partationKeys.size(); i++) {
                String key = partationKeys.get(i);
                Object value = partationValues.get(i);
                // Strings are quoted for Hive; other types (numbers, etc.) are inlined as-is.
                String sValue;
                if (value instanceof String) {
                    sValue = String.format("'%s'", value.toString());
                } else {
                    sValue = value.toString();
                }
                partations.add(String.format("%s=%s", key, sValue));
            }
            String sPartations = CommonUtils.mkString(partations, ",");
            sb.append(String.format("PARTITION(%s)", sPartations));
            sb.append("\n");
        }
        sb.append(String.format(" select %s \n from \n %s ", CommonUtils.mkString(selectColumns, ","), tmpTableName));
        String sql = sb.toString();
        // Was LOGGER.error with a leftover "OOO->" debug prefix; the statement
        // is normal operational output, so log at INFO.
        LOGGER.info("saveDataSetToHiveTable sql:{}", sql);
        spark.sql(sql);
    }
}
