package com.navinfo.opentsp.platform.computing.analysis.util;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;

/**
 * Utility class for big-data (Spark/Hive) related operations.
 * @author 沈东生 (Shen Dongsheng)
 */
public class BigDataUtils {
    private static final Logger logger = LoggerFactory.getLogger(BigDataUtils.class);

    /** Utility class — not instantiable. */
    private BigDataUtils() {
    }

    /**
     * Saves the dataset as Parquet files, overwriting any existing output.
     *
     * @param dataFrame      dataset to persist
     * @param outPath        output path for the Parquet files
     * @param type           how to change the RDD partition count; {@code null} defaults to {@link CoalesceType#repartation}
     * @param coalesceNumber target partition count; {@code null} defaults to 1
     */
    public static void saveParquet(Dataset<Row> dataFrame, String outPath, CoalesceType type, Integer coalesceNumber) {
        repartition(dataFrame, type, coalesceNumber).write().mode(SaveMode.Overwrite).parquet(outPath);
    }

    /**
     * Writes the dataset into a Hive table via {@code INSERT OVERWRITE}, optionally
     * into static partitions.
     *
     * @param spark           active Spark session
     * @param ds              dataset to persist (registered as a temp view {@code tmp_<tableName>})
     * @param coalesceType    how to change the RDD partition count; {@code null} defaults to {@link CoalesceType#repartation}
     * @param coalesceNumber  target partition count; {@code null} defaults to 1
     * @param dbName          Hive database name
     * @param tableName       Hive table name
     * @param partitionKeys   static partition keys; {@code null} or empty for an unpartitioned insert
     * @param partitionValues values matching {@code partitionKeys} one-to-one
     * @param selectColumns   columns of the SELECT that feeds the insert
     * @throws IllegalArgumentException if partition keys are given but the values list
     *                                  is missing or of a different size
     */
    public static void saveHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String dbName, String tableName, List<String> partitionKeys, List<String> partitionValues, List<String> selectColumns) {
        ds = repartition(ds, coalesceType, coalesceNumber);
        String tmpTableName = "tmp_" + tableName;
        ds.createOrReplaceTempView(tmpTableName);

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", dbName, tableName));
        sb.append("\n");

        // Static partition clause: PARTITION(key1=value1,key2=value2,...)
        if (null != partitionKeys && partitionKeys.size() > 0) {
            // Fail fast with a clear message instead of NPE/IndexOutOfBoundsException
            // when keys and values do not line up.
            if (null == partitionValues || partitionValues.size() != partitionKeys.size()) {
                throw new IllegalArgumentException(String.format(
                        "partitionValues must match partitionKeys: %d key(s), %d value(s)",
                        partitionKeys.size(), null == partitionValues ? 0 : partitionValues.size()));
            }
            List<String> partitionList = new ArrayList<>(partitionKeys.size());
            for (int i = 0; i < partitionKeys.size(); i++) {
                String key = partitionKeys.get(i);
                String value = partitionValues.get(i);
                partitionList.add(String.format("%s=%s", key, value));
            }
            sb.append(String.format("PARTITION(%s)", mkString(partitionList, ",")));
            sb.append("\n");
        }

        sb.append(String.format(" select %s \n from \n %s ", mkString(selectColumns, ","), tmpTableName));
        String sql = sb.toString();
        // Routine progress logging — previously logged at ERROR level with a debug prefix.
        logger.info("Executing Hive insert SQL:{}", sql);
        spark.sql(sql);
    }

    /**
     * Writes the dataset into a Hive table using dynamic partitioning.
     *
     * @param spark            active Spark session
     * @param ds               dataset to persist (registered as a temp view {@code tmp_<tableName>})
     * @param coalesceType     how to change the RDD partition count; {@code null} defaults to {@link CoalesceType#repartation}
     * @param coalesceNumber   target partition count; {@code null} defaults to 1
     * @param dbName           Hive database name
     * @param tableName        Hive table name
     * @param partitionKeys    dynamic partition key names; {@code null} or empty for an unpartitioned insert
     * @param partitionColumns SELECT expressions producing the dynamic partition values
     *                         (appended after {@code selectColumns}, as Hive requires)
     * @param selectColumns    ordinary (non-partition) columns of the SELECT
     */
    public static void saveHiveTableDynamic(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String dbName, String tableName, List<String> partitionKeys, List<String> partitionColumns, List<String> selectColumns) {
        ds = repartition(ds, coalesceType, coalesceNumber);
        String tmpTableName = "tmp_" + tableName;
        ds.createOrReplaceTempView(tmpTableName);

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", dbName, tableName));
        sb.append("\n");

        // Full column list for the SELECT: ordinary columns first, then the
        // dynamic partition expressions (their order must match partitionKeys).
        List<String> selectColumnsALL = new ArrayList<>(selectColumns);

        // Guard against an empty list too: an empty "PARTITION()" clause is invalid SQL
        // (saveHiveTable applies the same size check).
        if (null != partitionKeys && partitionKeys.size() > 0) {
            sb.append(String.format("PARTITION(%s)", String.join(",", partitionKeys)));
            sb.append("\n");

            if (null != partitionColumns) {
                selectColumnsALL.addAll(partitionColumns);
            }
        }

        sb.append(String.format(" select %s \n from \n %s ", String.join(",", selectColumnsALL), tmpTableName));
        String sql = sb.toString();
        // Routine progress logging — previously logged at ERROR level with a debug prefix.
        logger.info("Executing Hive dynamic-partition insert SQL:{}", sql);

        // Enable dynamic partitioning for this session before running the insert.
        spark.sql("set  hive.exec.dynamic.partition.mode = nonstrict");
        spark.sql("set  hive.exec.dynamic.partition = true");
        spark.sql(sql);
    }

    /**
     * Adjusts the dataset's partition count.
     *
     * @param dataFrame      dataset to repartition
     * @param type           {@link CoalesceType#coalesce} (no shuffle) or
     *                       {@link CoalesceType#repartation} (full shuffle);
     *                       {@code null} defaults to repartition
     * @param coalesceNumber target partition count; {@code null} defaults to 1
     * @return the repartitioned dataset
     */
    private static Dataset<Row> repartition(Dataset<Row> dataFrame, CoalesceType type, Integer coalesceNumber) {
        coalesceNumber = null == coalesceNumber ? 1 : coalesceNumber;
        type = null == type ? CoalesceType.repartation : type;

        return type == CoalesceType.coalesce ? dataFrame.coalesce(coalesceNumber) : dataFrame.repartition(coalesceNumber);
    }

    /**
     * How data is persisted.
     */
    public enum SaveType {
        /**
         * Persist via HDFS files.
         */
        HDFS,
        /**
         * Persist via Hive.
         */
        HIVE
    }

    /**
     * How data is loaded.
     */
    public enum LoadType {
        /**
         * Load from HDFS files.
         */
        HDFS,
        /**
         * Load from Hive.
         */
        HIVE
    }

    /**
     * How the partition count is changed.
     */
    public enum CoalesceType {
        /**
         * Reduce partitions via {@code coalesce} (avoids a full shuffle).
         */
        coalesce,
        /**
         * Change partitions via {@code repartition} (full shuffle).
         * Name kept as-is (historical misspelling) — it is part of the public API.
         */
        repartation
    }

    /**
     * Joins the {@code toString()} of each non-null element with the given
     * separator. Null elements are skipped entirely (no extra separator).
     *
     * @param list  elements to join; may be {@code null} or empty
     * @param split separator placed between consecutive non-null elements
     * @return the joined string; empty when {@code list} is {@code null}, empty,
     *         or contains only nulls
     */
    public static String mkString(Collection<?> list, String split) {
        StringBuilder stringBuilder = new StringBuilder();

        if (null != list && list.size() > 0) {
            int i = 0;
            for (Object obj : list) {
                if (null != obj) {
                    if (i != 0) {
                        stringBuilder.append(split);
                    }
                    stringBuilder.append(obj);
                    i++;
                }
            }
        }

        return stringBuilder.toString();
    }
}
