package com.navinfo.tripanalysis.offline.util;

import com.navinfo.tripanalysis.common.util.CommonUtils;
import com.navinfo.tripanalysis.common.util.DateUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Utility methods for persisting Spark datasets to HDFS (parquet files) or to
 * Hive tables, and for building batch-load {@code WHERE} clauses.
 *
 * <p>NOTE(review): Hive statements are assembled via string concatenation, so
 * database, table, column and partition identifiers must come from trusted
 * configuration, never from end-user input.</p>
 *
 * @author 沈东生
 */
public class BigDataUtils {
    private static final Logger logger = LoggerFactory.getLogger(BigDataUtils.class);

    /**
     * Persist data by writing parquet files to HDFS.
     */
    public static final String SAVE_TYPE_HDFS = "hdfs";
    /**
     * Persist data through a Hive {@code insert overwrite} statement.
     */
    public static final String SAVE_TYPE_HIVE = "hive";

    /** Number of hashtid buckets used to split one day's data into batches (0..31). */
    private static final int HASH_TID_BUCKETS = 32;

    /**
     * Writes a dataset as parquet files, overwriting any existing output.
     *
     * @param dataFrame      dataset to persist
     * @param outPath        target output path
     * @param type           how to adjust the RDD partition count before writing
     * @param coalesceNumber target partition count (defaults to 1 when null,
     *                       ignored when {@code type} is {@link CoalesceType#none})
     */
    public static void saveParquet(Dataset<Row> dataFrame, String outPath, CoalesceType type, Integer coalesceNumber) {
        repartition(dataFrame, type, coalesceNumber).write().mode(SaveMode.Overwrite).parquet(outPath);
    }

    /**
     * Saves a dataset into a Hive table with static partition values, always
     * overwriting existing data. Convenience form of
     * {@link #saveHiveTableByInsertType} with {@link InsertType#overwrite}.
     *
     * @param spark           active Spark session
     * @param ds              dataset to persist
     * @param coalesceType    how to adjust the partition count
     * @param coalesceNumber  target partition count
     * @param dbName          Hive database name
     * @param tableName       Hive table name
     * @param partitionKeys   static partition keys (may be null/empty)
     * @param partitionValues values matching {@code partitionKeys} one-to-one
     * @param selectColumns   columns of the generated select statement
     */
    public static void saveHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String dbName, String tableName, List<String> partitionKeys, List<String> partitionValues, List<String> selectColumns) {
        saveHiveTableByInsertType(spark, ds, coalesceType, coalesceNumber, InsertType.overwrite, dbName, tableName, partitionKeys, partitionValues, selectColumns);
    }

    /**
     * Saves a dataset into a Hive table using dynamic partition columns, always
     * overwriting existing data. Convenience form of
     * {@link #saveHiveTableDynamicByInsertType} with {@link InsertType#overwrite}.
     *
     * @param spark            active Spark session
     * @param ds               dataset to persist
     * @param coalesceType     how to adjust the partition count
     * @param coalesceNumber   target partition count
     * @param dbName           Hive database name
     * @param tableName        Hive table name
     * @param partitionKeys    dynamic partition key names
     * @param partitionColumns select expressions producing the partition values
     * @param selectColumns    ordinary (non-partition) select columns
     */
    public static void saveHiveTableDynamic(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String dbName, String tableName, List<String> partitionKeys, List<String> partitionColumns, List<String> selectColumns) {
        saveHiveTableDynamicByInsertType(spark, ds, coalesceType, coalesceNumber, InsertType.overwrite, dbName, tableName, partitionKeys, partitionColumns, selectColumns);
    }

    /**
     * Adjusts the dataset's partition count.
     *
     * @param dataFrame      dataset to adjust
     * @param type           {@link CoalesceType#none} returns the dataset untouched;
     *                       null falls back to {@link CoalesceType#repartation}
     * @param coalesceNumber target partition count, defaults to 1 when null
     * @return the (possibly) repartitioned dataset
     */
    private static Dataset<Row> repartition(Dataset<Row> dataFrame, CoalesceType type, Integer coalesceNumber) {
        if (CoalesceType.none == type) {
            return dataFrame;
        }
        int partitions = null == coalesceNumber ? 1 : coalesceNumber;
        CoalesceType effectiveType = null == type ? CoalesceType.repartation : type;

        return CoalesceType.coalesce == effectiveType
                ? dataFrame.coalesce(partitions)
                : dataFrame.repartition(partitions);
    }

    /**
     * How data is persisted.
     */
    public enum SaveType {
        /**
         * Persist via HDFS files.
         */
        HDFS,
        /**
         * Persist via Hive.
         */
        HIVE
    }

    /**
     * How data is loaded.
     */
    public enum LoadType {
        /**
         * Load from HDFS files.
         */
        HDFS,
        /**
         * Load from Hive.
         */
        HIVE
    }

    /**
     * How the partition count is adjusted.
     */
    public enum CoalesceType {
        /**
         * Reduce partitions with {@code coalesce} (no shuffle).
         */
        coalesce,
        /**
         * Change partitions with {@code repartition} (full shuffle).
         * NOTE: constant name keeps its historical spelling for compatibility.
         */
        repartation,
        /**
         * Leave the partition count unchanged.
         */
        none
    }

    /**
     * How data is inserted into Hive.
     */
    public enum InsertType {
        /**
         * Replace existing data ({@code insert overwrite}).
         */
        overwrite,
        /**
         * Append to existing data ({@code insert into}).
         */
        into
    }

    /**
     * Saves a dataset into a Hive table with static partition values.
     *
     * @param spark           active Spark session
     * @param ds              dataset to persist
     * @param coalesceType    how to adjust the partition count
     * @param coalesceNumber  target partition count
     * @param insertType      insert mode; defaults to {@link InsertType#overwrite} when null
     * @param dbName          Hive database name
     * @param tableName       Hive table name
     * @param partitionKeys   static partition keys (may be null/empty)
     * @param partitionValues values matching {@code partitionKeys} one-to-one
     * @param selectColumns   columns of the generated select statement
     * @throws IllegalArgumentException when partition keys and values differ in size
     */
    public static void saveHiveTableByInsertType(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, InsertType insertType, String dbName, String tableName, List<String> partitionKeys, List<String> partitionValues, List<String> selectColumns) {
        String tmpTableName = prepareTempView(ds, coalesceType, coalesceNumber, tableName);
        InsertType insert = null == insertType ? InsertType.overwrite : insertType;

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert %s table %s.%s ", insert.name(), dbName, tableName));
        sb.append("\n");
        sb.append(staticPartitionClause(partitionKeys, partitionValues));
        sb.append(String.format(" select %s \n from \n %s ", CommonUtils.mkString(selectColumns, ","), tmpTableName));

        runSql(spark, sb.toString());
    }

    /**
     * Saves a dataset into a Hive table using dynamic partition columns.
     * Enables Hive's non-strict dynamic partition mode before the insert.
     *
     * @param spark            active Spark session
     * @param ds               dataset to persist
     * @param coalesceType     how to adjust the partition count
     * @param coalesceNumber   target partition count
     * @param insertType       insert mode; defaults to {@link InsertType#overwrite} when null
     * @param dbName           Hive database name
     * @param tableName        Hive table name
     * @param partitionKeys    dynamic partition key names (may be null/empty)
     * @param partitionColumns select expressions producing the partition values,
     *                         appended after {@code selectColumns}
     * @param selectColumns    ordinary (non-partition) select columns
     */
    public static void saveHiveTableDynamicByInsertType(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, InsertType insertType, String dbName, String tableName, List<String> partitionKeys, List<String> partitionColumns, List<String> selectColumns) {
        String tmpTableName = prepareTempView(ds, coalesceType, coalesceNumber, tableName);
        InsertType insert = null == insertType ? InsertType.overwrite : insertType;

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert %s table %s.%s ", insert.name(), dbName, tableName));
        sb.append("\n");

        // Columns written to disk: ordinary columns first, then the dynamic
        // partition expressions (Hive requires partition columns last).
        List<String> selectColumnsAll = new ArrayList<>(selectColumns);

        // Skip the clause entirely for a null OR empty key list; an empty list
        // used to produce invalid "PARTITION()" SQL.
        if (null != partitionKeys && !partitionKeys.isEmpty()) {
            sb.append(String.format("PARTITION(%s)", String.join(",", partitionKeys)));
            sb.append("\n");
            if (null != partitionColumns) {
                selectColumnsAll.addAll(partitionColumns);
            }
        }

        sb.append(String.format(" select %s \n from \n %s ", String.join(",", selectColumnsAll), tmpTableName));

        // Dynamic partitioning must be enabled before the insert statement runs.
        spark.sql("set  hive.exec.dynamic.partition.mode = nonstrict");
        spark.sql("set  hive.exec.dynamic.partition = true");
        runSql(spark, sb.toString());
    }

    /**
     * Drops a Hive table partition if it exists.
     *
     * @param spark            active Spark session
     * @param dbName           Hive database name
     * @param tableName        Hive table name
     * @param partitionKeys    partition keys identifying the partition to drop
     * @param partitionColumns values matching {@code partitionKeys} one-to-one
     */
    public static void dropHiveTablePartition(SparkSession spark, String dbName, String tableName, List<String> partitionKeys, List<String> partitionColumns) {
        String partitionClause = staticPartitionClause(partitionKeys, partitionColumns);
        if (partitionClause.isEmpty()) {
            // Without a PARTITION clause the statement is invalid SQL and there
            // is nothing to drop, so bail out instead of failing in spark.sql.
            logger.warn("dropHiveTablePartition called without partition keys for {}.{}; nothing to drop", dbName, tableName);
            return;
        }
        String sql = String.format("alter table %s.%s drop if exists ", dbName, tableName) + partitionClause;
        runSql(spark, sql);
    }

    /**
     * Saves a dataset into a Hive table with static partition values whose
     * values may be arbitrary objects (rendered via {@code toString()}).
     * Always overwrites existing data.
     *
     * @param spark           active Spark session
     * @param ds              dataset to persist
     * @param coalesceType    how to adjust the partition count
     * @param coalesceNumber  target partition count
     * @param hiveDbName      Hive database name
     * @param saveTableName   Hive table name
     * @param partationKeys   static partition keys (may be null/empty)
     * @param partationValues values matching {@code partationKeys} one-to-one
     * @param selectColumns   columns of the generated select statement
     * @throws IllegalArgumentException when partition keys and values differ in size
     */
    public static void saveDataSetToHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String hiveDbName, String saveTableName, List<String> partationKeys, List<Object> partationValues, List<String> selectColumns) {
        String tmpTableName = prepareTempView(ds, coalesceType, coalesceNumber, saveTableName);

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", hiveDbName, saveTableName));
        sb.append("\n");
        sb.append(staticPartitionClause(partationKeys, partationValues));
        sb.append(String.format(" select %s \n from \n %s ", CommonUtils.mkString(selectColumns, ","), tmpTableName));

        runSql(spark, sb.toString());
    }

    /**
     * Repartitions the dataset and registers it as a temporary view named
     * {@code "tmp_" + tableName}.
     *
     * @return the temporary view name
     */
    private static String prepareTempView(Dataset<Row> ds, CoalesceType coalesceType, Integer coalesceNumber, String tableName) {
        Dataset<Row> repartitioned = repartition(ds, coalesceType, coalesceNumber);
        String tmpTableName = "tmp_" + tableName;
        repartitioned.createOrReplaceTempView(tmpTableName);
        return tmpTableName;
    }

    /**
     * Builds a {@code PARTITION(k1=v1,k2=v2)} clause followed by a newline, or
     * an empty string when no partition keys are supplied.
     *
     * @param keys   partition keys (may be null/empty)
     * @param values values matching {@code keys} one-to-one; rendered with toString()
     * @throws IllegalArgumentException when keys and values differ in size
     */
    private static String staticPartitionClause(List<String> keys, List<?> values) {
        if (null == keys || keys.isEmpty()) {
            return "";
        }
        if (null == values || values.size() != keys.size()) {
            throw new IllegalArgumentException(String.format(
                    "partition keys (%d) and values (%d) must match in size",
                    keys.size(), null == values ? 0 : values.size()));
        }
        List<String> pairs = new ArrayList<>();
        for (int i = 0; i < keys.size(); i++) {
            pairs.add(String.format("%s=%s", keys.get(i), values.get(i)));
        }
        return String.format("PARTITION(%s)", CommonUtils.mkString(pairs, ",")) + "\n";
    }

    /**
     * Logs the generated SQL (INFO — routine output, not an error) and runs it.
     */
    private static void runSql(SparkSession spark, String sql) {
        logger.info("OOO->sql:{}", sql);
        spark.sql(sql);
    }

    /**
     * Builds the WHERE condition for loading one day's batch of data.
     *
     * @param day       target day, rendered as part_time=yyyyMMdd
     * @param batchList hashtid buckets (0..31) to load; takes precedence over tidList
     * @param tidList   specific tids to load, mapped to their hashtid buckets (tid % 32)
     * @return the WHERE fragment, or an empty string if building it failed
     */
    public static String getBatchWhereSql(Date day, List<Integer> batchList, List<Long> tidList) {
        String dayStr = DateUtils.format(day, DateUtils.DateFormat.YYYYMMDD);
        String rtn = "";
        try {
            if (CommonUtils.isCollectionEmpty(batchList)) {
                // No explicit buckets: either load the whole day, or only the
                // buckets that the requested tids hash into.
                if (CommonUtils.isCollectionEmpty(tidList)) {
                    rtn = String.format(" part_time=%s ", dayStr);
                } else {
                    Set<Integer> tidSet = tidList.stream()
                            .map(tid -> (int) (tid % HASH_TID_BUCKETS))
                            .collect(Collectors.toSet());
                    rtn = String.format(" part_time=%s and hashtid in (%s) ", dayStr, CommonUtils.mkString(tidSet, ","));
                }
            } else {
                rtn = String.format(" part_time=%s and hashtid in (%s) ", dayStr, CommonUtils.mkString(batchList, ","));
            }
        } catch (Exception ex) {
            // Best-effort: log and fall back to an empty condition.
            logger.error(ex.getMessage(), ex);
        }
        return rtn;
    }
}
