package com.navinfo.tripanalysis.util;

import com.navinfo.tripanalysis.common.util.DateUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URISyntaxException;
import java.util.*;
import java.util.stream.Collectors;

/**
 * 大数据服务相关的工具类
 * @author 沈东生
 */
/**
 * Utility class for big-data related operations: resolving HDFS partition paths,
 * building batch WHERE clauses, and persisting Spark datasets to parquet/Hive.
 *
 * <p>All members are static; this class is not instantiable.</p>
 *
 * @author 沈东生
 */
public class BigDataUtils {
    private static final Logger logger = LoggerFactory.getLogger(BigDataUtils.class);

    /**
     * Save data by writing parquet files directly to HDFS.
     */
    public static final String SAVE_TYPE_HDFS = "hdfs";
    /**
     * Save data via a Hive {@code insert overwrite} statement.
     */
    public static final String SAVE_TYPE_HIVE = "hive";

    /** Number of hash buckets for terminal ids; {@code hashtid} ranges over 0..31. */
    private static final int HASH_TID_BUCKETS = 32;

    /** Utility class; prevent instantiation. */
    private BigDataUtils() {
    }

    /**
     * Resolves the full HDFS directories backing a data table for the given day.
     * Only directories that actually exist are returned; missing ones are logged
     * and skipped. All I/O errors are caught and logged, so this method never throws.
     *
     * @param day       the date whose partition ({@code part_time=yyyyMMdd}) is resolved
     * @param batchList terminalId hash buckets to load batch-by-batch; when empty,
     *                  the whole day partition is returned instead of per-bucket paths
     * @param basePath  HDFS root path of the table
     * @return existing HDFS directories (possibly empty, never {@code null})
     */
    public static List<String> getHDFSPath(Date day, List<Integer> batchList, String basePath) {
        List<String> list = new ArrayList<>();
        String dayStr = DateUtils.format(day, DateUtils.DateFormat.YYYYMMDD);
        try {
            // FileSystem.get returns a cached, shared instance — intentionally not closed here.
            FileSystem fs = FileSystem.get(new java.net.URI(basePath), new org.apache.hadoop.conf.Configuration());
            if (CommonUtils.isCollectionEmpty(batchList)) {
                // No bucket list: load the entire day partition.
                String dataPathStr = String.format("%s/part_time=%s", basePath, dayStr);
                Path dataPath = new Path(dataPathStr);
                if (fs.exists(dataPath)) {
                    list.add(dataPathStr);
                } else {
                    logger.info("dataPathStr:{} is not exist", dataPathStr);
                }
            } else {
                // One sub-directory per requested hashtid bucket.
                for (Integer id : batchList) {
                    String dataPathStr = String.format("%s/part_time=%s/hashtid=%d", basePath, dayStr, id);
                    Path dataPath = new Path(dataPathStr);
                    if (fs.exists(dataPath)) {
                        list.add(dataPathStr);
                    } else {
                        logger.info("dataPathStr:{} is not exist", dataPathStr);
                    }
                }
            }
        } catch (Exception ex) {
            // Best-effort: log and return whatever was collected so far.
            logger.error(ex.getMessage(), ex);
        }
        return list;
    }

    /**
     * Builds the WHERE clause fragment selecting a batch of data for one day.
     *
     * <p>Priority: an explicit {@code batchList} wins; otherwise specific terminal
     * ids ({@code tidList}) are mapped to their hash buckets; otherwise the whole
     * day partition is selected.</p>
     *
     * @param day       the date; rendered as {@code part_time=yyyyMMdd}
     * @param batchList hashtid buckets (0..31) to load batch-by-batch; may be empty
     * @param tidList   specific terminal ids to load (from job parameters); may be empty
     * @return the WHERE clause fragment, or {@code ""} if building it failed
     */
    public static String getBatchWhereSql(Date day, List<Integer> batchList, List<Long> tidList) {
        String dayStr = DateUtils.format(day, DateUtils.DateFormat.YYYYMMDD);
        String rtn = "";
        try {
            // Explicit bucket list takes precedence over specific tids.
            if (CommonUtils.isCollectionEmpty(batchList)) {
                if (CommonUtils.isCollectionEmpty(tidList)) {
                    rtn = String.format(" part_time=%s ", dayStr);
                } else {
                    // Hash each tid to its bucket. Compute the modulo on the long value
                    // BEFORE narrowing: tid.intValue() % 32 can go negative for tids
                    // above Integer.MAX_VALUE, which would never match a hashtid partition.
                    Set<Integer> tidSet = tidList.stream()
                            .map(tid -> (int) (tid % HASH_TID_BUCKETS))
                            .collect(Collectors.toSet());
                    rtn = String.format(" part_time=%s and hashtid in (%s) ", dayStr, CommonUtils.mkString(tidSet, ","));
                }
            } else {
                rtn = String.format(" part_time=%s and hashtid in (%s) ", dayStr, CommonUtils.mkString(batchList, ","));
            }
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
        return rtn;
    }

    /**
     * Writes the dataset as parquet files, overwriting any existing output.
     *
     * @param dataFrame      the dataset to save
     * @param outPath        output path
     * @param type           how to reduce the RDD partition count; defaults to repartition
     * @param coalesceNumber target partition count; defaults to 1 when {@code null}
     */
    public static void saveParquet(Dataset<Row> dataFrame, String outPath, CoalesceType type, Integer coalesceNumber) {
        repartation(dataFrame, type, coalesceNumber).write().mode(SaveMode.Overwrite).parquet(outPath);
    }

    /**
     * Saves a dataset into a Hive table via {@code insert overwrite}, optionally
     * into static partitions.
     *
     * @param spark           the Spark session used to run the SQL
     * @param ds              the dataset to save
     * @param coalesceType    how to reduce the RDD partition count; defaults to repartition
     * @param coalesceNumber  target partition count; defaults to 1 when {@code null}
     * @param hiveDbName      target Hive database name
     * @param saveTableName   target Hive table name
     * @param partationKeys   static partition column names; may be {@code null}/empty
     * @param partationValues values matching {@code partationKeys} positionally; must be
     *                        the same length as {@code partationKeys} when partitions are used
     * @param selectColumns   column expressions for the SELECT part of the insert
     */
    public static void saveDataSetToHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String hiveDbName, String saveTableName, List<String> partationKeys, List<Object> partationValues, List<String> selectColumns) {
        ds = repartation(ds, coalesceType, coalesceNumber);
        // Register a temp view so the insert can SELECT from the dataset.
        String tmpTableName = "tmp_" + saveTableName;
        ds.createOrReplaceTempView(tmpTableName);

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", hiveDbName, saveTableName));
        sb.append("\n");

        // Static partition spec: PARTITION(key1=v1,key2=v2,...)
        if (null != partationKeys && partationKeys.size() > 0) {
            List<String> partationList = new ArrayList<>();
            for (int i = 0; i < partationKeys.size(); i++) {
                String key = partationKeys.get(i);
                String value = partationValues.get(i).toString();
                partationList.add(String.format("%s=%s", key, value));
            }
            sb.append(String.format("PARTITION(%s)", CommonUtils.mkString(partationList, ",")));
            sb.append("\n");
        }

        sb.append(String.format(" select %s \n from \n %s ", CommonUtils.mkString(selectColumns, ","), tmpTableName));
        String sql = sb.toString();
        // Informational trace of the generated SQL (was wrongly logged at error level).
        logger.info("OOO->sql:{}", sql);
        spark.sql(sql);
    }

    /**
     * Saves a dataset into a Hive table via {@code insert overwrite}, supporting
     * dynamic partition columns (their values are taken from the SELECT output).
     *
     * @param spark            the Spark session used to run the SQL
     * @param ds               the dataset to save
     * @param coalesceType     how to reduce the RDD partition count; defaults to repartition
     * @param coalesceNumber   target partition count; defaults to 1 when {@code null}
     * @param hiveDbName       target Hive database name
     * @param tableName        target Hive table name
     * @param partition        dynamic partition column names; may be {@code null}
     * @param partitionColumns SELECT expressions producing the dynamic partition values,
     *                         appended after {@code selectColumns}; may be {@code null}
     * @param selectColumns    column expressions for the SELECT part of the insert
     */
    public static void saveDataSetToHiveTable(SparkSession spark, Dataset ds, CoalesceType coalesceType, Integer coalesceNumber, String hiveDbName, String tableName, String[] partition, String[] partitionColumns, List<String> selectColumns) {
        ds = repartation(ds, coalesceType, coalesceNumber);
        String tmpTableName = "tmp_" + tableName;
        ds.createOrReplaceTempView(tmpTableName);

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("insert overwrite table %s.%s ", hiveDbName, tableName));
        sb.append("\n");

        // Working copy: dynamic partition expressions must come last in the SELECT list.
        List<String> selectColumnsAL = new ArrayList<>(selectColumns);

        // Dynamic partition spec: PARTITION(col1,col2,...) with values from the SELECT.
        Optional.ofNullable(partition).ifPresent(e -> {
            sb.append(String.format("PARTITION(%s)", String.join(",", e)));
            sb.append("\n");

            Optional.ofNullable(partitionColumns).ifPresent(c ->
                    selectColumnsAL.addAll(Arrays.asList(c)));
        });

        sb.append(String.format(" select %s \n from \n %s ", String.join(",", selectColumnsAL), tmpTableName));
        String sql = sb.toString();
        // Informational trace of the generated SQL (was wrongly logged at error level).
        logger.info("OOO->sql:{}", sql);

        // Enable Hive dynamic partitioning for this insert.
        spark.sql("set  hive.exec.dynamic.partition.mode = nonstrict");
        spark.sql("set  hive.exec.dynamic.partition = true");
        spark.sql(sql);
    }

    /**
     * Adjusts the dataset's partition count.
     *
     * @param dataFrame      the dataset to repartition
     * @param type           {@link CoalesceType#coalesce} avoids a shuffle; defaults
     *                       to {@link CoalesceType#repartation} when {@code null}
     * @param coalesceNumber target partition count; defaults to 1 when {@code null}
     * @return the dataset with the adjusted partition count
     */
    private static Dataset<Row> repartation(Dataset<Row> dataFrame, CoalesceType type, Integer coalesceNumber) {
        coalesceNumber = null == coalesceNumber ? 1 : coalesceNumber;
        type = null == type ? CoalesceType.repartation : type;

        return type == CoalesceType.coalesce ? dataFrame.coalesce(coalesceNumber) : dataFrame.repartition(coalesceNumber);
    }

    /**
     * Strategy for reducing the number of RDD partitions.
     */
    public enum CoalesceType {
        /**
         * Reduce partitions via {@code coalesce} (no shuffle).
         */
        coalesce,
        /**
         * Reduce partitions via {@code repartition} (full shuffle).
         */
        repartation
    }
}
