package com.navinfo.tripanalysis.offline.service.impl;

import com.navinfo.tripanalysis.common.util.CommonUtils;
import com.navinfo.tripanalysis.offline.util.BigDataUtils;
import lombok.Data;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;


/**
 * Abstract base class for persisting datasets to Hive / HDFS.<br/>
 * Used to save trip statistics, trip events, trip caches and common location points.
 * Subclasses supply the table schema via {@link #createSchema()} and the row
 * conversion via {@link #createRowRDD(JavaPairRDD)}.
 * @author 沈东生，web
 */
@Data
public abstract class AbstractHiveSaveService implements Serializable {
    // NOTE(review): this non-transient instance logger is serialized together with
    // the service when Spark ships it to executors; whether that succeeds depends
    // on the SLF4J binding in use. Consider a transient field or a per-subclass
    // static logger — TODO confirm with the deployment's logging backend.
    protected Logger logger = LoggerFactory.getLogger(getClass());

    /**
     * How the data is persisted (e.g. HIVE table vs raw HDFS parquet).
     */
    protected BigDataUtils.SaveType saveType;
    /**
     * Strategy used to repartition/coalesce the DataFrame before writing.
     */
    protected BigDataUtils.CoalesceType coalesceType;
    /**
     * Number of partitions used by the coalesce strategy.
     */
    protected Integer coalesceNumber;
    /**
     * HDFS directory backing the Hive table.
     */
    protected String hdfsPath;
    /**
     * Hive database name.
     */
    protected String hiveDbName;
    /**
     * Hive table name.
     */
    protected String hiveTableName;
    /**
     * Insert mode; when {@code null} the default save variant is used.
     */
    protected BigDataUtils.InsertType insertType;

    /**
     * Converts the source RDD into a DataFrame (using the subclass-provided schema
     * and row mapping) and saves it according to the configured save type, insert
     * type and coalesce settings. Logs an error and returns without writing when
     * the partition keys/values are missing or the save type is unconfigured.
     *
     * @param spark              active Spark session
     * @param resourceRDD        source pair RDD, converted via {@link #createRowRDD(JavaPairRDD)}
     * @param outPath            HDFS output directory (only consumed by the HDFS save type)
     * @param isDynamicPartition whether to save with dynamic partitioning
     * @param partitionKeys      partition column names (must be non-empty)
     * @param partitionValues    partition column values (must be non-empty)
     */
    protected void saveData(SparkSession spark, JavaPairRDD resourceRDD, String outPath, boolean isDynamicPartition, List<String> partitionKeys, List<String> partitionValues) {
        // Partition keys/values are required by every Hive save variant below.
        // NOTE(review): this guard also rejects the plain HDFS parquet branch,
        // which never consumes the partition lists — confirm that is intended.
        if(CommonUtils.isCollectionEmpty(partitionKeys) || CommonUtils.isCollectionEmpty(partitionValues) ){
            logger.error("数据落盘时的分区键或分区值为空...");
            return;
        }

        StructType schema = createSchema();
        Dataset<Row> dataFrame = spark.createDataFrame(createRowRDD(resourceRDD), schema);
        // Hoisted: previously recomputed identically in each of the four save branches.
        List<String> fieldNames = Arrays.asList(schema.fieldNames());

        if(isDynamicPartition){
            if(insertType == null) {
                BigDataUtils.saveHiveTableDynamic(spark, dataFrame, coalesceType, coalesceNumber, hiveDbName, hiveTableName, partitionKeys, partitionValues, fieldNames);
            }else{
                BigDataUtils.saveHiveTableDynamicByInsertType(spark, dataFrame, coalesceType, coalesceNumber, insertType, hiveDbName, hiveTableName, partitionKeys, partitionValues, fieldNames);
            }
        }else{
            if(BigDataUtils.SaveType.HIVE.equals(saveType)){
                if(insertType == null){
                    BigDataUtils.saveHiveTable(spark, dataFrame, coalesceType, coalesceNumber, hiveDbName, hiveTableName, partitionKeys, partitionValues, fieldNames);
                }else {
                    BigDataUtils.saveHiveTableByInsertType(spark, dataFrame, coalesceType, coalesceNumber, insertType, hiveDbName, hiveTableName, partitionKeys, partitionValues, fieldNames);
                }
            }else if(BigDataUtils.SaveType.HDFS.equals(saveType)) {
                BigDataUtils.saveParquet(dataFrame, outPath, coalesceType, coalesceNumber);
            }else{
                logger.error("没有配置数据落盘的保存类型...");
            }
        }
    }

    /**
     * Builds the {@link StructType} describing the target table's columns.
     *
     * @return schema used both for DataFrame creation and for the Hive field list
     */
    protected abstract StructType createSchema();

    /**
     * Maps the source pair RDD into {@link Row}s matching {@link #createSchema()}.
     *
     * @param resourceRDD source RDD
     * @return RDD of rows ready for {@code spark.createDataFrame}
     */
    protected abstract JavaRDD<Row> createRowRDD(JavaPairRDD resourceRDD);

}
