package com.navinfo.tripanalysis.service;

import com.navinfo.tripanalysis.util.BigDataUtils;
import lombok.Data;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;


/**
 * Base class for services that persist big-data results either as Parquet files
 * on HDFS or into a Hive table, selected at runtime by {@link #saveType}.
 *
 * <p>Subclasses configure the destination fields (Lombok {@code @Data} generates
 * the getters/setters) and call {@link #saveParquet} to write a dataset.
 *
 * @author 沈东生
 */
@Data
public class AbstractSaveDataBigDataService implements Serializable {
    /**
     * Save destination selector; compared case-insensitively against
     * {@code BigDataUtils.SAVE_TYPE_HIVE} and {@code BigDataUtils.SAVE_TYPE_HDFS}.
     */
    protected String saveType;
    /** Target partition count passed to the coalesce/repartition step before writing. */
    protected Integer coalesceNumber;
    /** How the dataset is repartitioned before writing (semantics defined by BigDataUtils). */
    protected BigDataUtils.CoalesceType coalesceType;
    /** Base HDFS directory for output (used when saving to HDFS). */
    protected String hdfsBasePath;
    /** Hive database name (used when saving to Hive). */
    protected String hiveDbName;
    /** Hive table name (used when saving to Hive). */
    protected String hiveTableName;
    /**
     * Partition key columns; defaults to a single "day" partition.
     * NOTE(review): "partation" is a historical typo for "partition", kept because
     * Lombok derives the public accessors {@code getPartationKeys()}/{@code setPartationKeys()}
     * from it — renaming would break callers.
     */
    protected List<String> partationKeys = Collections.singletonList("day");

    /**
     * Persists {@code dataFrame} according to {@link #saveType}: to a partitioned
     * Hive table, or as Parquet files under {@code outPath} on HDFS.
     *
     * @param spark           active Spark session (used for the Hive path)
     * @param dataFrame       the dataset to save
     * @param outPath         target HDFS directory (used for the HDFS path)
     * @param partationValues partition values, positionally matching {@link #partationKeys}
     * @param columns         all column names to select when writing to Hive
     */
    protected void saveParquet(SparkSession spark, Dataset<Row> dataFrame, String outPath, List<Object> partationValues, List<String> columns) {
        if (StringUtils.equalsIgnoreCase(saveType, BigDataUtils.SAVE_TYPE_HIVE)) {
            BigDataUtils.saveDataSetToHiveTable(spark, dataFrame, coalesceType, coalesceNumber, hiveDbName, hiveTableName, partationKeys, partationValues, columns);
        } else if (StringUtils.equalsIgnoreCase(saveType, BigDataUtils.SAVE_TYPE_HDFS)) {
            BigDataUtils.saveParquet(dataFrame, outPath, coalesceType, coalesceNumber);
        } else {
            // FIX: the old message claimed only the Hive save type was unconfigured,
            // but this branch is reached for ANY unrecognized/blank saveType and the
            // data is silently dropped. Report the actual value and the accepted ones.
            System.out.println("未知的保存类型saveType=" + saveType + "，仅支持hive或hdfs，数据未保存");
        }
    }

    /**
     * Returns all field names of the given schema as a list, in schema order.
     *
     * @param schema the dataset schema to read column names from
     * @return the schema's field names (fixed-size list backed by the array)
     */
    protected List<String> getSelectColumns(StructType schema) {
        return Arrays.asList(schema.fieldNames());
    }
}
