package com.navinfo.tripanalysis.service.impl;

import com.navinfo.tripanalysis.pojo.LoadDataParam;
import com.navinfo.tripanalysis.pojo.Point;
import com.navinfo.tripanalysis.service.AbstractLoad0F37DataServiceBigData;
import com.navinfo.tripanalysis.util.BigDataUtils;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

/**
 * Loads 0f37 trip data from parquet files stored on HDFS.
 *
 * <p>Paths are resolved from a configurable base directory plus the day and
 * batch list carried in the {@link LoadDataParam}.
 *
 * @author 沈东生
 */
public class Load0F37DataServiceHdfsImpl extends AbstractLoad0F37DataServiceBigData {
    private static final Logger logger = LoggerFactory.getLogger(Load0F37DataServiceHdfsImpl.class);

    /** Root HDFS directory under which the day/batch parquet files are laid out. */
    private String basePath;

    public String getBasePath() {
        return basePath;
    }

    public void setBasePath(String basePath) {
        this.basePath = basePath;
    }

    /**
     * Reads the parquet files matching the day/batches in {@code param}, selects the
     * {@code tid} column plus the configured {@code COL_NAMES}, applies the 0f37 where
     * clause, and converts the result to an RDD of {@link Point}.
     *
     * @param spark active Spark session used to read parquet
     * @param jsc   Spark context (unused here, part of the inherited signature)
     * @param param load parameters; must carry an extend param {@code "batchList"}
     *              holding a {@code List<Integer>} of batch ids — presumably set by
     *              the caller; TODO(review) confirm this contract upstream
     * @return the loaded points, or {@code null} when no matching HDFS paths exist.
     *         The null return is kept for backward compatibility — callers must
     *         null-check the result.
     */
    @Override
    public JavaRDD<Point> load(SparkSession spark, JavaSparkContext jsc, LoadDataParam param) {
        // getExtendParam is untyped; the cast is unavoidable, so suppress at the
        // narrowest scope. Element type is only enforced by caller convention.
        @SuppressWarnings("unchecked")
        List<Integer> batchList = (List<Integer>) param.getExtendParam("batchList");
        List<String> realPaths = BigDataUtils.getHDFSPath(param.getDay(), batchList, getBasePath());

        // Guard clause: nothing to read for this day/batch combination.
        if (realPaths.isEmpty()) {
            logger.warn("No HDFS parquet paths resolved for day={}, returning null", param.getDay());
            return null;
        }

        String whereStr = getWhereSql0f37(param);
        logger.info("过滤条件为：{}", whereStr);
        return toJavaRdd(
                spark.read()
                        .parquet(realPaths.toArray(new String[0]))
                        .select("tid", COL_NAMES.toArray(new String[0]))
                        .where(whereStr));
    }
}
