package com.navinfo.tripanalysis.offline.service.impl;

import com.navinfo.tripanalysis.common.pojo.Point;
import com.navinfo.tripanalysis.common.pojo.Point0F39;
import com.navinfo.tripanalysis.offline.pojo.LoadDataParam;
import com.navinfo.tripanalysis.offline.service.LoadPoint0F39Service;
import com.navinfo.tripanalysis.offline.util.PointUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;


import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/**
 * Abstract base service for loading 0F39 point data from Hive via Spark.
 *
 * <p>Subclasses supply the row-to-point conversion via {@link #getPoint(Row)}.
 *
 * @author hmc
 */
public abstract class AbstractLoadPoint0F39Service extends AbstractHiveLoadService implements LoadPoint0F39Service {

    /**
     * Default column name of the terminal-id (tid) field.
     * NOTE(review): named like a constant but declared as a mutable instance
     * field — presumably so subclasses can override the column name; verify
     * before making it {@code static final}.
     */
    protected String TID_NAME = "terminalid";

    /**
     * Columns to select when loading.
     * NOTE(review): shared mutable static list — assumes subclasses populate
     * it in their static initializers; TODO confirm against subclasses.
     */
    protected static final List<String> COL_NAMES = new ArrayList<>();


    /**
     * Loads raw rows through the parent Hive loader and flat-maps every
     * {@link Row} into zero or more {@link Point0F39} records.
     *
     * @param spark active Spark session
     * @param jsc   Java Spark context (not used by this implementation;
     *              required by the {@code LoadPoint0F39Service} interface)
     * @param param data-loading parameters
     * @return RDD of parsed 0F39 points
     */
    @Override
    public JavaRDD<Point0F39> load(SparkSession spark, JavaSparkContext jsc, LoadDataParam param) {
        return super.loadData(spark, param).flatMap(this::getPoint);
    }


    /**
     * Returns the columns to select as a defensive copy, so callers cannot
     * mutate the shared {@link #COL_NAMES} list.
     *
     * @return mutable copy of the configured column names
     */
    @Override
    protected List<String> getSelectColumns() {
        return new ArrayList<>(COL_NAMES);
    }


    /**
     * Resolves the concrete HDFS paths for the given parameters without
     * hash-tid partitioning (delegates to {@link PointUtils}).
     *
     * @param param data-loading parameters
     * @return resolved HDFS paths
     */
    protected List<String> getHDFSRealPaths(LoadDataParam param) {
        return PointUtils.getHDFSPathWithoutHashtid(getHdfsPath(), param);
    }

    /**
     * Converts one {@link Row} into concrete {@link Point0F39} objects.
     *
     * @param row source row read from Hive
     * @return iterator over the resulting points (possibly empty)
     * @throws CloneNotSupportedException if cloning a point template fails
     */
    protected abstract Iterator<Point0F39> getPoint(Row row) throws CloneNotSupportedException;
}
