package com.navinfo.tripanalysis.service.impl;

import com.navinfo.tripanalysis.common.util.DateUtils;
import com.navinfo.tripanalysis.pojo.AlgorithmOuterData;
import com.navinfo.tripanalysis.pojo.Point0200;
import com.navinfo.tripanalysis.service.AbstractSaveDataBigDataService;
import com.navinfo.tripanalysis.service.SavePointService;
import com.navinfo.tripanalysis.util.BigDataUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.io.Serializable;
import java.util.*;
import java.util.stream.Collectors;

/**
 * 保存公共位置协议服务的实现
 *
 * <p>Implementation of the save-point service: flattens the algorithm output
 * points (0200 and 0F37 protocol lists) into Spark {@link Row}s and persists
 * them to a partitioned Hive table.
 *
 * @author web
 */
public class SavePointServiceImpl extends AbstractSaveDataBigDataService implements SavePointService, Serializable {
    private static final Logger logger = LoggerFactory.getLogger(SavePointServiceImpl.class);

    /**
     * Persists trajectory points to Hive.
     *
     * <p>For each (tid, {@link AlgorithmOuterData}) pair, merges the 0200 and
     * 0F37 point lists, drops points whose {@code tripId == 0} (not attached to
     * a trip), converts them to rows with the {@link Point0200} schema, and
     * writes them to the configured Hive table partitioned by {@code day} and
     * {@code hashtid} ({@code tid % 32}).
     *
     * @param spark          active Spark session used to create the DataFrame
     * @param pairTidOutData (tid, algorithm output) pairs to persist
     * @param day            epoch milliseconds identifying the day partition
     */
    @Override
    public void save(SparkSession spark, JavaPairRDD<Long, AlgorithmOuterData> pairTidOutData, long day) {
        long start = System.currentTimeMillis();
        // FIX: progress messages were logged at ERROR level; INFO is the correct level.
        logger.info("进行公共轨迹协议落盘，保存Hive开始...");

        JavaRDD<Row> pointRows = pairTidOutData.flatMap((FlatMapFunction<Tuple2<Long, AlgorithmOuterData>, Row>) tuple2 -> {
            AlgorithmOuterData outData = tuple2._2;
            // FIX: collect into an explicit ArrayList — Collectors.toList() makes no
            // mutability guarantee, so calling addAll() on its result is unsafe.
            // FIX: orElseGet(Collections::emptyList) avoids allocating a throwaway
            // ArrayList when the point list is present.
            List<Row> rows = Optional.ofNullable(outData.getPoints0200()).orElseGet(Collections::emptyList)
                    .stream().filter(e -> e.getTripId() != 0).map(e -> e.createRow())
                    .collect(Collectors.toCollection(ArrayList::new));
            rows.addAll(Optional.ofNullable(outData.getPoints0f37()).orElseGet(Collections::emptyList)
                    .stream().filter(e -> e.getTripId() != 0).map(e -> e.createRow())
                    .collect(Collectors.toList()));
            return rows.iterator();
        });

        StructType schema = Point0200.createSchema();
        Dataset<Row> dataFrame = spark.createDataFrame(pointRows, schema);
        String dayStr = DateUtils.format(new Date(day), DateUtils.DateFormat.YYYYMMDD);

        // 保存数据: partition by day (literal) and hashtid (tid % 32).
        BigDataUtils.saveDataSetToHiveTable(spark, dataFrame, coalesceType, coalesceNumber, hiveDbName, hiveTableName,
                new String[]{"day","hashtid"}, new String[]{"'"+dayStr+"'","tid%32"}, getSelectColumns(schema));

        logger.info("进行公共轨迹协议落盘，保存Hive结束，耗{}ms", System.currentTimeMillis()-start);
    }
}
