package com.navinfo.tripanalysis.service.impl;

import com.navinfo.tripanalysis.common.arithmetic.common.OuterStatisticData;
import com.navinfo.tripanalysis.common.arithmetic.convert.StatisticExtendedDataParquetConvert;
import com.navinfo.tripanalysis.common.util.DateUtils;
import com.navinfo.tripanalysis.service.AbstractSaveDataBigDataService;
import com.navinfo.tripanalysis.service.SaveTripService;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;

/**
 * Persists trip extended-statistic data to the HDFS-backed Hive table,
 * writing into the day partition derived from the supplied timestamp.
 */
public class SaveTripExtendServiceBigDataImpl extends AbstractSaveDataBigDataService implements SaveTripService, Serializable {
    private static final Logger logger = LoggerFactory.getLogger(SaveTripExtendServiceBigDataImpl.class);

    /**
     * Converts each {@link OuterStatisticData} record into a Spark {@link Row}
     * and saves the resulting dataset as Parquet under {@code hdfsBasePath/day=YYYYMMDD}.
     *
     * @param spark   active SparkSession used to build and write the DataFrame
     * @param jsc     Java Spark context (part of the SaveTripService contract; unused here)
     * @param pairRDD trip key mapped to its list of extended statistic records
     * @param d       epoch milliseconds identifying the partition day
     */
    @Override
    public void save(SparkSession spark, JavaSparkContext jsc, JavaPairRDD<Long, List<OuterStatisticData>> pairRDD, long d) {
        long start = System.currentTimeMillis();
        // Progress messages belong at INFO level; the previous ERROR level
        // polluted error monitoring with routine status lines.
        logger.info("进行行程扩展统计信息落盘，保存Hive开始...");

        // Build the schema once on the driver; the lambda closure captures it
        // and Spark serializes it to the executors.
        final StructType schema = StatisticExtendedDataParquetConvert.createSchema();
        JavaRDD<Row> tripRows = pairRDD.flatMap((FlatMapFunction<Tuple2<Long, List<OuterStatisticData>>, Row>) longListTuple2 -> {
            List<OuterStatisticData> srcList = longListTuple2._2;
            // Presize to avoid intermediate resizes while converting records to Rows.
            List<Row> list = new ArrayList<>(srcList.size());
            for (OuterStatisticData m : srcList) {
                list.add(StatisticExtendedDataParquetConvert.createRow(m, schema));
            }
            return list.iterator();
        });

        String day = DateUtils.format(new Date(d), DateUtils.DateFormat.YYYYMMDD);
        String outPath = hdfsBasePath + "/day=" + day;

        // Build the DataFrame and write it to the day partition as Parquet.
        Dataset<Row> dataFrame = spark.createDataFrame(tripRows.rdd(), schema);
        saveParquet(spark, dataFrame, outPath, Collections.singletonList(day), getSelectColumns(schema));
        // Fixed log typo: "耗" -> "耗时" (elapsed time).
        logger.info("进行行程扩展统计信息落盘，保存Hive数据结束，耗时{}ms", System.currentTimeMillis() - start);

        // NOTE(review): tripRows was never persisted/cached, so unpersist() is a
        // no-op here; kept defensively in case caching is introduced upstream.
        tripRows.unpersist();
    }
}
