package com.navinfo.platform.dataanalysis.service.impl;

import com.navinfo.platform.common.service.ICommonService;
import com.navinfo.platform.common.service.ILoadDataChannel;
import com.navinfo.platform.common.service.ILoadDataService;
import com.navinfo.platform.common.service.impl.LoadDataFromHdfs;
import com.navinfo.platform.common.service.impl.LoadDataFromHive;
import com.navinfo.platform.common.service.impl.LoadDataFromMongo;
import org.apache.commons.lang.StringUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.storage.StorageLevel;

import java.util.*;
import java.util.stream.Collectors;

import static org.apache.spark.sql.functions.col;

/**
 * Singleton service that loads the raw datasets needed for gas/throttle
 * statistics: one day's 0200 location records (read from raw HDFS files when
 * running locally, from Hive otherwise) plus the terminal-id lists for the
 * 375 and 500 vehicle models.
 */
public enum GasDataLoadService implements ILoadDataService {
    INSTANCE;

    private final ILoadDataChannel hiveChannel = LoadDataFromHive.INSTANCE;
    private final ILoadDataChannel hdfsChannel = LoadDataFromHdfs.INSTANCE;

    /**
     * Loads the day's 0200 location data and the 375/500 vehicle-model lists.
     *
     * @param spark     the active Spark session
     * @param configMap job configuration; must contain {@code run.env},
     *                  the day key ({@link ICommonService#DAY_VALUE}),
     *                  {@code hdfs.0200.hql}, {@code hdfs.375.path},
     *                  {@code hdfs.500.path}, and — for the local env —
     *                  {@code hdfs.0200.path}
     * @return a list of three {@code Dataset<Row>}: the 0200 location data,
     *         the 375-model list, and the 500-model list, in that order
     * @throws IllegalStateException if the configured 0200 HQL does not
     *         contain a {@code FROM ... WHERE} structure
     */
    @Override
    public List<Object> loadData(SparkSession spark, Map<String, String> configMap) {
        String env = configMap.get("run.env");
        String day = configMap.get(ICommonService.DAY_VALUE);
        String hql0200 = String.format(configMap.get("hdfs.0200.hql"), day);

        // Load all 0200 (location) records for the given day.
        Dataset<Row> locationDataset;
        if (StringUtils.equals(env, "local")) {
            // Local run: read the raw day-partitioned files from HDFS, register
            // them under the table name the HQL references, then run the HQL's
            // SELECT without its WHERE clause (the path is already day-scoped).
            String hqlPath = configMap.get("hdfs.0200.path");
            Dataset<Row> allFields = hdfsChannel.readData(spark, String.format(hqlPath, day));

            // Case-insensitive lookup; the original lowercase-only indexOf
            // broke on HQL written with uppercase FROM/WHERE.
            int fromIdx = indexOfKeyword(hql0200, "from");
            int whereIdx = indexOfKeyword(hql0200, "where");
            if (fromIdx < 0 || whereIdx < 0 || whereIdx <= fromIdx) {
                throw new IllegalStateException(
                        "hdfs.0200.hql must be of the form 'select ... from <table> where ...': " + hql0200);
            }
            String tableName0200 = hql0200.substring(fromIdx + "from".length(), whereIdx).trim();
            allFields.createOrReplaceTempView(tableName0200);
            locationDataset = spark.sql(hql0200.substring(0, whereIdx));
        } else {
            // Cluster run: Hive evaluates the full HQL, day filter included.
            locationDataset = hiveChannel.readData(spark, hql0200);
        }

        // Terminal-id lists for the 375 and 500 vehicle models (plain text,
        // one id per line, exposed as a single "value" column).
        Dataset<Row> dataset375 = spark.read().text(configMap.get("hdfs.375.path"));
        Dataset<Row> dataset500 = spark.read().text(configMap.get("hdfs.500.path"));

        return Arrays.asList(locationDataset, dataset375, dataset500);
    }

    /**
     * Returns the index of the first case-insensitive occurrence of
     * {@code keyword} in {@code sql}, or -1 if absent. Lowercasing does not
     * change string length, so the index is valid for the original string.
     */
    private static int indexOfKeyword(String sql, String keyword) {
        return sql.toLowerCase(Locale.ROOT).indexOf(keyword);
    }
}
