package com.navinfo.platform.hive2mysql.service;

import com.navinfo.platform.common.service.ICommonService;
import com.navinfo.platform.common.service.ILoadDataChannel;
import com.navinfo.platform.common.service.ILoadDataService;
import com.navinfo.platform.common.service.impl.LoadDataFromHive;
import com.navinfo.platform.common.utils.DateUtils;
import com.navinfo.platform.common.utils.StatisticsDef;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public enum ExportLoadDataService implements ILoadDataService {
    // Enum-based singleton.
    INS;

    /** Channel used to read datasets from Hive; never reassigned. */
    private final ILoadDataChannel hiveChannel = LoadDataFromHive.INSTANCE;

    /**
     * Loads the export datasets from Hive for the requested statistics period.
     *
     * <p>Reads two SQL templates from {@code configMap} ({@code hive.sql} and
     * {@code hive.ex.sql}), fills each with a partition column name and a start
     * day chosen by the statistics type ({@code args_2}: 0 = all, 1 = day,
     * 2 = week, 3 = month; anything else falls back to day), then executes both
     * against Hive.
     *
     * @param spark     active Spark session used to run the queries
     * @param configMap configuration; must contain the day value and both SQL
     *                  templates, each with two {@code %s} placeholders
     * @return a two-element list: [main dataset, extra dataset], both
     *         {@code Dataset<Row>}
     */
    @Override
    public List<Object> loadData(SparkSession spark, Map<String, String> configMap) {
        String day = configMap.get(ICommonService.DAY_VALUE);
        String sql = configMap.get("hive.sql");
        String exSql = configMap.get("hive.ex.sql");
        // args_2 selects the period: 0 = all, 1 = day, 2 = week, 3 = month.
        String typeArg = configMap.get("args_2");
        int type = typeArg == null ? 0 : Integer.parseInt(typeArg);

        // Resolve the (partition column, start day) pair once, then apply it to
        // both templates — avoids repeating the formatting in every branch.
        Tuple2<String, String> dimension = resolveDimension(type, day);
        sql = String.format(sql, dimension._1(), dimension._2());
        exSql = String.format(exSql, dimension._1(), dimension._2());

        Dataset<Row> dataset = hiveChannel.readData(spark, sql);
        Dataset<Row> exDataset = hiveChannel.readData(spark, exSql);
        List<Object> list = new ArrayList<>(2);
        list.add(dataset);
        list.add(exDataset);
        return list;
    }

    /**
     * Maps the statistics type to the (partition column, start day) pair used
     * to fill the SQL templates. The week/month start day is only computed in
     * the branch that needs it (the original computed both unconditionally).
     *
     * @param type statistics type: 2 = week, 3 = month; anything else = day
     * @param day  the reference day value from the configuration
     * @return tuple of (column name, period start day)
     */
    private Tuple2<String, String> resolveDimension(int type, String day) {
        switch (type) {
            case 2:
                return new Tuple2<>("week", DateUtils.getStartEndDay(day, StatisticsDef.WEEK)._1());
            case 3:
                return new Tuple2<>("month", DateUtils.getStartEndDay(day, StatisticsDef.MONTH)._1());
            default:
                // 0 (all), 1 (day), and any unexpected value use the daily
                // partition — matches the original switch's default behavior.
                return new Tuple2<>("day", day);
        }
    }
}
