package com.yomob.kylin.client;

import java.io.IOException;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Collections;
import java.util.List;

import com.yomob.kylin.exception.KirinRuntimeException;
import org.apache.commons.collections.CollectionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.Lists;
import com.yomob.kylin.support.OptionSupport;

public class SparkClient {
    private static final Logger LOGGER = LoggerFactory.getLogger(SparkClient.class);

    // DateTimeFormatter is immutable and thread-safe; cache it instead of
    // rebuilding the pattern on every getFileNames call.
    private static final DateTimeFormatter DATE_DIR_FORMAT = DateTimeFormatter.ofPattern("yyyyMMdd");

    private final SparkSession sparkSession;

    public SparkClient(SparkSession sparkSession) {
        this.sparkSession = sparkSession;
    }

    public SparkSession getSparkSession() {
        return sparkSession;
    }

    /**
     * Loads the JSON data for every day in the inclusive range
     * {@code [startDate, endDate]}. Delegates to {@link #getDataset(List)}
     * so the HDFS listing/loading logic lives in one place.
     *
     * @param startDate first day to load (inclusive)
     * @param endDate   last day to load (inclusive)
     * @return a dataset over all matching files, or {@code null} when no
     *         files exist for the range (original contract preserved)
     * @throws KirinRuntimeException if HDFS access fails
     */
    public Dataset<Row> getDataset(LocalDate startDate, LocalDate endDate) {
        List<LocalDate> dates = Lists.newArrayList();
        for (LocalDate date = startDate; !date.isAfter(endDate); date = date.plusDays(1)) {
            dates.add(date);
        }
        return getDataset(dates);
    }

    /**
     * Loads the JSON data for each of the given days.
     *
     * @param dates days to load; a day with no HDFS directory is skipped
     * @return a dataset over all matching files, or {@code null} when no
     *         files exist for any of the dates (original contract preserved)
     * @throws KirinRuntimeException if HDFS access fails
     */
    public Dataset<Row> getDataset(List<LocalDate> dates) {
        String hdfsPath = OptionSupport.getHdfsPath();
        Configuration hadoopConfiguration = getSparkSession().sparkContext().hadoopConfiguration();
        List<String> files = Lists.newArrayList();
        // try-with-resources closes the FileSystem even when listing throws;
        // the original leaked it on IOException.
        // NOTE(review): FileSystem.get returns a JVM-cached instance by default,
        // so closing it affects other users of the same cached handle. The
        // original code also closed it, so behavior is preserved — confirm the
        // job sets fs.hdfs.impl.disable.cache or runs single-client.
        try (FileSystem fs = FileSystem.get(hadoopConfiguration)) {
            for (LocalDate date : dates) {
                files.addAll(getFileNames(fs, hdfsPath, date));
            }
        } catch (IOException e) {
            throw new KirinRuntimeException(e);
        }
        if (CollectionUtils.isEmpty(files)) {
            return null;
        }
        LOGGER.info(">>> HDFS Paths: {}", files);

        return getSparkSession().read().json(files.toArray(new String[0]));
    }

    /**
     * Lists the non-temporary files under {@code hdfsPath/yyyyMMdd} for the
     * given date. Files still being written (".tmp" suffix) are excluded.
     *
     * @return fully-qualified file paths, or an empty list when the day's
     *         directory does not exist
     * @throws IOException if the HDFS listing fails
     */
    private List<String> getFileNames(FileSystem fs, String hdfsPath, LocalDate date) throws IOException {
        Path directory = new Path(hdfsPath, date.format(DATE_DIR_FORMAT));
        if (!fs.exists(directory)) {
            return Collections.emptyList();
        }
        List<String> files = Lists.newArrayList();
        for (FileStatus status : fs.listStatus(directory)) {
            String path = status.getPath().toString();
            if (!path.endsWith(".tmp")) {
                files.add(path);
            }
        }
        return files;
    }
}
