package org.apache.dolphinscheduler.common.utils;

import com.alibaba.fastjson.JSONObject;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.dolphinscheduler.common.Constants;
//import org.apache.dolphinscheduler.common.model.BaseDataSource;
//import org.apache.dolphinscheduler.common.task.datasource.RelationalDataSourceParameters;
import org.apache.spark.ml.PipelineModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.TimeUnit;

/**
 * Spark-backed helper for converting text/CSV/JSON files to parquet, previewing
 * parquet content, inspecting schemas and persisting ML pipeline models to HDFS.
 *
 * <p>Obtain instances via {@link #getInstance()}; a Guava cache recreates the
 * underlying local SparkSession after the configured expiry interval.
 */
public class SparkSqlUtils implements Closeable {

    private static final Logger logger = LoggerFactory.getLogger(SparkSqlUtils.class);

    private static final String SPARK_SQL_UTILS_KEY = "SPARK_SQL_UTILS_KEY";

    /** Maximum number of rows returned by {@link #showParquet(String)}. */
    private static final int PREVIEW_ROW_LIMIT = 100;

    /** Shared local SparkSession; public for legacy callers that use it directly. */
    public SparkSession spark;

    public Properties properties = new Properties();

    /** Base HDFS directory under which converted datasource files are written. */
    private final String baseHdfsPath = PropertyUtils.getString(Constants.FS_DEFAULTFS) + "/dolphinscheduler/dolphinscheduler/datasource";

    // NOTE(review): the expiry reuses the Kerberos expire-time property — presumably so
    // the session is rebuilt before Kerberos tickets lapse; confirm this is intentional.
    private static final LoadingCache<String, SparkSqlUtils> cache = CacheBuilder
            .newBuilder()
            .expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
            .build(new CacheLoader<String, SparkSqlUtils>() {
                @Override
                public SparkSqlUtils load(String key) throws Exception {
                    return new SparkSqlUtils();
                }
            });

    /**
     * Creates a local-mode SparkSession running as the "hdfs" Hadoop user.
     * Prefer {@link #getInstance()} so the session is cached and recycled.
     */
    public SparkSqlUtils() {
        System.setProperty("HADOOP_USER_NAME", "hdfs");

        this.spark = SparkSession
                .builder().master("local")
                .appName("JavaMulticlassLogisticRegressionWithElasticNetExample")
                .getOrCreate();
    }

    /**
     * Returns the cached singleton, creating a fresh instance after cache expiry.
     */
    public static SparkSqlUtils getInstance() {
        return cache.getUnchecked(SPARK_SQL_UTILS_KEY);
    }

    /**
     * Reads a parquet file and returns its column names.
     *
     * @param path parquet file or directory path
     * @return column names in schema order
     */
    public String[] readParquet(String path) {
        return spark.read().parquet(path).columns();
    }

    /**
     * Loads a parquet file as a DataFrame.
     *
     * @param path parquet file or directory path
     * @return the loaded dataset
     */
    public Dataset<Row> readFile(String path) {
        return spark.read().parquet(path);
    }

    /**
     * Converts a .txt/.csv/.json file to parquet under the datasource base directory.
     *
     * @param path source file path; the extension selects the reader
     * @return the HDFS path of the written parquet data, or {@code null} when the
     *         file has no extension or an unsupported one
     */
    public String getparquetFileDF(String path) {
        int lastIndexOf = path.lastIndexOf('.');
        if (lastIndexOf < 0) {
            // No extension at all — treat like an unsupported suffix instead of
            // letting substring(-1) throw StringIndexOutOfBoundsException.
            return null;
        }
        // File suffix including the dot, e.g. ".csv"
        String suffix = path.substring(lastIndexOf);
        // A "/" separator is required here; without it the base directory and
        // "TextData" were fused into ".../datasourceTextData".
        String hdfsPath = baseHdfsPath + "/TextData/" + UUID.randomUUID();

        Dataset<Row> data;
        switch (suffix) {
            case ".txt":
                data = spark.read().text(path);
                break;
            case ".csv":
                data = spark.read().csv(path);
                break;
            case ".json":
                data = spark.read().json(path);
                break;
            default:
                return null;
        }
        data.write().parquet(hdfsPath);
        return hdfsPath;
    }

    /**
     * Previews up to {@value #PREVIEW_ROW_LIMIT} rows of a parquet file.
     *
     * @param path parquet file path
     * @return JSON with "fileName" (last path segment) and "fileContent"
     *         (a list of column-name → value objects); errors are logged and
     *         yield a partially-filled or empty result
     */
    public JSONObject showParquet(String path) {
        JSONObject returnResult = new JSONObject();
        try {
            Dataset<Row> parquetFileDF = spark.read().parquet(path);
            String[] columns = parquetFileDF.columns();
            // limit() already caps the collected size, so no post-hoc size branching
            // is needed (the old "size() > 100" branch was unreachable).
            List<Row> rows = parquetFileDF.limit(PREVIEW_ROW_LIMIT).collectAsList();

            // Last path segment (handles both "/" and "\" separators) is the file name.
            String[] segments = path.split("\\\\|/");
            returnResult.put("fileName", segments[segments.length - 1]);

            List<Object> result = new ArrayList<>(rows.size());
            for (Row row : rows) {
                JSONObject jsonObject = new JSONObject();
                // Read each field directly from the Row. The previous approach parsed
                // Row.toString() and split on "," which corrupted values containing
                // commas or brackets and could misalign values with column names.
                for (int j = 0; j < columns.length; j++) {
                    jsonObject.put(columns[j], row.get(j));
                }
                result.add(jsonObject);
            }
            returnResult.put("fileContent", result);
        } catch (Exception e) {
            logger.error("failed to preview parquet file: {}", path, e);
        }
        return returnResult;
    }

    /**
     * Returns the schema of a data file as a column-name → type-name map.
     *
     * @param path data file path readable by the default Spark reader
     * @return map from field name to Spark type name (insertion order not preserved)
     */
    public Map<String, String> getColumnName(String path) {
        Map<String, String> map = new HashMap<>();
        StructType schema = spark.read().load(path).schema();
        for (int i = 0; i < schema.size(); i++) {
            StructField structField = schema.apply(i);
            map.put(structField.name(), structField.dataType().typeName());
        }
        return map;
    }

    /**
     * Returns whether the given string parses as a float.
     *
     * @param column candidate value (a {@code null} argument yields {@code false})
     */
    public boolean isNumber(String column) {
        try {
            Float.parseFloat(column);
            return true;
        } catch (Exception e) {
            // Broad catch is deliberate: also covers the NPE thrown for null input.
            return false;
        }
    }

    /**
     * Persists a fitted pipeline model to HDFS under the user's directory.
     *
     * @param pipelineModel fitted model to save
     * @param userName directory segment under fs.defaultFS
     * @return the HDFS path the model was written to, or {@code null} on failure
     */
    public String toHDFS(PipelineModel pipelineModel, String userName) {
        try {
            // Resolve the configured fs.defaultFS URI; Constants.FS_DEFAULTFS is only
            // the property KEY, so using it directly produced an invalid base path.
            String defaultFs = PropertyUtils.getString(Constants.FS_DEFAULTFS);
            String hdfsPath = defaultFs + "/" + userName + "/" + UUID.randomUUID() + "0";
            pipelineModel.write().overwrite().save(hdfsPath);
            logger.info("pipeline model saved to {}", hdfsPath);
            return hdfsPath;
        } catch (Exception e) {
            // Pass the throwable so the full stack trace is logged.
            logger.error("failed to write pipeline model to HDFS", e);
            return null;
        }
    }

    /**
     * Closes the underlying SparkSession if one was created.
     */
    @Override
    public void close() throws IOException {
        if (null != spark) {
            spark.close();
        }
    }
}
